diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c4d79b2d..749a9bcf 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,18 +1,23 @@ name: CI on: push: - branches: - - main + branches-ignore: + - 'generated' + - 'codegen/**' + - 'integrated/**' + - 'stl-preview-head/**' + - 'stl-preview-base/**' pull_request: - branches: - - main - - next + branches-ignore: + - 'stl-preview-head/**' + - 'stl-preview-base/**' jobs: lint: + timeout-minutes: 10 name: lint - runs-on: ubuntu-latest - if: github.repository == 'openai/openai-ruby' + runs-on: ${{ github.repository == 'stainless-sdks/openai-ruby' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 @@ -20,27 +25,24 @@ jobs: uses: ruby/setup-ruby@v1 with: bundler-cache: false - ruby-version: '3.1' - run: |- bundle install - name: Run lints run: ./scripts/lint test: + timeout-minutes: 10 name: test - runs-on: ubuntu-latest - if: github.repository == 'openai/openai-ruby' - + runs-on: ${{ github.repository == 'stainless-sdks/openai-ruby' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 - name: Set up Ruby uses: ruby/setup-ruby@v1 with: bundler-cache: false - ruby-version: '3.1' - run: |- bundle install - name: Run tests run: ./scripts/test - diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml index ba6cb5ea..211eea8e 100644 --- a/.github/workflows/create-releases.yml +++ b/.github/workflows/create-releases.yml @@ -22,7 +22,16 @@ jobs: repo: ${{ github.event.repository.full_name }} stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} + - name: Publish to RubyGems.org + if: ${{ steps.release.outputs.releases_created }} + run: | + bash ./bin/publish-gem + env: + # `RUBYGEMS_HOST` is only required for private gem repositories, not https://rubygems.org + RUBYGEMS_HOST: ${{ secrets.OPENAI_RUBYGEMS_HOST || secrets.RUBYGEMS_HOST }} + GEM_HOST_API_KEY: ${{ secrets.OPENAI_GEM_HOST_API_KEY || secrets.GEM_HOST_API_KEY }} + - name: Update RubyDocs if: ${{ steps.release.outputs.releases_created }} run: | - curl -i -H "Content-Type: application/json" -X POST -d '{"repository":{"url":"https://github.com/openai/openai-ruby"}}' https://www.rubydoc.info/checkout + curl --request POST --include --header 'Content-Type: application/json' --data '{"repository":{"url":"https://github.com/openai/openai-ruby"}}' -- https://www.rubydoc.info/checkout diff --git a/.github/workflows/publish-gem.yml b/.github/workflows/publish-gem.yml index 48dbf6e6..e4185573 100644 --- a/.github/workflows/publish-gem.yml +++ b/.github/workflows/publish-gem.yml @@ -1,5 +1,5 @@ # workflow for re-running publishing to rubygems.org in case it fails for some reason -# you can run this workflow by navigating to https://www.github.com/openai/openai-python/actions/workflows/publish-gem.yml +# you can run this workflow by navigating to https://www.github.com/openai/openai-ruby/actions/workflows/publish-gem.yml name: Publish Gem on: workflow_dispatch: @@ -15,7 +15,6 @@ jobs: uses: ruby/setup-ruby@v1 with: bundler-cache: false - ruby-version: '3.1' - run: |- bundle install diff --git a/.gitignore b/.gitignore index 1ef280e1..3d26ceed 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,10 @@ -.prism.log +*.gem .idea/ +.ignore +.prism.log .ruby-lsp/ .yardoc/ -doc/ -sorbet/ -Brewfile.lock.json bin/tapioca -*.gem 
+Brewfile.lock.json +doc/ +sorbet/tapioca/* diff --git a/.release-please-manifest.json b/.release-please-manifest.json index c4762802..e7562934 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.0.1-alpha.0" + ".": "0.19.0" } \ No newline at end of file diff --git a/.rubocop.yml b/.rubocop.yml index 17e6abbd..decbf212 100644 --- a/.rubocop.yml +++ b/.rubocop.yml @@ -1,3 +1,4 @@ +# yaml-language-server: $schema=https://www.rubyschema.org/rubocop.json --- # Explicitly disable pending cops for now. This is the default behaviour but # this avoids a large warning every time we run it. @@ -8,7 +9,11 @@ AllCops: - "bin/*" NewCops: enable SuggestExtensions: false - TargetRubyVersion: 3.1.0 + TargetRubyVersion: 3.2 + +# Whether MFA is required or not should be left to the token configuration. +Gemspec/RequireMFA: + Enabled: false # Don't require this extra line break, it can be excessive. Layout/EmptyLineAfterGuardClause: @@ -35,7 +40,9 @@ Layout/LineLength: AllowedPatterns: - "^\\s*#.*$" - ^require(_relative)? - - "OpenAI::(Models|Resources)::" + - "OpenAI::Internal::Type::BaseModel$" + - "^\\s*[A-Z0-9_]+ = :" + - "OpenAI::(Models|Resources|Test)::" Max: 110 Layout/MultilineArrayLineBreaks: @@ -57,23 +64,60 @@ Layout/MultilineMethodParameterLineBreaks: # Prefer compact hash literals. Layout/SpaceInsideHashLiteralBraces: EnforcedStyle: no_space + Exclude: + - "**/*.rbi" + +Lint/BooleanSymbol: + Enabled: false + +# This option occasionally mangles identifier names +Lint/DeprecatedConstants: + Exclude: + - "**/*.rbi" + +# We use pattern assertion in tests to ensure correctness. +Lint/DuplicateMatchPattern: + Exclude: + - "test/**/*" # Fairly useful in tests for pattern assertions. Lint/EmptyInPattern: Exclude: - "test/**/*" +Lint/MissingCopEnableDirective: + Exclude: + - "examples/**/*.rb" + Lint/MissingSuper: Exclude: - "**/*.rbi" +Lint/SymbolConversion: + Exclude: + - "**/*.rbi" + # Disabled for safety reasons, this option changes code semantics. Lint/UnusedMethodArgument: AutoCorrect: false +# This option is prone to causing accidental bugs. +Lint/UselessAssignment: + AutoCorrect: false + Exclude: + - "examples/**/*.rb" + Metrics/AbcSize: Enabled: false +Metrics/BlockLength: + AllowedPatterns: + - assert_pattern + - type_alias + - define_sorbet_constant! + Exclude: + - "**/*.rbi" + Metrics/ClassLength: Enabled: false @@ -83,17 +127,30 @@ Metrics/CyclomaticComplexity: Metrics/MethodLength: Enabled: false +Metrics/ModuleLength: + Enabled: false + Metrics/ParameterLists: Enabled: false Metrics/PerceivedComplexity: Enabled: false +Naming/AccessorMethodName: + Enabled: false + +# Need to preserve block identifier for documentation. Naming/BlockForwarding: - Exclude: - - "**/*.rbi" + Enabled: false + +# Underscores are generally useful for disambiguation. +Naming/ClassAndModuleCamelCase: + Enabled: false Naming/MethodParameterName: + Enabled: false + +Naming/PredicatePrefix: Exclude: - "**/*.rbi" @@ -116,6 +173,9 @@ Style/Alias: Style/AndOr: EnforcedStyle: always +Style/ArgumentsForwarding: + Enabled: false + Style/BisectedAttrAccessor: Exclude: - "**/*.rbi" @@ -125,6 +185,9 @@ Style/ClassAndModuleChildren: Exclude: - "test/**/*" +Style/CommentAnnotation: + Enabled: false + # We should go back and add these docs, but ignore for now. Style/Documentation: Enabled: false @@ -163,6 +226,9 @@ Style/MethodCallWithArgsParentheses: Exclude: - "**/*.gemspec" +Style/MultilineBlockChain: + Enabled: false + # Perfectly fine. 
Style/MultipleComparison: Enabled: false @@ -190,6 +256,10 @@ Style/RedundantInitialize: Exclude: - "**/*.rbi" +Style/RedundantParentheses: + Exclude: + - "**/*.rbi" + # Prefer slashes for regex literals. Style/RegexpLiteral: EnforcedStyle: slashes @@ -198,6 +268,11 @@ Style/RegexpLiteral: Style/SafeNavigation: Enabled: false +Style/SignalException: + Exclude: + - Rakefile + - "**/*.rake" + # We use these sparingly, where we anticipate future branches for the # inner conditional. Style/SoleNestedConditional: @@ -210,3 +285,8 @@ Style/StringLiterals: # Prefer explicit symbols for clarity; you can search for `:the_symbol`. Style/SymbolArray: EnforcedStyle: brackets + +# This option makes examples harder to read for ruby novices. +Style/SymbolProc: + Exclude: + - "examples/**/*.rb" diff --git a/.ruby-version b/.ruby-version new file mode 100644 index 00000000..944880fa --- /dev/null +++ b/.ruby-version @@ -0,0 +1 @@ +3.2.0 diff --git a/.solargraph.yml b/.solargraph.yml new file mode 100644 index 00000000..18a89fcb --- /dev/null +++ b/.solargraph.yml @@ -0,0 +1,11 @@ +--- +max_files: 0 +include: + - '*.gemspec' + - 'Rakefile' + - 'examples/**/*.rb' + - 'lib/**/*.rb' + - 'test/openai/resource_namespaces.rb' + - 'test/openai/test_helper.rb' +exclude: + - 'rbi/**/*' diff --git a/.stats.yml b/.stats.yml index f18d6148..6725d699 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,4 @@ -configured_endpoints: 80 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml +configured_endpoints: 117 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-ddbdf9343316047e8a773c54fb24e4a8d225955e202a1888fde6f9c8898ebf98.yml +openapi_spec_hash: 9802f6dd381558466c897f6e387e06ca +config_hash: fe0ea26680ac2075a6cd66416aefe7db diff --git a/.yardopts b/.yardopts index 29c933bc..84c12f2a 100644 --- a/.yardopts +++ b/.yardopts @@ -1 +1,6 @@ +--type-name-tag generic:Generic +--default-return void --markup markdown +--markup-provider redcarpet +--exclude /rbi +--exclude /sig diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..28f9cf3b --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,125 @@ +## Setting up the environment + +This repository contains a `.ruby-version` file, which should work with either [rbenv](https://github.com/rbenv/rbenv) or [asdf](https://github.com/asdf-vm/asdf) with the [ruby plugin](https://github.com/asdf-vm/asdf-ruby). + +Please follow the instructions for your preferred version manager to install the Ruby version specified in the `.ruby-version` file. + +To set up the repository, run: + +```bash +$ ./scripts/bootstrap +``` + +This will install all the required dependencies. + +## Modifying/Adding code + +Most of the SDK is generated code. Modifications to code will be persisted between generations, but may result in merge conflicts between manual patches and changes from the generator. The generator will never modify the contents of the `lib/openai/helpers/` and `examples/` directories. + +## Adding and running examples + +Files in the `examples/` directory are not modified by the generator and can be freely edited or added to. + +```ruby +#!/usr/bin/env ruby +# frozen_string_literal: true + +require_relative "../lib/openai" + +# ...
+``` + +```bash +$ chmod +x './examples/.rb' + +# run the example against your API +$ ruby './examples/.rb' +``` + +## Using the repository from source + +If you’d like to use the repository from source, you can either install from git or reference a cloned repository: + +To install via git in your `Gemfile`: + +```ruby +gem "openai", git: "https://www.github.com/openai/openai-ruby" +``` + +Alternatively, reference a local copy of the repo: + +```bash +$ git clone -- 'https://www.github.com/openai/openai-ruby' '' +``` + +```ruby +gem "openai", path: "" +``` + +## Running commands + +Running `rake` by itself will show all runnable commands. + +```bash +$ bundle exec rake +``` + +## Running tests + +Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec before running them. + +```bash +$ npx prism mock path/to/your/openapi.yml +``` + +```bash +$ bundle exec rake test +``` + +## Linting and formatting + +This repository uses [rubocop](https://github.com/rubocop/rubocop) for linting and formatting of `*.rb` files, and [syntax_tree](https://github.com/ruby-syntax-tree/syntax_tree) for formatting of both `*.rbi` and `*.rbs` files. + +There are two separate type checkers supported by this library: [sorbet](https://github.com/sorbet/sorbet) and [steep](https://github.com/soutaro/steep) are used for verifying `*.rbi` and `*.rbs` files respectively. + +To lint and typecheck: + +```bash +$ bundle exec rake lint +``` + +To format and fix all lint issues automatically: + +```bash +$ bundle exec rake format +``` + +## Editor Support + +### Ruby LSP + +[Ruby LSP](https://github.com/Shopify/ruby-lsp) has quite good support for go to definition, but not auto-completion. + +It can be installed alongside Solargraph. + +### Solargraph + +[Solargraph](https://solargraph.org) has quite good support for auto-completion, but not go to definition. + +It can be installed alongside Ruby LSP. + +### Sorbet + +[Sorbet](https://sorbet.org) should mostly work out of the box when editing this library directly. However, there are some caveats due to the colocation of `*.rb` and `*.rbi` files in the same project. These issues should not otherwise manifest when this library is used as a dependency. + +1. For go to definition usages, Sorbet might get confused and may not always navigate to the correct location. + +2. For each generic type in `*.rbi` files, a spurious "Duplicate type member" error is present.
+ +## Documentation Preview + +To preview the documentation, run: + +```bash +$ bundle exec rake docs:preview [PORT=8808] +``` diff --git a/Gemfile b/Gemfile index b064fc5a..0d76364b 100644 --- a/Gemfile +++ b/Gemfile @@ -5,12 +5,6 @@ source "https://rubygems.org" gemspec group :development do - gem "async" - gem "minitest" - gem "minitest-focus" - gem "minitest-hooks" - gem "minitest-proveit" - gem "minitest-rg" gem "rake" gem "rbs" gem "rubocop" @@ -20,6 +14,20 @@ group :development do # TODO: using a fork for now, the prettier below has a bug gem "syntax_tree-rbs", github: "stainless-api/syntax_tree-rbs", branch: "main" gem "tapioca" +end + +group :development, :test do + gem "async" + gem "minitest" + gem "minitest-focus" + gem "minitest-hooks" + gem "minitest-proveit" + gem "minitest-rg" + gem "webmock" +end + +group :development, :docs do + gem "redcarpet" gem "webrick" gem "yard" end diff --git a/Gemfile.lock b/Gemfile.lock index 37e29d5a..46a5d18d 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -1,6 +1,6 @@ GIT remote: https://github.com/stainless-api/syntax_tree-rbs.git - revision: 140eb3ba2ff4b959b345ac2a7927cd758a9f1284 + revision: c30b50219918be7cfe3ef803a00b59d1e77fcada branch: main specs: syntax_tree-rbs (1.0.0) @@ -11,13 +11,13 @@ GIT PATH remote: . specs: - openai (0.0.1.pre.alpha.0) + openai (0.19.0) connection_pool GEM remote: https://rubygems.org/ specs: - activesupport (7.2.2.1) + activesupport (8.0.2.1) base64 benchmark (>= 0.3) bigdecimal @@ -29,43 +29,55 @@ GEM minitest (>= 5.1) securerandom (>= 0.3) tzinfo (~> 2.0, >= 2.0.5) - ast (2.4.2) - async (2.23.0) + uri (>= 0.13.1) + addressable (2.8.7) + public_suffix (>= 2.0.2, < 7.0) + ast (2.4.3) + async (2.27.3) console (~> 1.29) fiber-annotation - io-event (~> 1.9) + io-event (~> 1.11) metrics (~> 0.12) traces (~> 0.15) - base64 (0.2.0) - benchmark (0.4.0) - bigdecimal (3.1.9) + base64 (0.3.0) + benchmark (0.4.1) + bigdecimal (3.2.2) concurrent-ruby (1.3.5) - connection_pool (2.5.0) - console (1.29.3) + connection_pool (2.5.3) + console (1.33.0) fiber-annotation fiber-local (~> 1.1) json - csv (3.3.2) - drb (2.2.1) + crack (1.0.0) + bigdecimal + rexml + csv (3.3.5) + drb (2.2.3) erubi (1.13.1) - ffi (1.17.1) + ffi (1.17.2-aarch64-linux-gnu) + ffi (1.17.2-aarch64-linux-musl) + ffi (1.17.2-arm64-darwin) + ffi (1.17.2-x86_64-darwin) + ffi (1.17.2-x86_64-linux-gnu) + ffi (1.17.2-x86_64-linux-musl) fiber-annotation (0.2.0) fiber-local (1.1.0) fiber-storage - fiber-storage (1.0.0) + fiber-storage (1.0.1) fileutils (1.7.3) + hashdiff (1.2.0) i18n (1.14.7) concurrent-ruby (~> 1.0) - io-event (1.9.0) - json (2.10.1) - language_server-protocol (3.17.0.4) + io-event (1.11.2) + json (2.13.2) + language_server-protocol (3.17.0.5) lint_roller (1.1.0) listen (3.9.0) rb-fsevent (~> 0.10, >= 0.10.3) rb-inotify (~> 0.9, >= 0.9.10) - logger (1.6.6) - metrics (0.12.1) - minitest (5.25.4) + logger (1.7.0) + metrics (0.13.0) + minitest (5.25.5) minitest-focus (1.4.0) minitest (>= 4, < 6) minitest-hooks (1.5.2) @@ -74,26 +86,30 @@ GEM minitest (> 5, < 7) minitest-rg (5.3.0) minitest (~> 5.0) + mutex_m (0.3.0) netrc (0.11.0) - parallel (1.26.3) - parser (3.3.7.1) + parallel (1.27.0) + parser (3.3.9.0) ast (~> 2.4.1) racc prettier_print (1.2.1) - prism (1.3.0) + prism (1.4.0) + public_suffix (6.0.2) racc (1.8.1) rainbow (3.1.1) - rake (13.2.1) + rake (13.3.0) rb-fsevent (0.11.2) rb-inotify (0.11.1) ffi (~> 1.0) - rbi (0.2.4) + rbi (0.3.6) prism (~> 1.0) - sorbet-runtime (>= 0.5.9204) - rbs (3.8.1) + rbs (>= 3.4.4) + rbs (3.9.4) logger - 
regexp_parser (2.10.0) - rubocop (1.73.2) + redcarpet (3.6.1) + regexp_parser (2.11.2) + rexml (3.4.1) + rubocop (1.79.2) json (~> 2.3) language_server-protocol (~> 3.17.0.2) lint_roller (~> 1.1.0) @@ -101,44 +117,49 @@ GEM parser (>= 3.3.0.2) rainbow (>= 2.2.2, < 4.0) regexp_parser (>= 2.9.3, < 3.0) - rubocop-ast (>= 1.38.0, < 2.0) + rubocop-ast (>= 1.46.0, < 2.0) ruby-progressbar (~> 1.7) unicode-display_width (>= 2.4.0, < 4.0) - rubocop-ast (1.38.1) - parser (>= 3.3.1.0) + rubocop-ast (1.46.0) + parser (>= 3.3.7.2) + prism (~> 1.4) ruby-progressbar (1.13.0) securerandom (0.4.1) - sorbet (0.5.11888) - sorbet-static (= 0.5.11888) - sorbet-runtime (0.5.11888) - sorbet-static (0.5.11888-x86_64-linux) - sorbet-static-and-runtime (0.5.11888) - sorbet (= 0.5.11888) - sorbet-runtime (= 0.5.11888) - spoom (1.5.4) + sorbet (0.5.12424) + sorbet-static (= 0.5.12424) + sorbet-runtime (0.5.12424) + sorbet-static (0.5.12424-aarch64-linux) + sorbet-static (0.5.12424-universal-darwin) + sorbet-static (0.5.12424-x86_64-linux) + sorbet-static-and-runtime (0.5.12424) + sorbet (= 0.5.12424) + sorbet-runtime (= 0.5.12424) + spoom (1.6.3) erubi (>= 1.10.0) prism (>= 0.28.0) - rbi (>= 0.2.3) + rbi (>= 0.3.3) + rexml (>= 3.2.6) sorbet-static-and-runtime (>= 0.5.10187) thor (>= 0.19.2) - steep (1.9.4) + steep (1.10.0) activesupport (>= 5.1) concurrent-ruby (>= 1.1.10) csv (>= 3.0.9) fileutils (>= 1.1.0) json (>= 2.1.0) - language_server-protocol (>= 3.15, < 4.0) + language_server-protocol (>= 3.17.0.4, < 4.0) listen (~> 3.0) logger (>= 1.3.0) + mutex_m (>= 0.3.0) parser (>= 3.1) rainbow (>= 2.2.2, < 4.0) - rbs (~> 3.8) + rbs (~> 3.9) securerandom (>= 0.1) strscan (>= 1.0.0) - terminal-table (>= 2, < 4) + terminal-table (>= 2, < 5) uri (>= 0.12.0) - strscan (3.1.2) - syntax_tree (6.2.0) + strscan (3.1.5) + syntax_tree (6.3.0) prettier_print (>= 1.2.0) tapioca (0.16.11) benchmark @@ -150,14 +171,20 @@ GEM spoom (>= 1.2.0) thor (>= 1.2.0) yard-sorbet - terminal-table (3.0.2) - unicode-display_width (>= 1.1.1, < 3) - thor (1.3.2) - traces (0.15.2) + terminal-table (4.0.0) + unicode-display_width (>= 1.1.1, < 4) + thor (1.4.0) + traces (0.17.0) tzinfo (2.0.6) concurrent-ruby (~> 1.0) - unicode-display_width (2.6.0) + unicode-display_width (3.1.5) + unicode-emoji (~> 4.0, >= 4.0.4) + unicode-emoji (4.0.4) uri (1.0.3) + webmock (3.25.1) + addressable (>= 2.8.0) + crack (>= 0.3.2) + hashdiff (>= 0.4.0, < 2.0.0) webrick (1.9.1) yard (0.9.37) yard-sorbet (0.9.0) @@ -165,7 +192,14 @@ GEM yard PLATFORMS - x86_64-linux + aarch64-linux + aarch64-linux-gnu + aarch64-linux-musl + arm64-darwin + universal-darwin + x86_64-darwin + x86_64-linux-gnu + x86_64-linux-musl DEPENDENCIES async @@ -177,14 +211,16 @@ DEPENDENCIES openai! rake rbs + redcarpet rubocop sorbet steep syntax_tree syntax_tree-rbs! tapioca + webmock webrick yard BUNDLED WITH - 2.3.3 + 2.4.1 diff --git a/README.md b/README.md index c5e359bc..d4bc51fd 100644 --- a/README.md +++ b/README.md @@ -1,32 +1,24 @@ # OpenAI Ruby API library -The OpenAI Ruby library provides convenient access to the OpenAI REST API from any Ruby 3.0.0+ application. +The OpenAI Ruby library provides convenient access to the OpenAI REST API from any Ruby 3.2.0+ application. It ships with comprehensive types & docstrings in Yard, RBS, and RBI – [see below](https://github.com/openai/openai-ruby#Sorbet) for usage with Sorbet. The standard library's `net/http` is used as the HTTP transport, with connection pooling via the `connection_pool` gem. 
## Documentation -Documentation for the most recent release of this gem can be found [on RubyDoc](https://gemdocs.org/gems/openai/latest). +Documentation for releases of this gem can be found [on RubyDoc](https://gemdocs.org/gems/openai). -The underlying REST API documentation can be found on [platform.openai.com](https://platform.openai.com/docs). +The REST API documentation can be found on [platform.openai.com](https://platform.openai.com/docs). ## Installation -To use this gem during the beta, install directly from GitHub with Bundler by adding the following to your application's `Gemfile`: +To use this gem, install via Bundler by adding the following to your application's `Gemfile`: -```ruby -gem "openai", git: "https://github.com/openai/openai-ruby", branch: "main" -``` - -To fetch an initial copy of the gem: + -```sh -bundle install +```ruby +gem "openai", "~> 0.19.0" ``` -To update the version used by your application when updates are pushed to GitHub: - -```sh -bundle update openai -``` + ## Usage @@ -35,20 +27,29 @@ require "bundler/setup" require "openai" openai = OpenAI::Client.new( - api_key: "My API Key" # defaults to ENV["OPENAI_API_KEY"] + api_key: ENV["OPENAI_API_KEY"] # This is the default and can be omitted ) -chat_completion = openai.chat.completions.create( - messages: [{ - role: "user", - content: "Say this is a test" - }], - model: "gpt-4o" -) +chat_completion = openai.chat.completions.create(messages: [{role: "user", content: "Say this is a test"}], model: :"gpt-5") puts(chat_completion) ``` +### Streaming + +We provide support for streaming responses using Server-Sent Events (SSE). + +```ruby +stream = openai.chat.completions.stream_raw( + messages: [{role: "user", content: "Say this is a test"}], + model: :"gpt-5" +) + +stream.each do |completion| + puts(completion) +end +``` + ### Pagination List methods in the OpenAI API are paginated. @@ -68,37 +69,56 @@ page.auto_paging_each do |job| end ``` -### Streaming +Alternatively, you can use the `#next_page?` and `#next_page` methods for more granular control when working with pages. + +```ruby +if page.next_page? + new_page = page.next_page + puts(new_page.data[0].id) +end +``` + +### File uploads -We provide support for streaming responses using Server Side Events (SSE). +Request parameters that correspond to file uploads can be passed as raw contents, a [`Pathname`](https://rubyapi.org/3.2/o/pathname) instance, a [`StringIO`](https://rubyapi.org/3.2/o/stringio), and more. ```ruby -stream = openai.chat_completions_create_streaming( - messages: [{ - role: "user", - content: "Say this is a test" - }], - model: "gpt-4o" -) +require "pathname" -stream.for_each do |completion| - puts(completion) -end +# Use `Pathname` to send the filename and/or avoid paging a large file into memory: +file_object = openai.files.create(file: Pathname("input.jsonl"), purpose: "fine-tune") + +# Alternatively, pass file contents or a `StringIO` directly: +file_object = openai.files.create(file: File.read("input.jsonl"), purpose: "fine-tune") + +# Or, to control the filename and/or content type: +file = OpenAI::FilePart.new(File.read("input.jsonl"), filename: "input.jsonl", content_type: "…") +file_object = openai.files.create(file: file, purpose: "fine-tune") + +puts(file_object.id) ``` -### Errors +Note that you can also pass a raw `IO` descriptor, but this disables retries, as the library can't be sure if the descriptor is a file or pipe (which cannot be rewound).
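+ +If you have only a raw `IO` and still want retries, one option is to buffer the stream into a string first (a sketch; `io` here stands in for any readable stream small enough to hold in memory): + +```ruby +# Buffering up front yields rewindable contents, so retries stay enabled: +file = OpenAI::FilePart.new(io.read, filename: "input.jsonl") +file_object = openai.files.create(file: file, purpose: "fine-tune") +```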
+ +### Handling errors -When the library is unable to connect to the API, or if the API returns a non-success status code (i.e., 4xx or 5xx response), a subclass of `OpenAI::Error` will be thrown: +When the library is unable to connect to the API, or if the API returns a non-success status code (i.e., 4xx or 5xx response), a subclass of `OpenAI::Errors::APIError` will be thrown: ```ruby begin - job = openai.fine_tuning.jobs.create(model: "gpt-4o", training_file: "file-abc123") -rescue OpenAI::Error => e - puts(e.status) # 400 + job = openai.fine_tuning.jobs.create(model: :"babbage-002", training_file: "file-abc123") +rescue OpenAI::Errors::APIConnectionError => e + puts("The server could not be reached") + puts(e.cause) # an underlying Exception, likely raised within `net/http` +rescue OpenAI::Errors::RateLimitError => e + puts("A 429 status code was received; we should back off a bit.") +rescue OpenAI::Errors::APIStatusError => e + puts("Another non-200-range status code was received") + puts(e.status) end ``` -Error codes are as followed: +Error codes are as follows: | Cause | Error Type | | ---------------- | -------------------------- | @@ -109,7 +129,7 @@ Error codes are as followed: | HTTP 409 | `ConflictError` | | HTTP 422 | `UnprocessableEntityError` | | HTTP 429 | `RateLimitError` | -| HTTP >=500 | `InternalServerError` | +| HTTP >= 500 | `InternalServerError` | | Other HTTP error | `APIStatusError` | | Timeout | `APITimeoutError` | | Network error | `APIConnectionError` | @@ -130,22 +150,15 @@ openai = OpenAI::Client.new( # Or, configure per-request: openai.chat.completions.create( - messages: [{ - role: "user", - content: "How can I get the name of the current day in JavaScript?" - }], - model: "gpt-4o", + messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}], + model: :"gpt-5", request_options: {max_retries: 5} ) ``` ### Timeouts -By default, requests will time out after 600 seconds. - -Timeouts are applied separately to the initial connection and the overall request time, so in some cases a request could wait 2\*timeout seconds before it fails. - -You can use the `timeout` option to configure or disable this: +By default, requests will time out after 600 seconds. You can use the timeout option to configure or disable this: ```ruby # Configure the default for all requests: @@ -155,33 +168,134 @@ openai = OpenAI::Client.new( # Or, configure per-request: openai.chat.completions.create( - messages: [{ - role: "user", - content: "How can I list all files in a directory using Python?" - }], - model: "gpt-4o", + messages: [{role: "user", content: "How can I list all files in a directory using Python?"}], + model: :"gpt-5", request_options: {timeout: 5} ) ``` -## Sorbet Support +On timeout, `OpenAI::Errors::APITimeoutError` is raised. -This library is written with [Sorbet type definitions](https://sorbet.org/docs/rbi). However, there is no runtime dependency on the `sorbet-runtime`. +Note that requests that time out are retried by default. -What this means is that while you can use Sorbet to type check your code statically, and benefit from the [Sorbet Language Server](https://sorbet.org/docs/lsp) in your editor, there is no runtime type checking and execution overhead from Sorbet itself. 
+ +## Advanced concepts -Due to limitations with the Sorbet type system, where a method otherwise can take an instance of `OpenAI::BaseModel` class, you will need to use the `**` splat operator to pass the arguments: +### BaseModel + +All parameter and response objects inherit from `OpenAI::Internal::Type::BaseModel`, which provides several conveniences, including: + +1. All fields, including unknown ones, are accessible with `obj[:prop]` syntax, and can be destructured with `obj => {prop: prop}` or pattern-matching syntax. + +2. Structural equivalence for equality; if two API calls return the same values, comparing the responses with `==` will return true. + +3. Both instances and the classes themselves can be pretty-printed. + +4. Helpers such as `#to_h`, `#deep_to_h`, `#to_json`, and `#to_yaml`. + +### Making custom or undocumented requests + +#### Undocumented properties + +You can send undocumented parameters to any endpoint, and read undocumented response properties, like so: + +Note: `extra_` parameters of the same name override the documented parameters. ```ruby -model = CompletionCreateParams.new( - messages: [{ - role: "user", - content: "Say this is a test" - }], - model: "gpt-4o" +chat_completion = + openai.chat.completions.create( + messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}], + model: :"gpt-5", + request_options: { + extra_query: {my_query_parameter: value}, + extra_body: {my_body_parameter: value}, + extra_headers: {"my-header": value} + } + ) + +puts(chat_completion[:my_undocumented_property]) +``` + +#### Undocumented request params + +If you want to explicitly send an extra param, you can do so with the `extra_query`, `extra_body`, and `extra_headers` under the `request_options:` parameter when making a request, as seen in the examples above. + +#### Undocumented endpoints + +To make requests to undocumented endpoints while retaining the benefit of auth, retries, and so on, you can make requests using `client.request`, like so: + +```ruby +response = client.request( + method: :post, + path: '/undocumented/endpoint', + query: {"dog": "woof"}, + headers: {"useful-header": "interesting-value"}, + body: {"hello": "world"} ) +``` + +### Concurrency & connection pooling + +The `OpenAI::Client` instances are threadsafe, but are only fork-safe when there are no in-flight HTTP requests. + +Each instance of `OpenAI::Client` has its own HTTP connection pool with a default size of 99. As such, we recommend instantiating the client once per application in most settings. + +When all available connections from the pool are checked out, requests wait for a new connection to become available, with queue time counting towards the request timeout. -openai.chat.completions.create(**model) +Unless otherwise specified, other classes in the SDK do not have locks protecting their underlying data structure. + +## Sorbet + +This library provides comprehensive [RBI](https://sorbet.org/docs/rbi) definitions, and has no dependency on sorbet-runtime.
+ +You can provide typesafe request parameters like so: + +```ruby +openai.chat.completions.create( + messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")], + model: :"gpt-5" +) +``` + +Or, equivalently: + +```ruby +# Hashes work, but are not typesafe: +openai.chat.completions.create(messages: [{role: "user", content: "Say this is a test"}], model: :"gpt-5") + +# You can also splat a full Params class: +params = OpenAI::Chat::CompletionCreateParams.new( + messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")], + model: :"gpt-5" +) +openai.chat.completions.create(**params) +``` + +### Enums + +Since this library does not depend on `sorbet-runtime`, it cannot provide [`T::Enum`](https://sorbet.org/docs/tenum) instances. Instead, we provide "tagged symbols", which are always primitives at runtime: + +```ruby +# :minimal +puts(OpenAI::ReasoningEffort::MINIMAL) + +# Revealed type: `T.all(OpenAI::ReasoningEffort, Symbol)` +T.reveal_type(OpenAI::ReasoningEffort::MINIMAL) +``` + +Enum parameters have a "relaxed" type, so you can either pass in enum constants or their literal values: + +```ruby +# Using the enum constants preserves the tagged type information: +openai.chat.completions.create( + reasoning_effort: OpenAI::ReasoningEffort::MINIMAL, + # … +) + +# Literal values are also permissible: +openai.chat.completions.create( + reasoning_effort: :minimal, + # … +) ``` ## Versioning @@ -192,4 +306,8 @@ This package considers improvements to the (non-runtime) `*.rbi` and `*.rbs` typ ## Requirements -Ruby 3.0.0 or higher. +Ruby 3.2.0 or higher. + +## Contributing + +See [the contributing documentation](https://github.com/openai/openai-ruby/tree/main/CONTRIBUTING.md). diff --git a/Rakefile b/Rakefile index 7ea8a2fc..bc850886 100644 --- a/Rakefile +++ b/Rakefile @@ -1,89 +1,160 @@ # frozen_string_literal: true +require "pathname" +require "securerandom" +require "shellwords" + require "minitest/test_task" require "rake/clean" require "rubocop/rake_task" -require "securerandom" -require "shellwords" -CLEAN.push(*%w[.idea/ .ruby-lsp/ .yardoc/]) +tapioca = "sorbet/tapioca" +examples = "examples" +ignore_file = ".ignore" -xargs = %w[xargs --no-run-if-empty --null --max-procs=0 --max-args=300 --] +CLEAN.push(*%w[.idea/ .ruby-lsp/ .yardoc/ doc/], *FileList["*.gem"], ignore_file) -task(default: [:test, :format]) +CLOBBER.push(*%w[sorbet/rbi/annotations/ sorbet/rbi/gems/], tapioca) -Minitest::TestTask.create do |t| - t.libs = %w[.]
- t.test_globs = ENV.fetch("TEST", "./test/**/*_test.rb") +multitask(:default) do + sh(*%w[rake --tasks]) end -RuboCop::RakeTask.new(:rubocop) do |t| - t.options = %w[--fail-level E] - if ENV.key?("CI") - t.options += %w[--format github] - end +desc("Preview docs; use `PORT=` to change the port") +multitask(:"docs:preview") do + sh(*%w[yard server --reload --quiet --bind [::] --port], ENV.fetch("PORT", "8808")) end -multitask(:ruboformat) do - find = %w[find ./lib ./test ./rbi -type f -and ( -name *.rb -or -name *.rbi ) -print0] +desc("Run test suites; use `TEST=path/to/test.rb` to run a specific test file") +multitask(:test) do + rb = + FileList[ENV.fetch("TEST", "./test/**/*_test.rb")] + .map { "require_relative(#{_1.dump});" } + .join + + ruby(*%w[-w -e], rb, verbose: false) { fail unless _1 } +end + +xargs = %w[xargs --no-run-if-empty --null --max-procs=0 --max-args=300 --] +ruby_opt = {"RUBYOPT" => [ENV["RUBYOPT"], "--encoding=UTF-8"].compact.join(" ")} + +desc("Lint `*.rb(i)`") +multitask(:"lint:rubocop") do + find = %w[find ./lib ./test ./rbi ./examples -type f -and ( -name *.rb -or -name *.rbi ) -print0] + + rubocop = %w[rubocop] + rubocop += %w[--format github] if ENV.key?("CI") + + # some lines cannot be shortened + rubocop += %w[--except Lint/RedundantCopDisableDirective,Layout/LineLength] + + lint = xargs + rubocop + sh("#{find.shelljoin} | #{lint.shelljoin}") +end + +desc("Format `*.rb`") +multitask(:"format:rb") do + # while `syntax_tree` is much faster than `rubocop`, `rubocop` is the only formatter with full syntax support + find = %w[find ./lib ./test ./examples -type f -and -name *.rb -print0] fmt = xargs + %w[rubocop --fail-level F --autocorrect --format simple --] sh("#{find.shelljoin} | #{fmt.shelljoin}") end -multitask(:syntax_tree) do +desc("Format `*.rbi`") +multitask(:"format:rbi") do + find = %w[find ./rbi -type f -and -name *.rbi -print0] + fmt = xargs + %w[stree write --] + sh(ruby_opt, "#{find.shelljoin} | #{fmt.shelljoin}") +end + +desc("Format `*.rbs`") +multitask(:"format:rbs") do find = %w[find ./sig -type f -name *.rbs -print0] - inplace = /darwin|bsd/ =~ RUBY_PLATFORM ? %w[-i''] : %w[-i] + inplace = /darwin|bsd/ =~ RUBY_PLATFORM ? ["-i", ""] : %w[-i] uuid = SecureRandom.uuid - # `syntax_tree` has trouble with `rbs`'s class aliases + # `syntax_tree` has trouble with `rbs`'s class & module aliases - sed = xargs + %w[sed -E] + inplace + %w[-e] - # annotate class aliases with a unique comment - pre = sed + ["s/class ([^ ]+) = (.+$)/# #{uuid}\\n\\1: \\2/", "--"] + sed_bin = /darwin/ =~ RUBY_PLATFORM ? "/usr/bin/sed" : "sed" + sed = xargs + [sed_bin, "-E", *inplace, "-e"] + # annotate unprocessable aliases with a unique comment + pre = sed + ["s/(class|module) ([^ ]+) = (.+$)/# \\1 #{uuid}\\n\\2: \\3/", "--"] fmt = xargs + %w[stree write --plugin=rbs --] - # remove the unique comment and transform class aliases to type aliases + # remove the unique comment and unprocessable aliases to type aliases subst = <<~SED - s/# #{uuid}// + s/# (class|module) #{uuid}/\\1/ t l1 b + : l1 - n - s/([^ :]+): (.+$)/class \\1 = \\2/ + N + s/\\n *([^:]+): (.+)$/ \\1 = \\2/ SED - # 1. delete the unique comment - # 2. if deletion happened, branch to label `l1`, else continue - # 3. transform the class alias to a type alias at label `l1` + # for each line: + # 1. try transform the unique comment into `class | module`, if successful, branch to label `l1`. + # 2. at label `l1`, join previously annotated line with `class | module` information. 
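+  # as an illustration (`Foo` and `Bar` are hypothetical names): `pre` rewrites +  # `class Foo = Bar` into `# class <uuid>` followed by `Foo: Bar`, which formats +  # cleanly as a plain type alias; `pst` below joins it back into `class Foo = Bar`.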
pst = sed + [subst, "--"] + success = false + # transform class aliases to type aliases, which syntax tree has no trouble with sh("#{find.shelljoin} | #{pre.shelljoin}") # run syntax tree to format `*.rbs` files - sh("#{find.shelljoin} | #{fmt.shelljoin}") + sh(ruby_opt, "#{find.shelljoin} | #{fmt.shelljoin}") do + success = _1 + end # transform type aliases back to class aliases sh("#{find.shelljoin} | #{pst.shelljoin}") + + # always run post-processing to remove comment marker + fail unless success end -multitask(format: [:ruboformat, :syntax_tree]) +desc("Format everything") +multitask(format: [:"format:rb", :"format:rbi", :"format:rbs"]) -multitask(:steep) do +desc("Typecheck `*.rbs`") +multitask(:"typecheck:steep") do sh(*%w[steep check]) end -multitask(:sorbet) do - sh(*%w[srb typecheck -- .], chdir: "./rbi") +directory(examples) + +desc("Typecheck `*.rbi`") +multitask("typecheck:sorbet": examples) do + sh(*%w[srb typecheck --dir], examples) end -file("sorbet/tapioca") do +directory(tapioca) do sh(*%w[tapioca init]) end -multitask(typecheck: [:steep, :sorbet]) -multitask(lint: [:rubocop, :typecheck]) +desc("Typecheck everything") +multitask(typecheck: [:"typecheck:steep", :"typecheck:sorbet"]) + +desc("Lint and typecheck") +multitask(lint: [:"lint:rubocop", :typecheck]) + +desc("Build yard docs") +multitask(:"build:docs") do + sh(*%w[yard]) +end + +desc("Build ruby gem") +multitask(:"build:gem") do + # optimizing for grepping through the gem bundle: many tools honour `.ignore` files, including VSCode + # + # both `rbi` and `sig` directories are navigable by their respective tool chains and therefore can be ignored by tools such as `rg` + Pathname(ignore_file).write(<<~GLOB) + rbi/* + sig/* + GLOB -multitask(:build) do sh(*%w[gem build -- openai.gemspec]) + rm_rf(ignore_file) end -multitask(release: [:build]) do - sh(*%w[gem push], *FileList["openai-*.gem"]) +desc("Release ruby gem") +multitask(release: [:"build:gem"]) do + sh(*%w[gem push], *FileList["*.gem"]) end diff --git a/SECURITY.md b/SECURITY.md index 3b3bd8a6..4adb0c54 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -16,13 +16,13 @@ before making any information public. ## Reporting Non-SDK Related Security Issues If you encounter security issues that are not directly related to SDKs but pertain to the services -or products provided by OpenAI please follow the respective company's security reporting guidelines. +or products provided by OpenAI, please follow the respective company's security reporting guidelines. ### OpenAI Terms and Policies Our Security Policy can be found at [Security Policy URL](https://openai.com/policies/coordinated-vulnerability-disclosure-policy). -Please contact disclosure@openai.com for any questions or concerns regarding security of our services. +Please contact disclosure@openai.com for any questions or concerns regarding the security of our services. 
--- diff --git a/Steepfile b/Steepfile index 48667fe7..528b48c3 100644 --- a/Steepfile +++ b/Steepfile @@ -2,14 +2,14 @@ require "yaml" -target :lib do +target(:lib) do configure_code_diagnostics(Steep::Diagnostic::Ruby.strict) signature("sig") - YAML.safe_load_file("./manifest.yaml", symbolize_names: true) => { dependencies: } + YAML.safe_load_file("./manifest.yaml", symbolize_names: true) => {dependencies:} # currently these libraries lack the `*.rbs` annotations required by `steep` - stdlibs = dependencies - %w[etc net/http rbconfig set stringio] + stdlibs = dependencies - %w[English etc net/http rbconfig set stringio] stdlibs.each { library(_1) } end diff --git a/bin/check-release-environment b/bin/check-release-environment index 6aa95c4f..468572ab 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -7,7 +7,7 @@ if [ -z "${STAINLESS_API_KEY}" ]; then fi if [ -z "${GEM_HOST_API_KEY}" ]; then - errors+=("The OPENAI_GEM_HOST_API_KEY secret has not been set. Please set it in either this repository's secrets or your organization secrets") + errors+=("The GEM_HOST_API_KEY secret has not been set. Please set it in either this repository's secrets or your organization secrets") fi lenErrors=${#errors[@]} diff --git a/examples/.keep b/examples/.keep new file mode 100644 index 00000000..d8c73e93 --- /dev/null +++ b/examples/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store example files demonstrating usage of this SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. \ No newline at end of file diff --git a/lib/openai.rb b/lib/openai.rb index 2c73d487..fb87b5a4 100644 --- a/lib/openai.rb +++ b/lib/openai.rb @@ -1,46 +1,78 @@ # frozen_string_literal: true # Standard libraries. +# rubocop:disable Lint/RedundantRequireStatement +require "English" require "cgi" require "date" require "erb" require "etc" require "json" require "net/http" +require "pathname" require "rbconfig" require "securerandom" require "set" require "stringio" require "time" require "uri" +# rubocop:enable Lint/RedundantRequireStatement + +# We already ship the preferred sorbet manifests in the package itself. +# `tapioca` currently does not offer us a way to opt out of unnecessary compilation. +if Object.const_defined?(:Tapioca) && + caller.chain([$PROGRAM_NAME]).chain(ARGV).any?(/tapioca/) && + ARGV.none?(/dsl/) + return +end # Gems. require "connection_pool" # Package files. 
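+# NOTE: the require order below matters: the runtime type primitives under +# `openai/internal` are loaded before the models and resources that depend on them.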
require_relative "openai/version" -require_relative "openai/util" -require_relative "openai/extern" -require_relative "openai/base_model" -require_relative "openai/base_page" -require_relative "openai/base_stream" +require_relative "openai/internal/util" +require_relative "openai/internal/type/converter" +require_relative "openai/internal/type/unknown" +require_relative "openai/internal/type/boolean" +require_relative "openai/internal/type/file_input" +require_relative "openai/internal/type/enum" +require_relative "openai/internal/type/union" +require_relative "openai/internal/type/array_of" +require_relative "openai/internal/type/hash_of" +require_relative "openai/internal/type/base_model" +require_relative "openai/internal/type/base_page" +require_relative "openai/internal/type/base_stream" +require_relative "openai/internal/type/request_parameters" +require_relative "openai/internal" require_relative "openai/request_options" +require_relative "openai/file_part" require_relative "openai/errors" -require_relative "openai/base_client" -require_relative "openai/pooled_net_requester" +require_relative "openai/internal/transport/base_client" +require_relative "openai/internal/transport/pooled_net_requester" require_relative "openai/client" -require_relative "openai/stream" -require_relative "openai/cursor_page" -require_relative "openai/page" +require_relative "openai/internal/stream" +require_relative "openai/internal/conversation_cursor_page" +require_relative "openai/internal/cursor_page" +require_relative "openai/internal/page" require_relative "openai/models/reasoning_effort" require_relative "openai/models/chat/chat_completion_message" +require_relative "openai/models/graders/score_model_grader" +require_relative "openai/models/graders/python_grader" +require_relative "openai/models/graders/text_similarity_grader" require_relative "openai/models/fine_tuning/fine_tuning_job_wandb_integration_object" +require_relative "openai/models/responses/response_function_tool_call" +require_relative "openai/models/all_models" require_relative "openai/models/audio/speech_create_params" require_relative "openai/models/audio/speech_model" require_relative "openai/models/audio/transcription" require_relative "openai/models/audio/transcription_create_params" require_relative "openai/models/audio/transcription_create_response" +require_relative "openai/models/audio/transcription_include" require_relative "openai/models/audio/transcription_segment" +require_relative "openai/models/audio/transcription_stream_event" +require_relative "openai/models/audio/transcription_text_delta_event" +require_relative "openai/models/audio/transcription_text_done_event" require_relative "openai/models/audio/transcription_verbose" require_relative "openai/models/audio/transcription_word" require_relative "openai/models/audio/translation" @@ -147,6 +179,8 @@ require_relative "openai/models/beta/thread_stream_event" require_relative "openai/models/beta/thread_update_params" require_relative "openai/models/chat/chat_completion" +require_relative "openai/models/chat/chat_completion_allowed_tool_choice" +require_relative "openai/models/chat/chat_completion_allowed_tools" require_relative "openai/models/chat/chat_completion_assistant_message_param" require_relative "openai/models/chat/chat_completion_audio" require_relative "openai/models/chat/chat_completion_audio_param" @@ -156,14 +190,19 @@ require_relative "openai/models/chat/chat_completion_content_part_input_audio" require_relative 
"openai/models/chat/chat_completion_content_part_refusal" require_relative "openai/models/chat/chat_completion_content_part_text" +require_relative "openai/models/chat/chat_completion_custom_tool" require_relative "openai/models/chat/chat_completion_deleted" require_relative "openai/models/chat/chat_completion_developer_message_param" require_relative "openai/models/chat/chat_completion_function_call_option" require_relative "openai/models/chat/chat_completion_function_message_param" +require_relative "openai/models/chat/chat_completion_function_tool" +require_relative "openai/models/chat/chat_completion_message_custom_tool_call" +require_relative "openai/models/chat/chat_completion_message_function_tool_call" require_relative "openai/models/chat/chat_completion_message_param" require_relative "openai/models/chat/chat_completion_message_tool_call" require_relative "openai/models/chat/chat_completion_modality" require_relative "openai/models/chat/chat_completion_named_tool_choice" +require_relative "openai/models/chat/chat_completion_named_tool_choice_custom" require_relative "openai/models/chat/chat_completion_prediction_content" require_relative "openai/models/chat/chat_completion_reasoning_effort" require_relative "openai/models/chat/chat_completion_role" @@ -188,13 +227,86 @@ require_relative "openai/models/completion_create_params" require_relative "openai/models/completion_usage" require_relative "openai/models/compound_filter" +require_relative "openai/models/container_create_params" +require_relative "openai/models/container_create_response" +require_relative "openai/models/container_delete_params" +require_relative "openai/models/container_list_params" +require_relative "openai/models/container_list_response" +require_relative "openai/models/container_retrieve_params" +require_relative "openai/models/container_retrieve_response" +require_relative "openai/models/containers/file_create_params" +require_relative "openai/models/containers/file_create_response" +require_relative "openai/models/containers/file_delete_params" +require_relative "openai/models/containers/file_list_params" +require_relative "openai/models/containers/file_list_response" +require_relative "openai/models/containers/file_retrieve_params" +require_relative "openai/models/containers/file_retrieve_response" +require_relative "openai/models/containers/files/content_retrieve_params" +require_relative "openai/models/conversations/computer_screenshot_content" +require_relative "openai/models/conversations/container_file_citation_body" +require_relative "openai/models/conversations/conversation" +require_relative "openai/models/conversations/conversation_create_params" +require_relative "openai/models/conversations/conversation_deleted" +require_relative "openai/models/conversations/conversation_deleted_resource" +require_relative "openai/models/conversations/conversation_delete_params" +require_relative "openai/models/conversations/conversation_item" +require_relative "openai/models/conversations/conversation_item_list" +require_relative "openai/models/conversations/conversation_retrieve_params" +require_relative "openai/models/conversations/conversation_update_params" +require_relative "openai/models/conversations/file_citation_body" +require_relative "openai/models/conversations/input_file_content" +require_relative "openai/models/conversations/input_image_content" +require_relative "openai/models/conversations/input_text_content" +require_relative "openai/models/conversations/item_create_params" +require_relative 
"openai/models/conversations/item_delete_params" +require_relative "openai/models/conversations/item_list_params" +require_relative "openai/models/conversations/item_retrieve_params" +require_relative "openai/models/conversations/lob_prob" +require_relative "openai/models/conversations/message" +require_relative "openai/models/conversations/output_text_content" +require_relative "openai/models/conversations/refusal_content" +require_relative "openai/models/conversations/summary_text_content" +require_relative "openai/models/conversations/text_content" +require_relative "openai/models/conversations/top_log_prob" +require_relative "openai/models/conversations/url_citation_body" require_relative "openai/models/create_embedding_response" +require_relative "openai/models/custom_tool_input_format" require_relative "openai/models/embedding" require_relative "openai/models/embedding_create_params" require_relative "openai/models/embedding_model" require_relative "openai/models/error_object" +require_relative "openai/models/eval_create_params" +require_relative "openai/models/eval_create_response" +require_relative "openai/models/eval_custom_data_source_config" +require_relative "openai/models/eval_delete_params" +require_relative "openai/models/eval_delete_response" +require_relative "openai/models/eval_list_params" +require_relative "openai/models/eval_list_response" +require_relative "openai/models/eval_retrieve_params" +require_relative "openai/models/eval_retrieve_response" +require_relative "openai/models/evals/create_eval_completions_run_data_source" +require_relative "openai/models/evals/create_eval_jsonl_run_data_source" +require_relative "openai/models/evals/eval_api_error" +require_relative "openai/models/evals/run_cancel_params" +require_relative "openai/models/evals/run_cancel_response" +require_relative "openai/models/evals/run_create_params" +require_relative "openai/models/evals/run_create_response" +require_relative "openai/models/evals/run_delete_params" +require_relative "openai/models/evals/run_delete_response" +require_relative "openai/models/evals/run_list_params" +require_relative "openai/models/evals/run_list_response" +require_relative "openai/models/evals/run_retrieve_params" +require_relative "openai/models/evals/run_retrieve_response" +require_relative "openai/models/evals/runs/output_item_list_params" +require_relative "openai/models/evals/runs/output_item_list_response" +require_relative "openai/models/evals/runs/output_item_retrieve_params" +require_relative "openai/models/evals/runs/output_item_retrieve_response" +require_relative "openai/models/eval_stored_completions_data_source_config" +require_relative "openai/models/eval_update_params" +require_relative "openai/models/eval_update_response" require_relative "openai/models/file_chunking_strategy" require_relative "openai/models/file_chunking_strategy_param" +require_relative "openai/models/file_content" require_relative "openai/models/file_content_params" require_relative "openai/models/file_create_params" require_relative "openai/models/file_deleted" @@ -203,6 +315,18 @@ require_relative "openai/models/file_object" require_relative "openai/models/file_purpose" require_relative "openai/models/file_retrieve_params" +require_relative "openai/models/fine_tuning/alpha/grader_run_params" +require_relative "openai/models/fine_tuning/alpha/grader_run_response" +require_relative "openai/models/fine_tuning/alpha/grader_validate_params" +require_relative "openai/models/fine_tuning/alpha/grader_validate_response" 
+require_relative "openai/models/fine_tuning/checkpoints/permission_create_params" +require_relative "openai/models/fine_tuning/checkpoints/permission_create_response" +require_relative "openai/models/fine_tuning/checkpoints/permission_delete_params" +require_relative "openai/models/fine_tuning/checkpoints/permission_delete_response" +require_relative "openai/models/fine_tuning/checkpoints/permission_retrieve_params" +require_relative "openai/models/fine_tuning/checkpoints/permission_retrieve_response" +require_relative "openai/models/fine_tuning/dpo_hyperparameters" +require_relative "openai/models/fine_tuning/dpo_method" require_relative "openai/models/fine_tuning/fine_tuning_job" require_relative "openai/models/fine_tuning/fine_tuning_job_event" require_relative "openai/models/fine_tuning/fine_tuning_job_integration" @@ -211,15 +335,30 @@ require_relative "openai/models/fine_tuning/job_create_params" require_relative "openai/models/fine_tuning/job_list_events_params" require_relative "openai/models/fine_tuning/job_list_params" +require_relative "openai/models/fine_tuning/job_pause_params" +require_relative "openai/models/fine_tuning/job_resume_params" require_relative "openai/models/fine_tuning/job_retrieve_params" require_relative "openai/models/fine_tuning/jobs/checkpoint_list_params" require_relative "openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint" +require_relative "openai/models/fine_tuning/reinforcement_hyperparameters" +require_relative "openai/models/fine_tuning/reinforcement_method" +require_relative "openai/models/fine_tuning/supervised_hyperparameters" +require_relative "openai/models/fine_tuning/supervised_method" require_relative "openai/models/function_definition" require_relative "openai/models/function_parameters" +require_relative "openai/models/graders/label_model_grader" +require_relative "openai/models/graders/multi_grader" +require_relative "openai/models/graders/string_check_grader" require_relative "openai/models/image" require_relative "openai/models/image_create_variation_params" +require_relative "openai/models/image_edit_completed_event" require_relative "openai/models/image_edit_params" +require_relative "openai/models/image_edit_partial_image_event" +require_relative "openai/models/image_edit_stream_event" +require_relative "openai/models/image_gen_completed_event" require_relative "openai/models/image_generate_params" +require_relative "openai/models/image_gen_partial_image_event" +require_relative "openai/models/image_gen_stream_event" require_relative "openai/models/image_model" require_relative "openai/models/images_response" require_relative "openai/models/metadata" @@ -240,7 +379,10 @@ require_relative "openai/models/response_format_json_object" require_relative "openai/models/response_format_json_schema" require_relative "openai/models/response_format_text" +require_relative "openai/models/response_format_text_grammar" +require_relative "openai/models/response_format_text_python" require_relative "openai/models/responses/computer_tool" +require_relative "openai/models/responses/custom_tool" require_relative "openai/models/responses/easy_input_message" require_relative "openai/models/responses/file_search_tool" require_relative "openai/models/responses/function_tool" @@ -250,6 +392,7 @@ require_relative "openai/models/responses/response_audio_done_event" require_relative "openai/models/responses/response_audio_transcript_delta_event" require_relative "openai/models/responses/response_audio_transcript_done_event" +require_relative 
"openai/models/responses/response_cancel_params" require_relative "openai/models/responses/response_code_interpreter_call_code_delta_event" require_relative "openai/models/responses/response_code_interpreter_call_code_done_event" require_relative "openai/models/responses/response_code_interpreter_call_completed_event" @@ -258,11 +401,18 @@ require_relative "openai/models/responses/response_code_interpreter_tool_call" require_relative "openai/models/responses/response_completed_event" require_relative "openai/models/responses/response_computer_tool_call" +require_relative "openai/models/responses/response_computer_tool_call_output_item" +require_relative "openai/models/responses/response_computer_tool_call_output_screenshot" require_relative "openai/models/responses/response_content" require_relative "openai/models/responses/response_content_part_added_event" require_relative "openai/models/responses/response_content_part_done_event" +require_relative "openai/models/responses/response_conversation_param" require_relative "openai/models/responses/response_created_event" require_relative "openai/models/responses/response_create_params" +require_relative "openai/models/responses/response_custom_tool_call" +require_relative "openai/models/responses/response_custom_tool_call_input_delta_event" +require_relative "openai/models/responses/response_custom_tool_call_input_done_event" +require_relative "openai/models/responses/response_custom_tool_call_output" require_relative "openai/models/responses/response_delete_params" require_relative "openai/models/responses/response_error" require_relative "openai/models/responses/response_error_event" @@ -275,8 +425,13 @@ require_relative "openai/models/responses/response_format_text_json_schema_config" require_relative "openai/models/responses/response_function_call_arguments_delta_event" require_relative "openai/models/responses/response_function_call_arguments_done_event" -require_relative "openai/models/responses/response_function_tool_call" +require_relative "openai/models/responses/response_function_tool_call_item" +require_relative "openai/models/responses/response_function_tool_call_output_item" require_relative "openai/models/responses/response_function_web_search" +require_relative "openai/models/responses/response_image_gen_call_completed_event" +require_relative "openai/models/responses/response_image_gen_call_generating_event" +require_relative "openai/models/responses/response_image_gen_call_in_progress_event" +require_relative "openai/models/responses/response_image_gen_call_partial_image_event" require_relative "openai/models/responses/response_includable" require_relative "openai/models/responses/response_incomplete_event" require_relative "openai/models/responses/response_in_progress_event" @@ -287,8 +442,18 @@ require_relative "openai/models/responses/response_input_image" require_relative "openai/models/responses/response_input_item" require_relative "openai/models/responses/response_input_message_content_list" +require_relative "openai/models/responses/response_input_message_item" require_relative "openai/models/responses/response_input_text" +require_relative "openai/models/responses/response_item" require_relative "openai/models/responses/response_item_list" +require_relative "openai/models/responses/response_mcp_call_arguments_delta_event" +require_relative "openai/models/responses/response_mcp_call_arguments_done_event" +require_relative "openai/models/responses/response_mcp_call_completed_event" +require_relative 
"openai/models/responses/response_mcp_call_failed_event" +require_relative "openai/models/responses/response_mcp_call_in_progress_event" +require_relative "openai/models/responses/response_mcp_list_tools_completed_event" +require_relative "openai/models/responses/response_mcp_list_tools_failed_event" +require_relative "openai/models/responses/response_mcp_list_tools_in_progress_event" require_relative "openai/models/responses/response_output_audio" require_relative "openai/models/responses/response_output_item" require_relative "openai/models/responses/response_output_item_added_event" @@ -296,13 +461,21 @@ require_relative "openai/models/responses/response_output_message" require_relative "openai/models/responses/response_output_refusal" require_relative "openai/models/responses/response_output_text" +require_relative "openai/models/responses/response_output_text_annotation_added_event" +require_relative "openai/models/responses/response_prompt" +require_relative "openai/models/responses/response_queued_event" require_relative "openai/models/responses/response_reasoning_item" +require_relative "openai/models/responses/response_reasoning_summary_part_added_event" +require_relative "openai/models/responses/response_reasoning_summary_part_done_event" +require_relative "openai/models/responses/response_reasoning_summary_text_delta_event" +require_relative "openai/models/responses/response_reasoning_summary_text_done_event" +require_relative "openai/models/responses/response_reasoning_text_delta_event" +require_relative "openai/models/responses/response_reasoning_text_done_event" require_relative "openai/models/responses/response_refusal_delta_event" require_relative "openai/models/responses/response_refusal_done_event" require_relative "openai/models/responses/response_retrieve_params" require_relative "openai/models/responses/response_status" require_relative "openai/models/responses/response_stream_event" -require_relative "openai/models/responses/response_text_annotation_delta_event" require_relative "openai/models/responses/response_text_config" require_relative "openai/models/responses/response_text_delta_event" require_relative "openai/models/responses/response_text_done_event" @@ -311,10 +484,14 @@ require_relative "openai/models/responses/response_web_search_call_in_progress_event" require_relative "openai/models/responses/response_web_search_call_searching_event" require_relative "openai/models/responses/tool" +require_relative "openai/models/responses/tool_choice_allowed" +require_relative "openai/models/responses/tool_choice_custom" require_relative "openai/models/responses/tool_choice_function" +require_relative "openai/models/responses/tool_choice_mcp" require_relative "openai/models/responses/tool_choice_options" require_relative "openai/models/responses/tool_choice_types" require_relative "openai/models/responses/web_search_tool" +require_relative "openai/models/responses_model" require_relative "openai/models/static_file_chunking_strategy" require_relative "openai/models/static_file_chunking_strategy_object" require_relative "openai/models/static_file_chunking_strategy_object_param" @@ -347,6 +524,23 @@ require_relative "openai/models/vector_store_search_params" require_relative "openai/models/vector_store_search_response" require_relative "openai/models/vector_store_update_params" +require_relative "openai/models/webhooks/batch_cancelled_webhook_event" +require_relative "openai/models/webhooks/batch_completed_webhook_event" +require_relative 
"openai/models/webhooks/batch_expired_webhook_event" +require_relative "openai/models/webhooks/batch_failed_webhook_event" +require_relative "openai/models/webhooks/eval_run_canceled_webhook_event" +require_relative "openai/models/webhooks/eval_run_failed_webhook_event" +require_relative "openai/models/webhooks/eval_run_succeeded_webhook_event" +require_relative "openai/models/webhooks/fine_tuning_job_cancelled_webhook_event" +require_relative "openai/models/webhooks/fine_tuning_job_failed_webhook_event" +require_relative "openai/models/webhooks/fine_tuning_job_succeeded_webhook_event" +require_relative "openai/models/webhooks/response_cancelled_webhook_event" +require_relative "openai/models/webhooks/response_completed_webhook_event" +require_relative "openai/models/webhooks/response_failed_webhook_event" +require_relative "openai/models/webhooks/response_incomplete_webhook_event" +require_relative "openai/models/webhooks/unwrap_webhook_event" +require_relative "openai/models/webhooks/webhook_unwrap_params" +require_relative "openai/models" require_relative "openai/resources/audio" require_relative "openai/resources/audio/speech" require_relative "openai/resources/audio/transcriptions" @@ -362,11 +556,26 @@ require_relative "openai/resources/chat/completions" require_relative "openai/resources/chat/completions/messages" require_relative "openai/resources/completions" +require_relative "openai/resources/containers" +require_relative "openai/resources/containers/files" +require_relative "openai/resources/containers/files/content" +require_relative "openai/resources/conversations" +require_relative "openai/resources/conversations/items" require_relative "openai/resources/embeddings" +require_relative "openai/resources/evals" +require_relative "openai/resources/evals/runs" +require_relative "openai/resources/evals/runs/output_items" require_relative "openai/resources/files" require_relative "openai/resources/fine_tuning" +require_relative "openai/resources/fine_tuning/alpha" +require_relative "openai/resources/fine_tuning/alpha/graders" +require_relative "openai/resources/fine_tuning/checkpoints" +require_relative "openai/resources/fine_tuning/checkpoints/permissions" require_relative "openai/resources/fine_tuning/jobs" require_relative "openai/resources/fine_tuning/jobs/checkpoints" +require_relative "openai/resources/fine_tuning/methods" +require_relative "openai/resources/graders" +require_relative "openai/resources/graders/grader_models" require_relative "openai/resources/images" require_relative "openai/resources/models" require_relative "openai/resources/moderations" @@ -377,3 +586,4 @@ require_relative "openai/resources/vector_stores" require_relative "openai/resources/vector_stores/file_batches" require_relative "openai/resources/vector_stores/files" +require_relative "openai/resources/webhooks" diff --git a/lib/openai/base_client.rb b/lib/openai/base_client.rb deleted file mode 100644 index 9707f135..00000000 --- a/lib/openai/base_client.rb +++ /dev/null @@ -1,466 +0,0 @@ -# frozen_string_literal: true - -module OpenAI - # @private - # - # @abstract - # - class BaseClient - # from whatwg fetch spec - MAX_REDIRECTS = 20 - - # rubocop:disable Style/MutableConstant - PLATFORM_HEADERS = { - "x-stainless-arch" => OpenAI::Util.arch, - "x-stainless-lang" => "ruby", - "x-stainless-os" => OpenAI::Util.os, - "x-stainless-package-version" => OpenAI::VERSION, - "x-stainless-runtime" => ::RUBY_ENGINE, - "x-stainless-runtime-version" => ::RUBY_ENGINE_VERSION - } - # rubocop:enable 
Style/MutableConstant - - class << self - # @private - # - # @param req [Hash{Symbol=>Object}] - # - # @raise [ArgumentError] - # - def validate!(req) - keys = [:method, :path, :query, :headers, :body, :unwrap, :page, :stream, :model, :options] - case req - in Hash - req.each_key do |k| - unless keys.include?(k) - raise ArgumentError.new("Request `req` keys must be one of #{keys}, got #{k.inspect}") - end - end - else - raise ArgumentError.new("Request `req` must be a Hash or RequestOptions, got #{req.inspect}") - end - end - - # @private - # - # @param status [Integer] - # @param headers [Hash{String=>String}, Net::HTTPHeader] - # - # @return [Boolean] - # - def should_retry?(status, headers:) - coerced = OpenAI::Util.coerce_boolean(headers["x-should-retry"]) - case [coerced, status] - in [true | false, _] - coerced - in [_, 408 | 409 | 429 | (500..)] - # retry on: - # 408: timeouts - # 409: locks - # 429: rate limits - # 500+: unknown errors - true - else - false - end - end - - # @private - # - # @param request [Hash{Symbol=>Object}] . - # - # @option request [Symbol] :method - # - # @option request [URI::Generic] :url - # - # @option request [Hash{String=>String}] :headers - # - # @option request [Object] :body - # - # @option request [Integer] :max_retries - # - # @option request [Float] :timeout - # - # @param status [Integer] - # - # @param response_headers [Hash{String=>String}, Net::HTTPHeader] - # - # @return [Hash{Symbol=>Object}] - # - def follow_redirect(request, status:, response_headers:) - method, url, headers = request.fetch_values(:method, :url, :headers) - location = - Kernel.then do - URI.join(url, response_headers["location"]) - rescue ArgumentError - message = "Server responded with status #{status} but no valid location header." - raise OpenAI::APIConnectionError.new(url: url, message: message) - end - - request = {**request, url: location} - - case [url.scheme, location.scheme] - in ["https", "http"] - message = "Tried to redirect to a insecure URL" - raise OpenAI::APIConnectionError.new(url: url, message: message) - else - nil - end - - # from whatwg fetch spec - case [status, method] - in [301 | 302, :post] | [303, _] - drop = %w[content-encoding content-language content-length content-location content-type] - request = { - **request, - method: method == :head ? 
:head : :get, - headers: headers.except(*drop), - body: nil - } - else - end - - # from undici - if OpenAI::Util.uri_origin(url) != OpenAI::Util.uri_origin(location) - drop = %w[authorization cookie host proxy-authorization] - request = {**request, headers: request.fetch(:headers).except(*drop)} - end - - request - end - end - - # @private - # - # @return [OpenAI::PooledNetRequester] - attr_accessor :requester - - # @private - # - # @param base_url [String] - # @param timeout [Float] - # @param max_retries [Integer] - # @param initial_retry_delay [Float] - # @param max_retry_delay [Float] - # @param headers [Hash{String=>String, Integer, Array, nil}] - # @param idempotency_header [String, nil] - # - def initialize( - base_url:, - timeout: 0.0, - max_retries: 0, - initial_retry_delay: 0.0, - max_retry_delay: 0.0, - headers: {}, - idempotency_header: nil - ) - @requester = OpenAI::PooledNetRequester.new - @headers = OpenAI::Util.normalized_headers( - self.class::PLATFORM_HEADERS, - { - "accept" => "application/json", - "content-type" => "application/json" - }, - headers - ) - @base_url = OpenAI::Util.parse_uri(base_url) - @idempotency_header = idempotency_header&.to_s&.downcase - @max_retries = max_retries - @timeout = timeout - @initial_retry_delay = initial_retry_delay - @max_retry_delay = max_retry_delay - end - - # @private - # - # @return [Hash{String=>String}] - # - private def auth_headers = {} - - # @private - # - # @return [String] - # - private def generate_idempotency_key = "stainless-ruby-retry-#{SecureRandom.uuid}" - - # @private - # - # @param req [Hash{Symbol=>Object}] . - # - # @option req [Symbol] :method - # - # @option req [String, Array] :path - # - # @option req [Hash{String=>Array, String, nil}, nil] :query - # - # @option req [Hash{String=>String, Integer, Array, nil}, nil] :headers - # - # @option req [Object, nil] :body - # - # @option req [Symbol, nil] :unwrap - # - # @option req [Class, nil] :page - # - # @option req [Class, nil] :stream - # - # @option req [OpenAI::Converter, Class, nil] :model - # - # @param opts [Hash{Symbol=>Object}] . - # - # @option opts [String, nil] :idempotency_key - # - # @option opts [Hash{String=>Array, String, nil}, nil] :extra_query - # - # @option opts [Hash{String=>String, nil}, nil] :extra_headers - # - # @option opts [Hash{Symbol=>Object}, nil] :extra_body - # - # @option opts [Integer, nil] :max_retries - # - # @option opts [Float, nil] :timeout - # - # @return [Hash{Symbol=>Object}] - # - private def build_request(req, opts) - method, uninterpolated_path = req.fetch_values(:method, :path) - - path = OpenAI::Util.interpolate_path(uninterpolated_path) - - query = OpenAI::Util.deep_merge(req[:query].to_h, opts[:extra_query].to_h) - - headers = OpenAI::Util.normalized_headers( - @headers, - auth_headers, - req[:headers].to_h, - opts[:extra_headers].to_h - ) - - if @idempotency_header && - !headers.key?(@idempotency_header) && - !Net::HTTP::IDEMPOTENT_METHODS_.include?(method.to_s.upcase) - headers[@idempotency_header] = opts.fetch(:idempotency_key) { generate_idempotency_key } - end - - unless headers.key?("x-stainless-retry-count") - headers["x-stainless-retry-count"] = "0" - end - - timeout = opts.fetch(:timeout, @timeout).to_f.clamp((0..)) - unless headers.key?("x-stainless-timeout") || timeout.zero? - headers["x-stainless-timeout"] = timeout.to_s - end - - headers.reject! { |_, v| v.to_s.empty? 
} - - body = - case method - in :get | :head | :options | :trace - nil - else - OpenAI::Util.deep_merge(*[req[:body], opts[:extra_body]].compact) - end - - headers, encoded = OpenAI::Util.encode_content(headers, body) - { - method: method, - url: OpenAI::Util.join_parsed_uri(@base_url, {**req, path: path, query: query}), - headers: headers, - body: encoded, - max_retries: opts.fetch(:max_retries, @max_retries), - timeout: timeout - } - end - - # @private - # - # @param headers [Hash{String=>String}] - # @param retry_count [Integer] - # - # @return [Float] - # - private def retry_delay(headers, retry_count:) - # Non-standard extension - span = Float(headers["retry-after-ms"], exception: false)&.then { _1 / 1000 } - return span if span - - retry_header = headers["retry-after"] - return span if (span = Float(retry_header, exception: false)) - - span = retry_header&.then do - Time.httpdate(_1) - Time.now - rescue ArgumentError - nil - end - return span if span - - scale = retry_count**2 - jitter = 1 - (0.25 * rand) - (@initial_retry_delay * scale * jitter).clamp(0, @max_retry_delay) - end - - # @private - # - # @param request [Hash{Symbol=>Object}] . - # - # @option request [Symbol] :method - # - # @option request [URI::Generic] :url - # - # @option request [Hash{String=>String}] :headers - # - # @option request [Object] :body - # - # @option request [Integer] :max_retries - # - # @option request [Float] :timeout - # - # @param redirect_count [Integer] - # - # @param retry_count [Integer] - # - # @param send_retry_header [Boolean] - # - # @raise [OpenAI::APIError] - # @return [Array(Integer, Net::HTTPResponse, Enumerable)] - # - private def send_request(request, redirect_count:, retry_count:, send_retry_header:) - url, headers, max_retries, timeout = request.fetch_values(:url, :headers, :max_retries, :timeout) - input = {**request.except(:timeout), deadline: OpenAI::Util.monotonic_secs + timeout} - - if send_retry_header - headers["x-stainless-retry-count"] = retry_count.to_s - end - - begin - response, stream = @requester.execute(input) - status = Integer(response.code) - rescue OpenAI::APIConnectionError => e - status = e - end - - # normally we want to drain the response body and reuse the HTTP session by clearing the socket buffers - # unless we hit a server error - srv_fault = (500...).include?(status) - - case status - in ..299 - [status, response, stream] - in 300..399 if redirect_count >= self.class::MAX_REDIRECTS - message = "Failed to complete the request within #{self.class::MAX_REDIRECTS} redirects." - - stream.each { next } - raise OpenAI::APIConnectionError.new(url: url, message: message) - in 300..399 - request = self.class.follow_redirect(request, status: status, response_headers: response) - - stream.each { next } - send_request( - request, - redirect_count: redirect_count + 1, - retry_count: retry_count, - send_retry_header: send_retry_header - ) - in OpenAI::APIConnectionError if retry_count >= max_retries - raise status - in (400..) if retry_count >= max_retries || !self.class.should_retry?(status, headers: response) - decoded = OpenAI::Util.decode_content(response, stream: stream, suppress_error: true) - - if srv_fault - OpenAI::Util.close_fused!(stream) - else - stream.each { next } - end - - raise OpenAI::APIStatusError.for( - url: url, - status: status, - body: decoded, - request: nil, - response: response - ) - in (400..) 
| OpenAI::APIConnectionError - delay = retry_delay(response, retry_count: retry_count) - - if srv_fault - OpenAI::Util.close_fused!(stream) - else - stream&.each { next } - end - sleep(delay) - - send_request( - request, - redirect_count: redirect_count, - retry_count: retry_count + 1, - send_retry_header: send_retry_header - ) - end - end - - # Execute the request specified by `req`. This is the method that all resource - # methods call into. - # - # @param req [Hash{Symbol=>Object}] . - # - # @option req [Symbol] :method - # - # @option req [String, Array] :path - # - # @option req [Hash{String=>Array, String, nil}, nil] :query - # - # @option req [Hash{String=>String, Integer, Array, nil}, nil] :headers - # - # @option req [Object, nil] :body - # - # @option req [Symbol, nil] :unwrap - # - # @option req [Class, nil] :page - # - # @option req [Class, nil] :stream - # - # @option req [OpenAI::Converter, Class, nil] :model - # - # @option req [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :options - # - # @raise [OpenAI::APIError] - # @return [Object] - # - def request(req) - self.class.validate!(req) - model = req.fetch(:model) { OpenAI::Unknown } - opts = req[:options].to_h - OpenAI::RequestOptions.validate!(opts) - request = build_request(req.except(:options), opts) - url = request.fetch(:url) - - # Don't send the current retry count in the headers if the caller modified the header defaults. - send_retry_header = request.fetch(:headers)["x-stainless-retry-count"] == "0" - status, response, stream = send_request( - request, - redirect_count: 0, - retry_count: 0, - send_retry_header: send_retry_header - ) - - decoded = OpenAI::Util.decode_content(response, stream: stream) - case req - in { stream: Class => st } - st.new(model: model, url: url, status: status, response: response, messages: decoded) - in { page: Class => page } - page.new(client: self, req: req, headers: response, page_data: decoded) - else - unwrapped = OpenAI::Util.dig(decoded, req[:unwrap]) - OpenAI::Converter.coerce(model, unwrapped) - end - end - - # @return [String] - # - def inspect - # rubocop:disable Layout/LineLength - base_url = OpenAI::Util.unparse_uri(@base_url) - "#<#{self.class.name}:0x#{object_id.to_s(16)} base_url=#{base_url} max_retries=#{@max_retries} timeout=#{@timeout}>" - # rubocop:enable Layout/LineLength - end - end -end diff --git a/lib/openai/base_model.rb b/lib/openai/base_model.rb deleted file mode 100644 index 8f58a6ab..00000000 --- a/lib/openai/base_model.rb +++ /dev/null @@ -1,1266 +0,0 @@ -# frozen_string_literal: true - -module OpenAI - # @private - # - # @abstract - # - module Converter - # rubocop:disable Lint/UnusedMethodArgument - - # @private - # - # @param value [Object] - # - # @return [Object] - # - def coerce(value) = value - - # @private - # - # @param value [Object] - # - # @return [Object] - # - def dump(value) = value - - # @private - # - # @param value [Object] - # - # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # - def try_strict_coerce(value) = (raise NotImplementedError) - - # rubocop:enable Lint/UnusedMethodArgument - - class << self - # @private - # - # @param spec [Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class] . - # - # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const - # - # @option spec [Proc] :enum - # - # @option spec [Proc] :union - # - # @option spec [Boolean] :"nil?" 
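Editor's note on the transport code retired above: `BaseClient#send_request` retries whenever `should_retry?` approves (an explicit `x-should-retry` header, or status 408/409/429/5xx), sleeping for `retry_delay` between attempts. A minimal standalone sketch of that delay policy, with `initial:`/`max:` standing in for the instance-level `@initial_retry_delay`/`@max_retry_delay`:

```ruby
require "time"

# Hypothetical free-standing version of the deleted BaseClient#retry_delay.
def retry_delay(headers, retry_count:, initial: 0.5, max: 8.0)
  # Non-standard extension: a millisecond hint, honored first by the original.
  ms = Float(headers["retry-after-ms"], exception: false)
  return ms / 1000 if ms

  retry_after = headers["retry-after"]
  # RFC 9110 allows either delay-seconds or an HTTP-date here.
  secs = Float(retry_after, exception: false)
  return secs if secs

  begin
    return Time.httpdate(retry_after) - Time.now unless retry_after.nil?
  rescue ArgumentError
    # not a parseable HTTP-date; fall through to computed backoff
  end

  # Quadratic backoff with jitter, clamped to [0, max] as in the original.
  (initial * (retry_count**2) * (1 - (0.25 * rand))).clamp(0, max)
end

retry_delay({"retry-after" => "2"}, retry_count: 3) # => 2.0
```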
- # - # @return [Proc] - # - def type_info(spec) - case spec - in Hash - type_info(spec.slice(:const, :enum, :union).first&.last) - in Proc - spec - in OpenAI::Converter | Class - -> { spec } - in true | false - -> { OpenAI::BooleanModel } - in NilClass | true | false | Symbol | Integer | Float - -> { spec.class } - end - end - - # @private - # - # Based on `target`, transform `value` into `target`, to the extent possible: - # - # 1. if the given `value` conforms to `target` already, return the given `value` - # 2. if it's possible and safe to convert the given `value` to `target`, then the - # converted value - # 3. otherwise, the given `value` unaltered - # - # @param target [OpenAI::Converter, Class] - # @param value [Object] - # - # @return [Object] - # - def coerce(target, value) - case target - in OpenAI::Converter - target.coerce(value) - in Class - case target - in -> { _1 <= NilClass } - nil - in -> { _1 <= Integer } - value.is_a?(Numeric) ? Integer(value) : value - in -> { _1 <= Float } - value.is_a?(Numeric) ? Float(value) : value - in -> { _1 <= Symbol } - value.is_a?(String) ? value.to_sym : value - in -> { _1 <= String } - value.is_a?(Symbol) ? value.to_s : value - in -> { _1 <= Date || _1 <= Time } - value.is_a?(String) ? target.parse(value) : value - in -> { _1 <= IO } - value.is_a?(String) ? StringIO.new(value) : value - else - value - end - end - end - - # @private - # - # @param target [OpenAI::Converter, Class] - # @param value [Object] - # - # @return [Object] - # - def dump(target, value) - case target - in OpenAI::Converter - target.dump(value) - else - value - end - end - - # @private - # - # The underlying algorithm for computing maximal compatibility is subject to - # future improvements. - # - # Similar to `#.coerce`, used to determine the best union variant to decode into. - # - # 1. determine if strict-ish coercion is possible - # 2. return either result of successful coercion or if loose coercion is possible - # 3. return a score for recursively tallied count for fields that can be coerced - # - # @param target [OpenAI::Converter, Class] - # @param value [Object] - # - # @return [Object] - # - def try_strict_coerce(target, value) - case target - in OpenAI::Converter - target.try_strict_coerce(value) - in Class - case [target, value] - in [-> { _1 <= NilClass }, _] - [true, nil, value.nil? ? 1 : 0] - in [-> { _1 <= Integer }, Numeric] - [true, Integer(value), 1] - in [-> { _1 <= Float }, Numeric] - [true, Float(value), 1] - in [-> { _1 <= Symbol }, String] - [true, value.to_sym, 1] - in [-> { _1 <= String }, Symbol] - [true, value.to_s, 1] - in [-> { _1 <= Date || _1 <= Time }, String] - Kernel.then do - [true, target.parse(value), 1] - rescue ArgumentError - [false, false, 0] - end - in [_, ^target] - [true, value, 1] - else - [false, false, 0] - end - end - end - end - end - - # @private - # - # @abstract - # - # When we don't know what to expect for the value. 
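The conversion table in `Converter.coerce` above is worth a concrete illustration before the sentinel types that build on it. A condensed, hypothetical restatement of its `Class` branch (the real method also dispatches to `Converter` instances):

```ruby
require "date"
require "time"
require "stringio"

# Sketch of the primitive coercions performed by the deleted Converter.coerce.
def coerce_primitive(target, value)
  if target <= NilClass then nil
  elsif target <= Integer then value.is_a?(Numeric) ? Integer(value) : value
  elsif target <= Float then value.is_a?(Numeric) ? Float(value) : value
  elsif target <= Symbol then value.is_a?(String) ? value.to_sym : value
  elsif target <= String then value.is_a?(Symbol) ? value.to_s : value
  elsif target <= Date || target <= Time
    value.is_a?(String) ? target.parse(value) : value
  elsif target <= IO then value.is_a?(String) ? StringIO.new(value) : value
  else value
  end
end

coerce_primitive(Integer, 1.0)                 # => 1
coerce_primitive(Symbol, "auto")               # => :auto
coerce_primitive(Time, "2025-01-31T00:00:00Z") # => 2025-01-31 00:00:00 UTC
coerce_primitive(String, 42)                   # => 42, left unaltered
```

Values that cannot be converted safely pass through unchanged, matching rule 3 of the docstring above.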
- class Unknown - extend OpenAI::Converter - - # rubocop:disable Lint/UnusedMethodArgument - - private_class_method :new - - # @param other [Object] - # - # @return [Boolean] - # - def self.===(other) = true - - # @param other [Object] - # - # @return [Boolean] - # - def self.==(other) = other.is_a?(Class) && other <= OpenAI::Unknown - - class << self - # @!parse - # # @private - # # - # # @param value [Object] - # # - # # @return [Object] - # # - # def coerce(value) = super - - # @!parse - # # @private - # # - # # @param value [Object] - # # - # # @return [Object] - # # - # def dump(value) = super - - # @private - # - # @param value [Object] - # - # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # - def try_strict_coerce(value) - # prevent unknown variant from being chosen during the first coercion pass - [false, true, 0] - end - end - - # rubocop:enable Lint/UnusedMethodArgument - end - - # @private - # - # @abstract - # - # Ruby has no Boolean class; this is something for models to refer to. - class BooleanModel - extend OpenAI::Converter - - private_class_method :new - - # @param other [Object] - # - # @return [Boolean] - # - def self.===(other) = other == true || other == false - - # @param other [Object] - # - # @return [Boolean] - # - def self.==(other) = other.is_a?(Class) && other <= OpenAI::BooleanModel - - class << self - # @!parse - # # @private - # # - # # @param value [Boolean, Object] - # # - # # @return [Boolean, Object] - # # - # def coerce(value) = super - - # @!parse - # # @private - # # - # # @param value [Boolean, Object] - # # - # # @return [Boolean, Object] - # # - # def dump(value) = super - - # @private - # - # @param value [Object] - # - # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # - def try_strict_coerce(value) - case value - in true | false - [true, value, 1] - else - [false, false, 0] - end - end - end - end - - # @private - # - # @abstract - # - # A value from among a specified list of options. OpenAPI enum values map to Ruby - # values in the SDK as follows: - # - # 1. boolean => true | false - # 2. integer => Integer - # 3. float => Float - # 4. string => Symbol - # - # We can therefore convert string values to Symbols, but can't convert other - # values safely. - # - # @example - # ```ruby - # # `chat_model` is a `OpenAI::Models::ChatModel` - # case chat_model - # when OpenAI::Models::ChatModel::O3_MINI - # # ... - # when OpenAI::Models::ChatModel::O3_MINI_2025_01_31 - # # ... - # when OpenAI::Models::ChatModel::O1 - # # ... - # else - # # ... - # end - # ``` - # - # @example - # ```ruby - # case chat_model - # in :"o3-mini" - # # ... - # in :"o3-mini-2025-01-31" - # # ... - # in :o1 - # # ... - # else - # # ... - # end - # ``` - class Enum - extend OpenAI::Converter - - class << self - # All of the valid Symbol values for this enum. - # - # @return [Array] - # - def values = (@values ||= constants.map { const_get(_1) }) - - # @private - # - # Guard against thread safety issues by instantiating `@values`. - # - private def finalize! = values - end - - private_class_method :new - - # @param other [Object] - # - # @return [Boolean] - # - def self.===(other) = values.include?(other) - - # @param other [Object] - # - # @return [Boolean] - # - def self.==(other) - other.is_a?(Class) && other <= OpenAI::Enum && other.values.to_set == values.to_set - end - - class << self - # @private - # - # @param value [String, Symbol, Object] - # - # @return [Symbol, Object] - # - def coerce(value) = (value.is_a?(String) ? 
value.to_sym : value) - - # @!parse - # # @private - # # - # # @param value [Symbol, Object] - # # - # # @return [Symbol, Object] - # # - # def dump(value) = super - - # @private - # - # @param value [Object] - # - # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # - def try_strict_coerce(value) - return [true, value, 1] if values.include?(value) - - case value - in String if values.include?(val = value.to_sym) - [true, val, 1] - else - case [value, values.first] - in [true | false, true | false] | [Integer, Integer] | [Symbol | String, Symbol] - [false, true, 0] - else - [false, false, 0] - end - end - end - end - end - - # @private - # - # @abstract - # - # @example - # ```ruby - # # `chat_completion_content_part` is a `OpenAI::Models::Chat::ChatCompletionContentPart` - # case chat_completion_content_part - # when OpenAI::Models::Chat::ChatCompletionContentPartText - # # ... - # when OpenAI::Models::Chat::ChatCompletionContentPartImage - # # ... - # when OpenAI::Models::Chat::ChatCompletionContentPartInputAudio - # # ... - # else - # # ... - # end - # ``` - # - # @example - # ```ruby - # case chat_completion_content_part - # in {type: :text, text: text} - # # ... - # in {type: :image_url, image_url: image_url} - # # ... - # in {type: :input_audio, input_audio: input_audio} - # # ... - # in {type: :file, file: file} - # # ... - # else - # # ... - # end - # ``` - class Union - extend OpenAI::Converter - - class << self - # @private - # - # All of the specified variant info for this union. - # - # @return [Array] - # - private def known_variants = (@known_variants ||= []) - - # @private - # - # All of the specified variants for this union. - # - # @return [Array] - # - protected def variants - @known_variants.map { |key, variant_fn| [key, variant_fn.call] } - end - - # @private - # - # @param property [Symbol] - # - private def discriminator(property) - case property - in Symbol - @discriminator = property - end - end - - # @private - # - # @param key [Symbol, Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class] - # - # @param spec [Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class] . - # - # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const - # - # @option spec [Proc] :enum - # - # @option spec [Proc] :union - # - # @option spec [Boolean] :"nil?" - # - private def variant(key, spec = nil) - variant_info = - case key - in Symbol - [key, OpenAI::Converter.type_info(spec)] - in Proc | OpenAI::Converter | Class | Hash - [nil, OpenAI::Converter.type_info(key)] - end - - known_variants << variant_info - end - - # @private - # - # @param value [Object] - # - # @return [OpenAI::Converter, Class, nil] - # - private def resolve_variant(value) - case [@discriminator, value] - in [_, OpenAI::BaseModel] - value.class - in [Symbol, Hash] - key = - if value.key?(@discriminator) - value.fetch(@discriminator) - elsif value.key?((discriminator = @discriminator.to_s)) - value.fetch(discriminator) - end - - key = key.to_sym if key.is_a?(String) - _, resolved = known_variants.find { |k,| k == key } - resolved.nil? ? OpenAI::Unknown : resolved.call - else - nil - end - end - end - - # rubocop:disable Style/HashEachMethods - # rubocop:disable Style/CaseEquality - - private_class_method :new - - # @param other [Object] - # - # @return [Boolean] - # - def self.===(other) - known_variants.any? 
do |_, variant_fn| - variant_fn.call === other - end - end - - # @param other [Object] - # - # @return [Boolean] - # - def self.==(other) - other.is_a?(Class) && other <= OpenAI::Union && other.variants == variants - end - - class << self - # @private - # - # @param value [Object] - # - # @return [Object] - # - def coerce(value) - if (variant = resolve_variant(value)) - return OpenAI::Converter.coerce(variant, value) - end - - matches = [] - - known_variants.each do |_, variant_fn| - variant = variant_fn.call - - case OpenAI::Converter.try_strict_coerce(variant, value) - in [true, coerced, _] - return coerced - in [false, true, score] - matches << [score, variant] - in [false, false, _] - nil - end - end - - _, variant = matches.sort! { _2.first <=> _1.first }.find { |score,| !score.zero? } - variant.nil? ? value : OpenAI::Converter.coerce(variant, value) - end - - # @private - # - # @param value [Object] - # - # @return [Object] - # - def dump(value) - if (variant = resolve_variant(value)) - return OpenAI::Converter.dump(variant, value) - end - - known_variants.each do |_, variant_fn| - variant = variant_fn.call - if variant === value - return OpenAI::Converter.dump(variant, value) - end - end - value - end - - # @private - # - # @param value [Object] - # - # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # - def try_strict_coerce(value) - # TODO(ruby) this will result in super linear decoding behaviour for nested unions - # follow up with a decoding context that captures current strictness levels - if (variant = resolve_variant(value)) - return Converter.try_strict_coerce(variant, value) - end - - coercible = false - max_score = 0 - - known_variants.each do |_, variant_fn| - variant = variant_fn.call - - case OpenAI::Converter.try_strict_coerce(variant, value) - in [true, coerced, score] - return [true, coerced, score] - in [false, true, score] - coercible = true - max_score = [max_score, score].max - in [false, false, _] - nil - end - end - - [false, coercible, max_score] - end - end - - # rubocop:enable Style/CaseEquality - # rubocop:enable Style/HashEachMethods - end - - # @private - # - # @abstract - # - # Array of items of a given type. - class ArrayOf - include OpenAI::Converter - - private_class_method :new - - def self.[](...) = new(...) - - # @param other [Object] - # - # @return [Boolean] - # - def ===(other) - type = item_type - case other - in Array - # rubocop:disable Style/CaseEquality - other.all? 
{ type === _1 } - # rubocop:enable Style/CaseEquality - else - false - end - end - - # @param other [Object] - # - # @return [Boolean] - # - def ==(other) = other.is_a?(OpenAI::ArrayOf) && other.item_type == item_type - - # @private - # - # @param value [Enumerable, Object] - # - # @return [Array, Object] - # - def coerce(value) - type = item_type - case value - in Enumerable unless value.is_a?(Hash) - value.map { OpenAI::Converter.coerce(type, _1) } - else - value - end - end - - # @private - # - # @param value [Enumerable, Object] - # - # @return [Array, Object] - # - def dump(value) - type = item_type - case value - in Enumerable unless value.is_a?(Hash) - value.map { OpenAI::Converter.dump(type, _1) }.to_a - else - value - end - end - - # @private - # - # @param value [Object] - # - # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # - def try_strict_coerce(value) - case value - in Array - type = item_type - great_success = true - tally = 0 - - mapped = - value.map do |item| - case OpenAI::Converter.try_strict_coerce(type, item) - in [true, coerced, score] - tally += score - coerced - in [false, true, score] - great_success = false - tally += score - item - in [false, false, _] - great_success &&= item.nil? - item - end - end - - if great_success - [true, mapped, tally] - else - [false, true, tally] - end - else - [false, false, 0] - end - end - - # @private - # - # @return [OpenAI::Converter, Class] - # - protected def item_type = @item_type_fn.call - - # @private - # - # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class] - # - # @param spec [Hash{Symbol=>Object}] . - # - # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const - # - # @option spec [Proc] :enum - # - # @option spec [Proc] :union - # - # @option spec [Boolean] :"nil?" - # - def initialize(type_info, spec = {}) - @item_type_fn = OpenAI::Converter.type_info(type_info || spec) - end - end - - # @private - # - # @abstract - # - # Hash of items of a given type. - class HashOf - include OpenAI::Converter - - private_class_method :new - - def self.[](...) = new(...) - - # @param other [Object] - # - # @return [Boolean] - # - def ===(other) - type = item_type - case other - in Hash - other.all? do |key, val| - case [key, val] - in [Symbol | String, ^type] - true - else - false - end - end - else - false - end - end - - # @param other [Object] - # - # @return [Boolean] - # - def ==(other) = other.is_a?(OpenAI::HashOf) && other.item_type == item_type - - # @private - # - # @param value [Hash{Object=>Object}, Object] - # - # @return [Hash{Symbol=>Object}, Object] - # - def coerce(value) - type = item_type - case value - in Hash - value.to_h do |key, val| - coerced = OpenAI::Converter.coerce(type, val) - [key.is_a?(String) ? 
key.to_sym : key, coerced] - end - else - value - end - end - - # @private - # - # @param value [Hash{Object=>Object}, Object] - # - # @return [Hash{Symbol=>Object}, Object] - # - def dump(value) - type = item_type - case value - in Hash - value.transform_values do |val| - OpenAI::Converter.dump(type, val) - end - else - value - end - end - - # @private - # - # @param value [Object] - # - # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # - def try_strict_coerce(value) - case value - in Hash - type = item_type - great_success = true - tally = 0 - - mapped = - value.transform_values do |val| - case OpenAI::Converter.try_strict_coerce(type, val) - in [true, coerced, score] - tally += score - coerced - in [false, true, score] - great_success = false - tally += score - val - in [false, false, _] - great_success &&= val.nil? - val - end - end - - if great_success - [true, mapped, tally] - else - [false, true, tally] - end - else - [false, false, 0] - end - end - - # @private - # - # @return [OpenAI::Converter, Class] - # - protected def item_type = @item_type_fn.call - - # @private - # - # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class] - # - # @param spec [Hash{Symbol=>Object}] . - # - # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const - # - # @option spec [Proc] :enum - # - # @option spec [Proc] :union - # - # @option spec [Boolean] :"nil?" - # - def initialize(type_info, spec = {}) - @item_type_fn = OpenAI::Converter.type_info(type_info || spec) - end - end - - # @private - # - # @abstract - # - # @example - # ```ruby - # # `comparison_filter` is a `OpenAI::Models::ComparisonFilter` - # comparison_filter => { - # key: key, - # type: type, - # value: value - # } - # ``` - class BaseModel - extend OpenAI::Converter - - class << self - # @private - # - # Assumes superclass fields are totally defined before fields are accessed / - # defined on subclasses. - # - # @return [Hash{Symbol=>Hash{Symbol=>Object}}] - # - def known_fields - @known_fields ||= (self < OpenAI::BaseModel ? superclass.known_fields.dup : {}) - end - - # @return [Hash{Symbol=>Hash{Symbol=>Object}}] - # - def fields - known_fields.transform_values do |field| - {**field.except(:type_fn), type: field.fetch(:type_fn).call} - end - end - - # @private - # - # @return [Hash{Symbol=>Proc}] - # - def defaults = (@defaults ||= {}) - - # @private - # - # @param name_sym [Symbol] - # - # @param required [Boolean] - # - # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class] - # - # @param spec [Hash{Symbol=>Object}] . - # - # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const - # - # @option spec [Proc] :enum - # - # @option spec [Proc] :union - # - # @option spec [Boolean] :"nil?" - # - private def add_field(name_sym, required:, type_info:, spec:) - type_fn, info = - case type_info - in Proc | Class | OpenAI::Converter - [OpenAI::Converter.type_info({**spec, union: type_info}), spec] - in Hash - [OpenAI::Converter.type_info(type_info), type_info] - end - - fallback = info[:const] - defaults[name_sym] = fallback if required && !info[:nil?] 
&& info.key?(:const) - - key = info.fetch(:api_name, name_sym) - setter = "#{name_sym}=" - - if known_fields.key?(name_sym) - [name_sym, setter].each { undef_method(_1) } - end - - known_fields[name_sym] = {mode: @mode, key: key, required: required, type_fn: type_fn} - - define_method(setter) do |val| - @data[key] = val - end - - define_method(name_sym) do - field_type = type_fn.call - value = @data.fetch(key) { self.class.defaults[key] } - OpenAI::Converter.coerce(field_type, value) - rescue StandardError - name = self.class.name.split("::").last - raise OpenAI::ConversionError.new( - "Failed to parse #{name}.#{name_sym} as #{field_type.inspect}. " \ - "To get the unparsed API response, use #{name}[:#{key}]." - ) - end - end - - # @private - # - # @param name_sym [Symbol] - # - # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class] - # - # @param spec [Hash{Symbol=>Object}] . - # - # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const - # - # @option spec [Proc] :enum - # - # @option spec [Proc] :union - # - # @option spec [Boolean] :"nil?" - # - def required(name_sym, type_info, spec = {}) - add_field(name_sym, required: true, type_info: type_info, spec: spec) - end - - # @private - # - # @param name_sym [Symbol] - # - # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class] - # - # @param spec [Hash{Symbol=>Object}] . - # - # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const - # - # @option spec [Proc] :enum - # - # @option spec [Proc] :union - # - # @option spec [Boolean] :"nil?" - # - def optional(name_sym, type_info, spec = {}) - add_field(name_sym, required: false, type_info: type_info, spec: spec) - end - - # @private - # - # `request_only` attributes not excluded from `.#coerce` when receiving responses - # even if well behaved servers should not send them - # - # @param blk [Proc] - # - private def request_only(&blk) - @mode = :dump - blk.call - ensure - @mode = nil - end - - # @private - # - # `response_only` attributes are omitted from `.#dump` when making requests - # - # @param blk [Proc] - # - private def response_only(&blk) - @mode = :coerce - blk.call - ensure - @mode = nil - end - end - - # @param other [Object] - # - # @return [Boolean] - # - def ==(other) - case other - in OpenAI::BaseModel - self.class.fields == other.class.fields && @data == other.to_h - else - false - end - end - - class << self - # @private - # - # @param value [OpenAI::BaseModel, Hash{Object=>Object}, Object] - # - # @return [OpenAI::BaseModel, Object] - # - def coerce(value) - case OpenAI::Util.coerce_hash(value) - in Hash => coerced - new(coerced) - else - value - end - end - - # @private - # - # @param value [OpenAI::BaseModel, Object] - # - # @return [Hash{Object=>Object}, Object] - # - def dump(value) - unless (coerced = OpenAI::Util.coerce_hash(value)).is_a?(Hash) - return value - end - - values = coerced.filter_map do |key, val| - name = key.to_sym - case (field = known_fields[name]) - in nil - [name, val] - else - mode, type_fn, api_name = field.fetch_values(:mode, :type_fn, :key) - case mode - in :coerce - next - else - target = type_fn.call - [api_name, OpenAI::Converter.dump(target, val)] - end - end - end.to_h - - defaults.each do |key, val| - next if values.key?(key) - - values[key] = val - end - - values - end - - # @private - # - # @param value [Object] - # - # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # - def try_strict_coerce(value) - case value - in Hash | 
OpenAI::BaseModel - value = value.to_h - else - return [false, false, 0] - end - - keys = value.keys.to_set - great_success = true - tally = 0 - acc = {} - - known_fields.each_value do |field| - mode, required, type_fn, api_name = field.fetch_values(:mode, :required, :type_fn, :key) - keys.delete(api_name) - - case [required && mode != :dump, value.key?(api_name)] - in [_, true] - target = type_fn.call - item = value.fetch(api_name) - case OpenAI::Converter.try_strict_coerce(target, item) - in [true, coerced, score] - tally += score - acc[api_name] = coerced - in [false, true, score] - great_success = false - tally += score - acc[api_name] = item - in [false, false, _] - great_success &&= item.nil? - end - in [true, false] - great_success = false - in [false, false] - nil - end - end - - keys.each do |key| - acc[key] = value.fetch(key) - end - - great_success ? [true, new(acc), tally] : [false, true, tally] - end - end - - # Returns the raw value associated with the given key, if found. Otherwise, nil is - # returned. - # - # It is valid to lookup keys that are not in the API spec, for example to access - # undocumented features. This method does not parse response data into - # higher-level types. Lookup by anything other than a Symbol is an ArgumentError. - # - # @param key [Symbol] - # - # @return [Object, nil] - # - def [](key) - unless key.instance_of?(Symbol) - raise ArgumentError.new("Expected symbol key for lookup, got #{key.inspect}") - end - - @data[key] - end - - # Returns a Hash of the data underlying this object. O(1) - # - # Keys are Symbols and values are the raw values from the response. The return - # value indicates which values were ever set on the object. i.e. there will be a - # key in this hash if they ever were, even if the set value was nil. - # - # This method is not recursive. The returned value is shared by the object, so it - # should not be mutated. - # - # @return [Hash{Symbol=>Object}] - # - def to_h = @data - - alias_method :to_hash, :to_h - - # @param keys [Array, nil] - # - # @return [Hash{Symbol=>Object}] - # - def deconstruct_keys(keys) - (keys || self.class.known_fields.keys).filter_map do |k| - unless self.class.known_fields.key?(k) - next - end - - [k, method(k).call] - end - .to_h - end - - # Create a new instance of a model. - # - # @param data [Hash{Symbol=>Object}, OpenAI::BaseModel] - # - def initialize(data = {}) - case OpenAI::Util.coerce_hash(data) - in Hash => coerced - @data = coerced.transform_keys(&:to_sym) - else - raise ArgumentError.new("Expected a #{Hash} or #{OpenAI::BaseModel}, got #{data.inspect}") - end - end - - # @return [String] - # - def to_s = @data.to_s - - # @return [String] - # - def inspect - "#<#{self.class.name}:0x#{object_id.to_s(16)} #{deconstruct_keys(nil).map do |k, v| - "#{k}=#{v.inspect}" - end.join(' ')}>" - end - end -end diff --git a/lib/openai/base_page.rb b/lib/openai/base_page.rb deleted file mode 100644 index 762b38d7..00000000 --- a/lib/openai/base_page.rb +++ /dev/null @@ -1,60 +0,0 @@ -# frozen_string_literal: true - -module OpenAI - # @private - # - # @abstract - # - # @example - # ```ruby - # if page.has_next? - # page = page.next_page - # end - # ``` - # - # @example - # ```ruby - # page.auto_paging_each do |completion| - # puts(completion) - # end - # ``` - # - # @example - # ```ruby - # completions = page.to_enum.take(2) - # - # completions => Array - # ``` - module BasePage - # @return [Boolean] - # - def next_page? 
= (raise NotImplementedError) - - # @raise [OpenAI::APIError] - # @return [OpenAI::BasePage] - # - def next_page = (raise NotImplementedError) - - # @param blk [Proc] - # - # @return [void] - # - def auto_paging_each(&) = (raise NotImplementedError) - - # @return [Enumerable] - # - def to_enum = super(:auto_paging_each) - - alias_method :enum_for, :to_enum - - # @!parse - # # @private - # # - # # @param client [OpenAI::BaseClient] - # # @param req [Hash{Symbol=>Object}] - # # @param headers [Hash{String=>String}, Net::HTTPHeader] - # # @param page_data [Object] - # # - # def initialize(client:, req:, headers:, page_data:); end - end -end diff --git a/lib/openai/base_stream.rb b/lib/openai/base_stream.rb deleted file mode 100644 index 51e8dee6..00000000 --- a/lib/openai/base_stream.rb +++ /dev/null @@ -1,64 +0,0 @@ -# frozen_string_literal: true - -module OpenAI - # @private - # - # @example - # ```ruby - # stream.for_each do |message| - # puts(message) - # end - # ``` - # - # @example - # ```ruby - # messages = stream.to_enum.take(2) - # - # messages => Array - # ``` - class BaseStream - # @private - # - # @param model [Class, OpenAI::Converter] - # @param url [URI::Generic] - # @param status [Integer] - # @param response [Net::HTTPResponse] - # @param messages [Enumerable] - # - def initialize(model:, url:, status:, response:, messages:) - @model = model - @url = url - @status = status - @response = response - @messages = messages - @iterator = iterator - end - - # @private - # - # @return [Enumerable] - # - private def iterator = (raise NotImplementedError) - - # @return [void] - # - def close = OpenAI::Util.close_fused!(@iterator) - - # @param blk [Proc] - # - # @return [void] - # - def for_each(&) - unless block_given? - raise ArgumentError.new("A block must be given to ##{__method__}") - end - @iterator.each(&) - end - - # @return [Enumerable] - # - def to_enum = @iterator - - alias_method :enum_for, :to_enum - end -end diff --git a/lib/openai/client.rb b/lib/openai/client.rb index 7160cc26..b2bef0ac 100644 --- a/lib/openai/client.rb +++ b/lib/openai/client.rb @@ -1,7 +1,7 @@ # frozen_string_literal: true module OpenAI - class Client < OpenAI::BaseClient + class Client < OpenAI::Internal::Transport::BaseClient # Default max number of retries to attempt after a failed retryable request. DEFAULT_MAX_RETRIES = 2 @@ -51,9 +51,15 @@ class Client < OpenAI::BaseClient # @return [OpenAI::Resources::FineTuning] attr_reader :fine_tuning + # @return [OpenAI::Resources::Graders] + attr_reader :graders + # @return [OpenAI::Resources::VectorStores] attr_reader :vector_stores + # @return [OpenAI::Resources::Webhooks] + attr_reader :webhooks + # @return [OpenAI::Resources::Beta] attr_reader :beta @@ -66,10 +72,18 @@ class Client < OpenAI::BaseClient # @return [OpenAI::Resources::Responses] attr_reader :responses - # @private + # @return [OpenAI::Resources::Conversations] + attr_reader :conversations + + # @return [OpenAI::Resources::Evals] + attr_reader :evals + + # @return [OpenAI::Resources::Containers] + attr_reader :containers + + # @api private # # @return [Hash{String=>String}] - # private def auth_headers return {} if @api_key.nil? @@ -78,14 +92,15 @@ class Client < OpenAI::BaseClient # Creates and returns a new client for interacting with the API. 
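As a usage note on the pagination plumbing above and the constructor parameters documented just below: construction reads its credentials from the environment by default, and every list endpoint returns a page implementing `BasePage`. A minimal sketch, assuming `OPENAI_API_KEY` is exported and using `fine_tuning.jobs.list` purely as an example endpoint:

```ruby
require "openai"

# api_key/organization/project all default to ENV lookups per the docs below.
client = OpenAI::Client.new(max_retries: 2)

# Each page lazily fetches its successors; see BasePage#auto_paging_each above.
client.fine_tuning.jobs.list(limit: 20).auto_paging_each do |job|
  puts(job.id)
end
```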
# - # @param base_url [String, nil] Override the default base URL for the API, e.g., `"https://api.example.com/v2/"` - # # @param api_key [String, nil] Defaults to `ENV["OPENAI_API_KEY"]` # # @param organization [String, nil] Defaults to `ENV["OPENAI_ORG_ID"]` # # @param project [String, nil] Defaults to `ENV["OPENAI_PROJECT_ID"]` # + # @param base_url [String, nil] Override the default base URL for the API, e.g., + # `"https://api.example.com/v2/"`. Defaults to `ENV["OPENAI_BASE_URL"]` + # # @param max_retries [Integer] Max number of retries to attempt after a failed retryable request. # # @param timeout [Float] @@ -93,21 +108,20 @@ class Client < OpenAI::BaseClient # @param initial_retry_delay [Float] # # @param max_retry_delay [Float] - # def initialize( - base_url: nil, api_key: ENV["OPENAI_API_KEY"], organization: ENV["OPENAI_ORG_ID"], project: ENV["OPENAI_PROJECT_ID"], - max_retries: DEFAULT_MAX_RETRIES, - timeout: DEFAULT_TIMEOUT_IN_SECONDS, - initial_retry_delay: DEFAULT_INITIAL_RETRY_DELAY, - max_retry_delay: DEFAULT_MAX_RETRY_DELAY + base_url: ENV["OPENAI_BASE_URL"], + max_retries: self.class::DEFAULT_MAX_RETRIES, + timeout: self.class::DEFAULT_TIMEOUT_IN_SECONDS, + initial_retry_delay: self.class::DEFAULT_INITIAL_RETRY_DELAY, + max_retry_delay: self.class::DEFAULT_MAX_RETRY_DELAY ) base_url ||= "https://api.openai.com/v1" if api_key.nil? - raise ArgumentError.new("api_key is required") + raise ArgumentError.new("api_key is required, and can be set via environ: \"OPENAI_API_KEY\"") end headers = { @@ -135,11 +149,16 @@ def initialize( @moderations = OpenAI::Resources::Moderations.new(client: self) @models = OpenAI::Resources::Models.new(client: self) @fine_tuning = OpenAI::Resources::FineTuning.new(client: self) + @graders = OpenAI::Resources::Graders.new(client: self) @vector_stores = OpenAI::Resources::VectorStores.new(client: self) + @webhooks = OpenAI::Resources::Webhooks.new(client: self) @beta = OpenAI::Resources::Beta.new(client: self) @batches = OpenAI::Resources::Batches.new(client: self) @uploads = OpenAI::Resources::Uploads.new(client: self) @responses = OpenAI::Resources::Responses.new(client: self) + @conversations = OpenAI::Resources::Conversations.new(client: self) + @evals = OpenAI::Resources::Evals.new(client: self) + @containers = OpenAI::Resources::Containers.new(client: self) end end end diff --git a/lib/openai/cursor_page.rb b/lib/openai/cursor_page.rb deleted file mode 100644 index f9f84dcd..00000000 --- a/lib/openai/cursor_page.rb +++ /dev/null @@ -1,98 +0,0 @@ -# frozen_string_literal: true - -module OpenAI - # @example - # ```ruby - # if cursor_page.has_next? 
- # cursor_page = cursor_page.next_page - # end - # ``` - # - # @example - # ```ruby - # cursor_page.auto_paging_each do |completion| - # puts(completion) - # end - # ``` - # - # @example - # ```ruby - # completions = cursor_page.to_enum.take(2) - # - # completions => Array - # ``` - class CursorPage - include OpenAI::BasePage - - # @return [Array] - attr_accessor :data - - # @return [Boolean] - attr_accessor :has_more - - # rubocop:disable Lint/UnusedMethodArgument - # @private - # - # @param client [OpenAI::BaseClient] - # @param req [Hash{Symbol=>Object}] - # @param headers [Hash{String=>String}, Net::HTTPHeader] - # @param page_data [Hash{Symbol=>Object}] - # - def initialize(client:, req:, headers:, page_data:) - @client = client - @req = req - model = req.fetch(:model) - - case page_data - in {data: Array | nil => data} - @data = data&.map { model.coerce(_1) } - else - end - - case page_data - in {has_more: true | false | nil => has_more} - @has_more = has_more - else - end - end - # rubocop:enable Lint/UnusedMethodArgument - - # @return [Boolean] - # - def next_page? - has_more - end - - # @raise [OpenAI::HTTP::Error] - # @return [OpenAI::CursorPage] - # - def next_page - unless next_page? - raise RuntimeError.new("No more pages available. Please check #next_page? before calling ##{__method__}") - end - - req = OpenAI::Util.deep_merge(@req, {query: {after: data&.last&.id}}) - @client.request(req) - end - - # @param blk [Proc] - # - def auto_paging_each(&blk) - unless block_given? - raise ArgumentError.new("A block must be given to ##{__method__}") - end - page = self - loop do - page.data&.each { blk.call(_1) } - break unless page.next_page? - page = page.next_page - end - end - - # @return [String] - # - def inspect - "#<#{self.class}:0x#{object_id.to_s(16)} data=#{data.inspect} has_more=#{has_more.inspect}>" - end - end -end diff --git a/lib/openai/errors.rb b/lib/openai/errors.rb index 40faaaae..d7cd4e94 100644 --- a/lib/openai/errors.rb +++ b/lib/openai/errors.rb @@ -1,224 +1,250 @@ # frozen_string_literal: true module OpenAI - class Error < StandardError - # @!parse - # # @return [StandardError, nil] - # attr_reader :cause - end - - class ConversionError < OpenAI::Error - end - - class APIError < OpenAI::Error - # @return [URI::Generic] - attr_reader :url - - # @return [Integer, nil] - attr_reader :status - - # @return [Object, nil] - attr_reader :body - - # @return [String, nil] - attr_reader :code - - # @return [String, nil] - attr_reader :param - - # @return [String, nil] - attr_reader :type - - # @private - # - # @param url [URI::Generic] - # @param status [Integer, nil] - # @param body [Object, nil] - # @param request [nil] - # @param response [nil] - # @param message [String, nil] - # - def initialize(url:, status: nil, body: nil, request: nil, response: nil, message: nil) - @url = url - @status = status - @body = body - @request = request - @response = response - super(message) + module Errors + class Error < StandardError + # @!attribute cause + # + # @return [StandardError, nil] end - end - class APIConnectionError < OpenAI::APIError - # @!parse - # # @return [nil] - # attr_reader :status - - # @!parse - # # @return [nil] - # attr_reader :body - - # @!parse - # # @return [nil] - # attr_reader :code - - # @!parse - # # @return [nil] - # attr_reader :param - - # @!parse - # # @return [nil] - # attr_reader :type - - # @private - # - # @param url [URI::Generic] - # @param status [nil] - # @param body [nil] - # @param request [nil] - # @param response [nil] - # @param 
message [String, nil] - # - def initialize( - url:, - status: nil, - body: nil, - request: nil, - response: nil, - message: "Connection error." - ) - super + class ConversionError < OpenAI::Errors::Error + # @return [StandardError, nil] + def cause = @cause.nil? ? super : @cause + + # @api private + # + # @param on [Class] + # @param method [Symbol] + # @param target [Object] + # @param value [Object] + # @param cause [StandardError, nil] + def initialize(on:, method:, target:, value:, cause: nil) + cls = on.name.split("::").last + + message = [ + "Failed to parse #{cls}.#{method} from #{value.class} to #{target.inspect}.", + "To get the unparsed API response, use #{cls}[#{method.inspect}].", + cause && "Cause: #{cause.message}" + ].filter(&:itself).join(" ") + + @cause = cause + super(message) + end end - end - class APITimeoutError < OpenAI::APIConnectionError - # @private - # - # @param url [URI::Generic] - # @param status [nil] - # @param body [nil] - # @param request [nil] - # @param response [nil] - # @param message [String, nil] - # - def initialize( - url:, - status: nil, - body: nil, - request: nil, - response: nil, - message: "Request timed out." - ) - super + class APIError < OpenAI::Errors::Error + # @return [URI::Generic] + attr_accessor :url + + # @return [Integer, nil] + attr_accessor :status + + # @return [Object, nil] + attr_accessor :body + + # @return [String, nil] + attr_accessor :code + + # @return [String, nil] + attr_accessor :param + + # @return [String, nil] + attr_accessor :type + + # @api private + # + # @param url [URI::Generic] + # @param status [Integer, nil] + # @param body [Object, nil] + # @param request [nil] + # @param response [nil] + # @param message [String, nil] + def initialize(url:, status: nil, body: nil, request: nil, response: nil, message: nil) + @url = url + @status = status + @body = body + @request = request + @response = response + super(message) + end end - end - class APIStatusError < OpenAI::APIError - # @private - # - # @param url [URI::Generic] - # @param status [Integer] - # @param body [Object, nil] - # @param request [nil] - # @param response [nil] - # @param message [String, nil] - # - # @return [OpenAI::APIStatusError] - # - def self.for(url:, status:, body:, request:, response:, message: nil) - kwargs = {url: url, status: status, body: body, request: request, response: response, message: message} - - case status - in 400 - OpenAI::BadRequestError.new(**kwargs) - in 401 - OpenAI::AuthenticationError.new(**kwargs) - in 403 - OpenAI::PermissionDeniedError.new(**kwargs) - in 404 - OpenAI::NotFoundError.new(**kwargs) - in 409 - OpenAI::ConflictError.new(**kwargs) - in 422 - OpenAI::UnprocessableEntityError.new(**kwargs) - in 429 - OpenAI::RateLimitError.new(**kwargs) - in (500..) - OpenAI::InternalServerError.new(**kwargs) - else - OpenAI::APIStatusError.new(**kwargs) + class APIConnectionError < OpenAI::Errors::APIError + # @!attribute status + # + # @return [nil] + + # @!attribute body + # + # @return [nil] + + # @!attribute code + # + # @return [nil] + + # @!attribute param + # + # @return [nil] + + # @!attribute type + # + # @return [nil] + + # @api private + # + # @param url [URI::Generic] + # @param status [nil] + # @param body [nil] + # @param request [nil] + # @param response [nil] + # @param message [String, nil] + def initialize( + url:, + status: nil, + body: nil, + request: nil, + response: nil, + message: "Connection error." 
+ ) + super end end - # @!parse - # # @return [Integer] - # attr_reader :status - - # @!parse - # # @return [String, nil] - # attr_reader :code - - # @!parse - # # @return [String, nil] - # attr_reader :param - - # @!parse - # # @return [String, nil] - # attr_reader :type - - # @private - # - # @param url [URI::Generic] - # @param status [Integer] - # @param body [Object, nil] - # @param request [nil] - # @param response [nil] - # @param message [String, nil] - # - def initialize(url:, status:, body:, request:, response:, message: nil) - message ||= OpenAI::Util.dig(body, :message) { {url: url.to_s, status: status, body: body} } - @code = OpenAI::Converter.coerce(String, OpenAI::Util.dig(body, :code)) - @param = OpenAI::Converter.coerce(String, OpenAI::Util.dig(body, :param)) - @type = OpenAI::Converter.coerce(String, OpenAI::Util.dig(body, :type)) - super( - url: url, - status: status, - body: body, - request: request, - response: response, - message: message&.to_s + class APITimeoutError < OpenAI::Errors::APIConnectionError + # @api private + # + # @param url [URI::Generic] + # @param status [nil] + # @param body [nil] + # @param request [nil] + # @param response [nil] + # @param message [String, nil] + def initialize( + url:, + status: nil, + body: nil, + request: nil, + response: nil, + message: "Request timed out." ) + super + end end - end - class BadRequestError < OpenAI::APIStatusError - HTTP_STATUS = 400 - end + class APIStatusError < OpenAI::Errors::APIError + # @api private + # + # @param url [URI::Generic] + # @param status [Integer] + # @param body [Object, nil] + # @param request [nil] + # @param response [nil] + # @param message [String, nil] + # + # @return [self] + def self.for(url:, status:, body:, request:, response:, message: nil) + kwargs = { + url: url, + status: status, + body: body, + request: request, + response: response, + message: message + } + + case status + in 400 + OpenAI::Errors::BadRequestError.new(**kwargs) + in 401 + OpenAI::Errors::AuthenticationError.new(**kwargs) + in 403 + OpenAI::Errors::PermissionDeniedError.new(**kwargs) + in 404 + OpenAI::Errors::NotFoundError.new(**kwargs) + in 409 + OpenAI::Errors::ConflictError.new(**kwargs) + in 422 + OpenAI::Errors::UnprocessableEntityError.new(**kwargs) + in 429 + OpenAI::Errors::RateLimitError.new(**kwargs) + in (500..) 
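+            # every remaining 5xx status maps to InternalServerError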
+ OpenAI::Errors::InternalServerError.new(**kwargs) + else + OpenAI::Errors::APIStatusError.new(**kwargs) + end + end - class AuthenticationError < OpenAI::APIStatusError - HTTP_STATUS = 401 - end + # @!parse + # # @return [Integer] + # attr_accessor :status + + # @!parse + # # @return [String, nil] + # attr_accessor :code + + # @!parse + # # @return [String, nil] + # attr_accessor :param + + # @!parse + # # @return [String, nil] + # attr_accessor :type + + # @api private + # + # @param url [URI::Generic] + # @param status [Integer] + # @param body [Object, nil] + # @param request [nil] + # @param response [nil] + # @param message [String, nil] + def initialize(url:, status:, body:, request:, response:, message: nil) + message ||= OpenAI::Internal::Util.dig(body, :message) { {url: url.to_s, status: status, body: body} } + @code = OpenAI::Internal::Type::Converter.coerce(String, OpenAI::Internal::Util.dig(body, :code)) + @param = OpenAI::Internal::Type::Converter.coerce(String, OpenAI::Internal::Util.dig(body, :param)) + @type = OpenAI::Internal::Type::Converter.coerce(String, OpenAI::Internal::Util.dig(body, :type)) + super( + url: url, + status: status, + body: body, + request: request, + response: response, + message: message&.to_s + ) + end + end - class PermissionDeniedError < OpenAI::APIStatusError - HTTP_STATUS = 403 - end + class BadRequestError < OpenAI::Errors::APIStatusError + HTTP_STATUS = 400 + end - class NotFoundError < OpenAI::APIStatusError - HTTP_STATUS = 404 - end + class AuthenticationError < OpenAI::Errors::APIStatusError + HTTP_STATUS = 401 + end - class ConflictError < OpenAI::APIStatusError - HTTP_STATUS = 409 - end + class PermissionDeniedError < OpenAI::Errors::APIStatusError + HTTP_STATUS = 403 + end - class UnprocessableEntityError < OpenAI::APIStatusError - HTTP_STATUS = 422 - end + class NotFoundError < OpenAI::Errors::APIStatusError + HTTP_STATUS = 404 + end - class RateLimitError < OpenAI::APIStatusError - HTTP_STATUS = 429 - end + class ConflictError < OpenAI::Errors::APIStatusError + HTTP_STATUS = 409 + end + + class UnprocessableEntityError < OpenAI::Errors::APIStatusError + HTTP_STATUS = 422 + end + + class RateLimitError < OpenAI::Errors::APIStatusError + HTTP_STATUS = 429 + end - class InternalServerError < OpenAI::APIStatusError - HTTP_STATUS = (500..) + class InternalServerError < OpenAI::Errors::APIStatusError + HTTP_STATUS = (500..) 
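+      # @example
+      #   # A minimal usage sketch; `client` and the request are illustrative
+      #   # only, not part of this diff. `APIStatusError.for` builds the
+      #   # subclass matching the response status, which the client raises,
+      #   # so callers can rescue by specificity:
+      #   begin
+      #     client.request(method: :get, path: "models")
+      #   rescue OpenAI::Errors::RateLimitError => e
+      #     sleep(1) # e.status == 429
+      #   rescue OpenAI::Errors::APIStatusError => e
+      #     puts([e.status, e.code, e.type].inspect)
+      #   end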
+ end end end diff --git a/lib/openai/extern.rb b/lib/openai/extern.rb deleted file mode 100644 index 3faad4c1..00000000 --- a/lib/openai/extern.rb +++ /dev/null @@ -1,10 +0,0 @@ -# frozen_string_literal: true - -module OpenAI - # @private - # - # @abstract - # - module Extern - end -end diff --git a/lib/openai/file_part.rb b/lib/openai/file_part.rb new file mode 100644 index 00000000..f853ca4d --- /dev/null +++ b/lib/openai/file_part.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +module OpenAI + class FilePart + # @return [Pathname, StringIO, IO, String] + attr_reader :content + + # @return [String, nil] + attr_reader :content_type + + # @return [String, nil] + attr_reader :filename + + # @api private + # + # @return [String] + private def read + case content + in Pathname + content.read(binmode: true) + in StringIO + content.string + in IO + content.read + in String + content + end + end + + # @param a [Object] + # + # @return [String] + def to_json(*a) = read.to_json(*a) + + # @param a [Object] + # + # @return [String] + def to_yaml(*a) = read.to_yaml(*a) + + # @param content [Pathname, StringIO, IO, String] + # @param filename [String, nil] + # @param content_type [String, nil] + def initialize(content, filename: nil, content_type: nil) + @content = content + @filename = + case content + in Pathname + filename.nil? ? content.basename.to_path : ::File.basename(filename) + else + filename.nil? ? nil : ::File.basename(filename) + end + @content_type = content_type + end + end +end diff --git a/lib/openai/internal.rb b/lib/openai/internal.rb new file mode 100644 index 00000000..0fc8d8ad --- /dev/null +++ b/lib/openai/internal.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + OMIT = + Object.new.tap do + _1.define_singleton_method(:inspect) { "#<#{OpenAI::Internal}::OMIT>" } + end + .freeze + + define_sorbet_constant!(:AnyHash) do + T.type_alias { T::Hash[Symbol, T.anything] } + end + define_sorbet_constant!(:FileInput) do + T.type_alias { T.any(Pathname, StringIO, IO, String, OpenAI::FilePart) } + end + end +end diff --git a/lib/openai/internal/conversation_cursor_page.rb b/lib/openai/internal/conversation_cursor_page.rb new file mode 100644 index 00000000..6dddc16e --- /dev/null +++ b/lib/openai/internal/conversation_cursor_page.rb @@ -0,0 +1,92 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + # @generic Elem + # + # @example + # if conversation_cursor_page.has_next? + # conversation_cursor_page = conversation_cursor_page.next_page + # end + # + # @example + # conversation_cursor_page.auto_paging_each do |item| + # puts(item) + # end + class ConversationCursorPage + include OpenAI::Internal::Type::BasePage + + # @return [Array>, nil] + attr_accessor :data + + # @return [Boolean] + attr_accessor :has_more + + # @return [String] + attr_accessor :last_id + + # @return [Boolean] + def next_page? + has_more + end + + # @raise [OpenAI::HTTP::Error] + # @return [self] + def next_page + unless next_page? + message = "No more pages available. Please check #next_page? before calling ##{__method__}" + raise RuntimeError.new(message) + end + + req = OpenAI::Internal::Util.deep_merge(@req, {query: {after: last_id}}) + @client.request(req) + end + + # @param blk [Proc] + # + # @yieldparam [generic] + def auto_paging_each(&blk) + unless block_given? 
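+          # raise eagerly: without a block there is nothing to yield each item to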
+ raise ArgumentError.new("A block must be given to ##{__method__}") + end + + page = self + loop do + page.data&.each(&blk) + + break unless page.next_page? + page = page.next_page + end + end + + # @api private + # + # @param client [OpenAI::Internal::Transport::BaseClient] + # @param req [Hash{Symbol=>Object}] + # @param headers [Hash{String=>String}, Net::HTTPHeader] + # @param page_data [Hash{Symbol=>Object}] + def initialize(client:, req:, headers:, page_data:) + super + + case page_data + in {data: Array => data} + @data = data.map { OpenAI::Internal::Type::Converter.coerce(@model, _1) } + else + end + @has_more = page_data[:has_more] + @last_id = page_data[:last_id] + end + + # @api private + # + # @return [String] + def inspect + # rubocop:disable Layout/LineLength + model = OpenAI::Internal::Type::Converter.inspect(@model, depth: 1) + + "#<#{self.class}[#{model}]:0x#{object_id.to_s(16)} has_more=#{has_more.inspect} last_id=#{last_id.inspect}>" + # rubocop:enable Layout/LineLength + end + end + end +end diff --git a/lib/openai/internal/cursor_page.rb b/lib/openai/internal/cursor_page.rb new file mode 100644 index 00000000..5f68a217 --- /dev/null +++ b/lib/openai/internal/cursor_page.rb @@ -0,0 +1,86 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + # @generic Elem + # + # @example + # if cursor_page.has_next? + # cursor_page = cursor_page.next_page + # end + # + # @example + # cursor_page.auto_paging_each do |completion| + # puts(completion) + # end + class CursorPage + include OpenAI::Internal::Type::BasePage + + # @return [Array>, nil] + attr_accessor :data + + # @return [Boolean] + attr_accessor :has_more + + # @return [Boolean] + def next_page? + has_more + end + + # @raise [OpenAI::HTTP::Error] + # @return [self] + def next_page + unless next_page? + message = "No more pages available. Please check #next_page? before calling ##{__method__}" + raise RuntimeError.new(message) + end + + req = OpenAI::Internal::Util.deep_merge(@req, {query: {after: data&.last&.id}}) + @client.request(req) + end + + # @param blk [Proc] + # + # @yieldparam [generic] + def auto_paging_each(&blk) + unless block_given? + raise ArgumentError.new("A block must be given to ##{__method__}") + end + + page = self + loop do + page.data&.each(&blk) + + break unless page.next_page? + page = page.next_page + end + end + + # @api private + # + # @param client [OpenAI::Internal::Transport::BaseClient] + # @param req [Hash{Symbol=>Object}] + # @param headers [Hash{String=>String}, Net::HTTPHeader] + # @param page_data [Hash{Symbol=>Object}] + def initialize(client:, req:, headers:, page_data:) + super + + case page_data + in {data: Array => data} + @data = data.map { OpenAI::Internal::Type::Converter.coerce(@model, _1) } + else + end + @has_more = page_data[:has_more] + end + + # @api private + # + # @return [String] + def inspect + model = OpenAI::Internal::Type::Converter.inspect(@model, depth: 1) + + "#<#{self.class}[#{model}]:0x#{object_id.to_s(16)} has_more=#{has_more.inspect}>" + end + end + end +end diff --git a/lib/openai/internal/page.rb b/lib/openai/internal/page.rb new file mode 100644 index 00000000..36c350a4 --- /dev/null +++ b/lib/openai/internal/page.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + # @generic Elem + # + # @example + # if page.has_next? 
+    #     page = page.next_page
+    #   end
+    #
+    # @example
+    #   page.auto_paging_each do |model|
+    #     puts(model)
+    #   end
+    class Page
+      include OpenAI::Internal::Type::BasePage
+
+      # @return [Array<generic<Elem>>, nil]
+      attr_accessor :data
+
+      # @return [String]
+      attr_accessor :object
+
+      # @return [Boolean]
+      def next_page?
+        false
+      end
+
+      # @raise [OpenAI::HTTP::Error]
+      # @return [self]
+      def next_page
+        raise RuntimeError.new("No more pages available.")
+      end
+
+      # @param blk [Proc]
+      #
+      # @yieldparam [generic<Elem>]
+      def auto_paging_each(&blk)
+        unless block_given?
+          raise ArgumentError.new("A block must be given to ##{__method__}")
+        end
+
+        page = self
+        loop do
+          page.data&.each(&blk)
+
+          break unless page.next_page?
+          page = page.next_page
+        end
+      end
+
+      # @api private
+      #
+      # @param client [OpenAI::Internal::Transport::BaseClient]
+      # @param req [Hash{Symbol=>Object}]
+      # @param headers [Hash{String=>String}, Net::HTTPHeader]
+      # @param page_data [Hash{Symbol=>Object}]
+      def initialize(client:, req:, headers:, page_data:)
+        super
+
+        case page_data
+        in {data: Array => data}
+          @data = data.map { OpenAI::Internal::Type::Converter.coerce(@model, _1) }
+        else
+        end
+        @object = page_data[:object]
+      end
+
+      # @api private
+      #
+      # @return [String]
+      def inspect
+        model = OpenAI::Internal::Type::Converter.inspect(@model, depth: 1)
+
+        "#<#{self.class}[#{model}]:0x#{object_id.to_s(16)} object=#{object.inspect}>"
+      end
+    end
+  end
+end
diff --git a/lib/openai/internal/stream.rb b/lib/openai/internal/stream.rb
new file mode 100644
index 00000000..3908f10f
--- /dev/null
+++ b/lib/openai/internal/stream.rb
@@ -0,0 +1,61 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Internal
+    # @generic Elem
+    #
+    # @example
+    #   stream.each do |event|
+    #     puts(event)
+    #   end
+    class Stream
+      include OpenAI::Internal::Type::BaseStream
+
+      # @api private
+      #
+      # @return [Enumerable<generic<Elem>>]
+      private def iterator
+        # rubocop:disable Metrics/BlockLength
+        @iterator ||= OpenAI::Internal::Util.chain_fused(@stream) do |y|
+          consume = false
+
+          @stream.each do |msg|
+            next if consume
+
+            case msg
+            in {data: String => data} if data.start_with?("[DONE]")
+              consume = true
+              next
+            in {data: String => data}
+              case JSON.parse(data, symbolize_names: true)
+              in {error: error}
+                message =
+                  case error
+                  in String
+                    error
+                  in {message: String => m}
+                    m
+                  else
+                    "An error occurred during streaming"
+                  end
+                raise OpenAI::Errors::APIStatusError.for(
+                  url: @url,
+                  status: @status,
+                  body: error,
+                  request: nil,
+                  response: @response,
+                  message: message
+                )
+              in decoded
+                unwrapped = OpenAI::Internal::Util.dig(decoded, @unwrap)
+                y << OpenAI::Internal::Type::Converter.coerce(@model, unwrapped)
+              end
+            else
+            end
+          end
+        end
+        # rubocop:enable Metrics/BlockLength
+      end
+    end
+  end
+end
diff --git a/lib/openai/internal/transport/base_client.rb b/lib/openai/internal/transport/base_client.rb
new file mode 100644
index 00000000..ca1b145f
--- /dev/null
+++ b/lib/openai/internal/transport/base_client.rb
@@ -0,0 +1,563 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Internal
+    module Transport
+      # @api private
+      #
+      # @abstract
+      class BaseClient
+        extend OpenAI::Internal::Util::SorbetRuntimeSupport
+
+        # from whatwg fetch spec
+        MAX_REDIRECTS = 20
+
+        # rubocop:disable Style/MutableConstant
+        PLATFORM_HEADERS =
+          {
+            "x-stainless-arch" => OpenAI::Internal::Util.arch,
+            "x-stainless-lang" => "ruby",
+            "x-stainless-os" => OpenAI::Internal::Util.os,
+            "x-stainless-package-version" => OpenAI::VERSION,
+            "x-stainless-runtime" => ::RUBY_ENGINE,
+
"x-stainless-runtime-version" => ::RUBY_ENGINE_VERSION + } + # rubocop:enable Style/MutableConstant + + class << self + # @api private + # + # @param req [Hash{Symbol=>Object}] + # + # @raise [ArgumentError] + def validate!(req) + keys = [:method, :path, :query, :headers, :body, :unwrap, :page, :stream, :model, :options] + case req + in Hash + req.each_key do |k| + unless keys.include?(k) + raise ArgumentError.new("Request `req` keys must be one of #{keys}, got #{k.inspect}") + end + end + else + raise ArgumentError.new("Request `req` must be a Hash or RequestOptions, got #{req.inspect}") + end + end + + # @api private + # + # @param status [Integer] + # @param headers [Hash{String=>String}, Net::HTTPHeader] + # + # @return [Boolean] + def should_retry?(status, headers:) + coerced = OpenAI::Internal::Util.coerce_boolean(headers["x-should-retry"]) + case [coerced, status] + in [true | false, _] + coerced + in [_, 408 | 409 | 429 | (500..)] + # retry on: + # 408: timeouts + # 409: locks + # 429: rate limits + # 500+: unknown errors + true + else + false + end + end + + # @api private + # + # @param request [Hash{Symbol=>Object}] . + # + # @option request [Symbol] :method + # + # @option request [URI::Generic] :url + # + # @option request [Hash{String=>String}] :headers + # + # @option request [Object] :body + # + # @option request [Integer] :max_retries + # + # @option request [Float] :timeout + # + # @param status [Integer] + # + # @param response_headers [Hash{String=>String}, Net::HTTPHeader] + # + # @return [Hash{Symbol=>Object}] + def follow_redirect(request, status:, response_headers:) + method, url, headers = request.fetch_values(:method, :url, :headers) + location = + Kernel.then do + URI.join(url, response_headers["location"]) + rescue ArgumentError + message = "Server responded with status #{status} but no valid location header." + raise OpenAI::Errors::APIConnectionError.new( + url: url, + response: response_headers, + message: message + ) + end + + request = {**request, url: location} + + case [url.scheme, location.scheme] + in ["https", "http"] + message = "Tried to redirect to a insecure URL" + raise OpenAI::Errors::APIConnectionError.new( + url: url, + response: response_headers, + message: message + ) + else + nil + end + + # from whatwg fetch spec + case [status, method] + in [301 | 302, :post] | [303, _] + drop = %w[content-encoding content-language content-length content-location content-type] + request = { + **request, + method: method == :head ? :head : :get, + headers: headers.except(*drop), + body: nil + } + else + end + + # from undici + if OpenAI::Internal::Util.uri_origin(url) != OpenAI::Internal::Util.uri_origin(location) + drop = %w[authorization cookie host proxy-authorization] + request = {**request, headers: request.fetch(:headers).except(*drop)} + end + + request + end + + # @api private + # + # @param status [Integer, OpenAI::Errors::APIConnectionError] + # @param stream [Enumerable, nil] + def reap_connection!(status, stream:) + case status + in (..199) | (300..499) + stream&.each { next } + in OpenAI::Errors::APIConnectionError | (500..) 
+ OpenAI::Internal::Util.close_fused!(stream) + else + end + end + end + + # @return [URI::Generic] + attr_reader :base_url + + # @return [Float] + attr_reader :timeout + + # @return [Integer] + attr_reader :max_retries + + # @return [Float] + attr_reader :initial_retry_delay + + # @return [Float] + attr_reader :max_retry_delay + + # @return [Hash{String=>String}] + attr_reader :headers + + # @return [String, nil] + attr_reader :idempotency_header + + # @api private + # @return [OpenAI::Internal::Transport::PooledNetRequester] + attr_reader :requester + + # @api private + # + # @param base_url [String] + # @param timeout [Float] + # @param max_retries [Integer] + # @param initial_retry_delay [Float] + # @param max_retry_delay [Float] + # @param headers [Hash{String=>String, Integer, Array, nil}] + # @param idempotency_header [String, nil] + def initialize( + base_url:, + timeout: 0.0, + max_retries: 0, + initial_retry_delay: 0.0, + max_retry_delay: 0.0, + headers: {}, + idempotency_header: nil + ) + @requester = OpenAI::Internal::Transport::PooledNetRequester.new + @headers = OpenAI::Internal::Util.normalized_headers( + self.class::PLATFORM_HEADERS, + { + "accept" => "application/json", + "content-type" => "application/json" + }, + headers + ) + @base_url_components = OpenAI::Internal::Util.parse_uri(base_url) + @base_url = OpenAI::Internal::Util.unparse_uri(@base_url_components) + @idempotency_header = idempotency_header&.to_s&.downcase + @timeout = timeout + @max_retries = max_retries + @initial_retry_delay = initial_retry_delay + @max_retry_delay = max_retry_delay + end + + # @api private + # + # @return [Hash{String=>String}] + private def auth_headers = {} + + # @api private + # + # @return [String] + private def generate_idempotency_key = "stainless-ruby-retry-#{SecureRandom.uuid}" + + # @api private + # + # @param req [Hash{Symbol=>Object}] . + # + # @option req [Symbol] :method + # + # @option req [String, Array] :path + # + # @option req [Hash{String=>Array, String, nil}, nil] :query + # + # @option req [Hash{String=>String, Integer, Array, nil}, nil] :headers + # + # @option req [Object, nil] :body + # + # @option req [Symbol, Integer, Array, Proc, nil] :unwrap + # + # @option req [Class, nil] :page + # + # @option req [Class, nil] :stream + # + # @option req [OpenAI::Internal::Type::Converter, Class, nil] :model + # + # @param opts [Hash{Symbol=>Object}] . 
+ # + # @option opts [String, nil] :idempotency_key + # + # @option opts [Hash{String=>Array, String, nil}, nil] :extra_query + # + # @option opts [Hash{String=>String, nil}, nil] :extra_headers + # + # @option opts [Object, nil] :extra_body + # + # @option opts [Integer, nil] :max_retries + # + # @option opts [Float, nil] :timeout + # + # @return [Hash{Symbol=>Object}] + private def build_request(req, opts) + method, uninterpolated_path = req.fetch_values(:method, :path) + + path = OpenAI::Internal::Util.interpolate_path(uninterpolated_path) + + query = OpenAI::Internal::Util.deep_merge(req[:query].to_h, opts[:extra_query].to_h) + + headers = OpenAI::Internal::Util.normalized_headers( + @headers, + auth_headers, + req[:headers].to_h, + opts[:extra_headers].to_h + ) + + if @idempotency_header && + !headers.key?(@idempotency_header) && + (!Net::HTTP::IDEMPOTENT_METHODS_.include?(method.to_s.upcase) || opts.key?(:idempotency_key)) + headers[@idempotency_header] = opts.fetch(:idempotency_key) { generate_idempotency_key } + end + + unless headers.key?("x-stainless-retry-count") + headers["x-stainless-retry-count"] = "0" + end + + timeout = opts.fetch(:timeout, @timeout).to_f.clamp(0..) + unless headers.key?("x-stainless-timeout") || timeout.zero? + headers["x-stainless-timeout"] = timeout.to_s + end + + headers.reject! { |_, v| v.to_s.empty? } + + body = + case method + in :get | :head | :options | :trace + nil + else + OpenAI::Internal::Util.deep_merge(*[req[:body], opts[:extra_body]].compact) + end + + url = OpenAI::Internal::Util.join_parsed_uri( + @base_url_components, + {**req, path: path, query: query} + ) + headers, encoded = OpenAI::Internal::Util.encode_content(headers, body) + { + method: method, + url: url, + headers: headers, + body: encoded, + max_retries: opts.fetch(:max_retries, @max_retries), + timeout: timeout + } + end + + # @api private + # + # @param headers [Hash{String=>String}] + # @param retry_count [Integer] + # + # @return [Float] + private def retry_delay(headers, retry_count:) + # Non-standard extension + span = Float(headers["retry-after-ms"], exception: false)&.then { _1 / 1000 } + return span if span + + retry_header = headers["retry-after"] + return span if (span = Float(retry_header, exception: false)) + + span = retry_header&.then do + Time.httpdate(_1) - Time.now + rescue ArgumentError + nil + end + return span if span + + scale = retry_count**2 + jitter = 1 - (0.25 * rand) + (@initial_retry_delay * scale * jitter).clamp(0, @max_retry_delay) + end + + # @api private + # + # @param request [Hash{Symbol=>Object}] . 
+ # + # @option request [Symbol] :method + # + # @option request [URI::Generic] :url + # + # @option request [Hash{String=>String}] :headers + # + # @option request [Object] :body + # + # @option request [Integer] :max_retries + # + # @option request [Float] :timeout + # + # @param redirect_count [Integer] + # + # @param retry_count [Integer] + # + # @param send_retry_header [Boolean] + # + # @raise [OpenAI::Errors::APIError] + # @return [Array(Integer, Net::HTTPResponse, Enumerable)] + def send_request(request, redirect_count:, retry_count:, send_retry_header:) + url, headers, max_retries, timeout = request.fetch_values(:url, :headers, :max_retries, :timeout) + input = {**request.except(:timeout), deadline: OpenAI::Internal::Util.monotonic_secs + timeout} + + if send_retry_header + headers["x-stainless-retry-count"] = retry_count.to_s + end + + begin + status, response, stream = @requester.execute(input) + rescue OpenAI::Errors::APIConnectionError => e + status = e + end + + case status + in ..299 + [status, response, stream] + in 300..399 if redirect_count >= self.class::MAX_REDIRECTS + self.class.reap_connection!(status, stream: stream) + + message = "Failed to complete the request within #{self.class::MAX_REDIRECTS} redirects." + raise OpenAI::Errors::APIConnectionError.new(url: url, response: response, message: message) + in 300..399 + self.class.reap_connection!(status, stream: stream) + + request = self.class.follow_redirect(request, status: status, response_headers: response) + send_request( + request, + redirect_count: redirect_count + 1, + retry_count: retry_count, + send_retry_header: send_retry_header + ) + in OpenAI::Errors::APIConnectionError if retry_count >= max_retries + raise status + in (400..) if retry_count >= max_retries || !self.class.should_retry?(status, headers: response) + decoded = Kernel.then do + OpenAI::Internal::Util.decode_content(response, stream: stream, suppress_error: true) + ensure + self.class.reap_connection!(status, stream: stream) + end + + raise OpenAI::Errors::APIStatusError.for( + url: url, + status: status, + body: decoded, + request: nil, + response: response + ) + in (400..) | OpenAI::Errors::APIConnectionError + self.class.reap_connection!(status, stream: stream) + + delay = retry_delay(response || {}, retry_count: retry_count) + sleep(delay) + + send_request( + request, + redirect_count: redirect_count, + retry_count: retry_count + 1, + send_retry_header: send_retry_header + ) + end + end + + # Execute the request specified by `req`. This is the method that all resource + # methods call into. + # + # @overload request(method, path, query: {}, headers: {}, body: nil, unwrap: nil, page: nil, stream: nil, model: OpenAI::Internal::Type::Unknown, options: {}) + # + # @param method [Symbol] + # + # @param path [String, Array] + # + # @param query [Hash{String=>Array, String, nil}, nil] + # + # @param headers [Hash{String=>String, Integer, Array, nil}, nil] + # + # @param body [Object, nil] + # + # @param unwrap [Symbol, Integer, Array, Proc, nil] + # + # @param page [Class, nil] + # + # @param stream [Class, nil] + # + # @param model [OpenAI::Internal::Type::Converter, Class, nil] + # + # @param options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] . 
+ # + # @option options [String, nil] :idempotency_key + # + # @option options [Hash{String=>Array, String, nil}, nil] :extra_query + # + # @option options [Hash{String=>String, nil}, nil] :extra_headers + # + # @option options [Object, nil] :extra_body + # + # @option options [Integer, nil] :max_retries + # + # @option options [Float, nil] :timeout + # + # @raise [OpenAI::Errors::APIError] + # @return [Object] + def request(req) + self.class.validate!(req) + model = req.fetch(:model) { OpenAI::Internal::Type::Unknown } + opts = req[:options].to_h + unwrap = req[:unwrap] + OpenAI::RequestOptions.validate!(opts) + request = build_request(req.except(:options), opts) + url = request.fetch(:url) + + # Don't send the current retry count in the headers if the caller modified the header defaults. + send_retry_header = request.fetch(:headers)["x-stainless-retry-count"] == "0" + status, response, stream = send_request( + request, + redirect_count: 0, + retry_count: 0, + send_retry_header: send_retry_header + ) + + decoded = OpenAI::Internal::Util.decode_content(response, stream: stream) + case req + in {stream: Class => st} + st.new( + model: model, + url: url, + status: status, + response: response, + unwrap: unwrap, + stream: decoded + ) + in {page: Class => page} + page.new(client: self, req: req, headers: response, page_data: decoded) + else + unwrapped = OpenAI::Internal::Util.dig(decoded, unwrap) + OpenAI::Internal::Type::Converter.coerce(model, unwrapped) + end + end + + # @api private + # + # @return [String] + def inspect + # rubocop:disable Layout/LineLength + "#<#{self.class.name}:0x#{object_id.to_s(16)} base_url=#{@base_url} max_retries=#{@max_retries} timeout=#{@timeout}>" + # rubocop:enable Layout/LineLength + end + + define_sorbet_constant!(:RequestComponents) do + T.type_alias do + { + method: Symbol, + path: T.any(String, T::Array[String]), + query: T.nilable(T::Hash[String, T.nilable(T.any(T::Array[String], String))]), + headers: T.nilable( + T::Hash[String, + T.nilable( + T.any( + String, + Integer, + T::Array[T.nilable(T.any(String, Integer))] + ) + )] + ), + body: T.nilable(T.anything), + unwrap: T.nilable( + T.any( + Symbol, + Integer, + T::Array[T.any(Symbol, Integer)], + T.proc.params(arg0: T.anything).returns(T.anything) + ) + ), + page: T.nilable(T::Class[OpenAI::Internal::Type::BasePage[OpenAI::Internal::Type::BaseModel]]), + stream: T.nilable(T::Class[OpenAI::Internal::Type::BaseStream[T.anything, OpenAI::Internal::Type::BaseModel]]), + model: T.nilable(OpenAI::Internal::Type::Converter::Input), + options: T.nilable(OpenAI::RequestOptions::OrHash) + } + end + end + define_sorbet_constant!(:RequestInput) do + T.type_alias do + { + method: Symbol, + url: URI::Generic, + headers: T::Hash[String, String], + body: T.anything, + max_retries: Integer, + timeout: Float + } + end + end + end + end + end +end diff --git a/lib/openai/internal/transport/pooled_net_requester.rb b/lib/openai/internal/transport/pooled_net_requester.rb new file mode 100644 index 00000000..0736b441 --- /dev/null +++ b/lib/openai/internal/transport/pooled_net_requester.rb @@ -0,0 +1,201 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + module Transport + # @api private + class PooledNetRequester + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + # from the golang stdlib + # https://github.com/golang/go/blob/c8eced8580028328fde7c03cbfcb720ce15b2358/src/net/http/transport.go#L49 + KEEP_ALIVE_TIMEOUT = 30 + + DEFAULT_MAX_CONNECTIONS = [Etc.nprocessors, 99].max + + class << self + 
# @api private + # + # @param url [URI::Generic] + # + # @return [Net::HTTP] + def connect(url) + port = + case [url.port, url.scheme] + in [Integer, _] + url.port + in [nil, "http" | "ws"] + Net::HTTP.http_default_port + in [nil, "https" | "wss"] + Net::HTTP.https_default_port + end + + Net::HTTP.new(url.host, port).tap do + _1.use_ssl = %w[https wss].include?(url.scheme) + _1.max_retries = 0 + end + end + + # @api private + # + # @param conn [Net::HTTP] + # @param deadline [Float] + def calibrate_socket_timeout(conn, deadline) + timeout = deadline - OpenAI::Internal::Util.monotonic_secs + conn.open_timeout = conn.read_timeout = conn.write_timeout = conn.continue_timeout = timeout + end + + # @api private + # + # @param request [Hash{Symbol=>Object}] . + # + # @option request [Symbol] :method + # + # @option request [URI::Generic] :url + # + # @option request [Hash{String=>String}] :headers + # + # @param blk [Proc] + # + # @yieldparam [String] + # @return [Array(Net::HTTPGenericRequest, Proc)] + def build_request(request, &blk) + method, url, headers, body = request.fetch_values(:method, :url, :headers, :body) + req = Net::HTTPGenericRequest.new( + method.to_s.upcase, + !body.nil?, + method != :head, + URI(url.to_s) # ensure we construct a URI class of the right scheme + ) + + headers.each { req[_1] = _2 } + + case body + in nil + nil + in String + req["content-length"] ||= body.bytesize.to_s unless req["transfer-encoding"] + req.body_stream = OpenAI::Internal::Util::ReadIOAdapter.new(body, &blk) + in StringIO + req["content-length"] ||= body.size.to_s unless req["transfer-encoding"] + req.body_stream = OpenAI::Internal::Util::ReadIOAdapter.new(body, &blk) + in Pathname | IO | Enumerator + req["transfer-encoding"] ||= "chunked" unless req["content-length"] + req.body_stream = OpenAI::Internal::Util::ReadIOAdapter.new(body, &blk) + end + + [req, req.body_stream&.method(:close)] + end + end + + # @api private + # + # @param url [URI::Generic] + # @param deadline [Float] + # @param blk [Proc] + # + # @raise [Timeout::Error] + # @yieldparam [Net::HTTP] + private def with_pool(url, deadline:, &blk) + origin = OpenAI::Internal::Util.uri_origin(url) + timeout = deadline - OpenAI::Internal::Util.monotonic_secs + pool = + @mutex.synchronize do + @pools[origin] ||= ConnectionPool.new(size: @size) do + self.class.connect(url) + end + end + + pool.with(timeout: timeout, &blk) + end + + # @api private + # + # @param request [Hash{Symbol=>Object}] . + # + # @option request [Symbol] :method + # + # @option request [URI::Generic] :url + # + # @option request [Hash{String=>String}] :headers + # + # @option request [Object] :body + # + # @option request [Float] :deadline + # + # @return [Array(Integer, Net::HTTPResponse, Enumerable)] + def execute(request) + url, deadline = request.fetch_values(:url, :deadline) + + req = nil + eof = false + finished = false + closing = nil + + # rubocop:disable Metrics/BlockLength + enum = Enumerator.new do |y| + with_pool(url, deadline: deadline) do |conn| + next if finished + + req, closing = self.class.build_request(request) do + self.class.calibrate_socket_timeout(conn, deadline) + end + + self.class.calibrate_socket_timeout(conn, deadline) + unless conn.started? 
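+              # a fresh pooled connection: enable keep-alive before starting so
+              # the socket can be reused by subsequent requests to this origin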
+ conn.keep_alive_timeout = self.class::KEEP_ALIVE_TIMEOUT + conn.start + end + + self.class.calibrate_socket_timeout(conn, deadline) + conn.request(req) do |rsp| + y << [conn, req, rsp] + break if finished + + rsp.read_body do |bytes| + y << bytes.force_encoding(Encoding::BINARY) + break if finished + + self.class.calibrate_socket_timeout(conn, deadline) + end + eof = true + end + end + rescue Timeout::Error + raise OpenAI::Errors::APITimeoutError.new(url: url, request: req) + rescue StandardError + raise OpenAI::Errors::APIConnectionError.new(url: url, request: req) + end + # rubocop:enable Metrics/BlockLength + + conn, _, response = enum.next + body = OpenAI::Internal::Util.fused_enum(enum, external: true) do + finished = true + tap do + enum.next + rescue StopIteration + nil + end + ensure + conn.finish if !eof && conn&.started? + closing&.call + end + [Integer(response.code), response, body] + end + + # @api private + # + # @param size [Integer] + def initialize(size: self.class::DEFAULT_MAX_CONNECTIONS) + @mutex = Mutex.new + @size = size + @pools = {} + end + + define_sorbet_constant!(:Request) do + T.type_alias { {method: Symbol, url: URI::Generic, headers: T::Hash[String, String], body: T.anything, deadline: Float} } + end + end + end + end +end diff --git a/lib/openai/internal/type/array_of.rb b/lib/openai/internal/type/array_of.rb new file mode 100644 index 00000000..b43a8c49 --- /dev/null +++ b/lib/openai/internal/type/array_of.rb @@ -0,0 +1,168 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + module Type + # @api private + # + # @abstract + # + # @generic Elem + # + # Array of items of a given type. + class ArrayOf + include OpenAI::Internal::Type::Converter + include OpenAI::Internal::Util::SorbetRuntimeSupport + + private_class_method :new + + # @overload [](type_info, spec = {}) + # + # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] + # + # @param spec [Hash{Symbol=>Object}] . + # + # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const + # + # @option spec [Proc] :enum + # + # @option spec [Proc] :union + # + # @option spec [Boolean] :"nil?" + # + # @return [self] + def self.[](...) = new(...) + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def ===(other) = other.is_a?(Array) && other.all?(item_type) + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def ==(other) + # rubocop:disable Layout/LineLength + other.is_a?(OpenAI::Internal::Type::ArrayOf) && other.nilable? == nilable? && other.item_type == item_type + # rubocop:enable Layout/LineLength + end + + # @api public + # + # @return [Integer] + def hash = [self.class, item_type].hash + + # @api private + # + # @param value [Array, Object] + # + # @param state [Hash{Symbol=>Object}] . 
+ # + # @option state [Boolean] :translate_names + # + # @option state [Boolean] :strictness + # + # @option state [Hash{Symbol=>Object}] :exactness + # + # @option state [Class] :error + # + # @option state [Integer] :branched + # + # @return [Array, Object] + def coerce(value, state:) + exactness = state.fetch(:exactness) + + unless value.is_a?(Array) + exactness[:no] += 1 + state[:error] = TypeError.new("#{value.class} can't be coerced into #{Array}") + return value + end + + target = item_type + exactness[:yes] += 1 + value + .map do |item| + case [nilable?, item] + in [true, nil] + exactness[:yes] += 1 + nil + else + OpenAI::Internal::Type::Converter.coerce(target, item, state: state) + end + end + end + + # @api private + # + # @param value [Array, Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :can_retry + # + # @return [Array, Object] + def dump(value, state:) + target = item_type + if value.is_a?(Array) + value.map do + OpenAI::Internal::Type::Converter.dump(target, _1, state: state) + end + else + super + end + end + + # @api private + # + # @return [Object] + def to_sorbet_type + T::Array[OpenAI::Internal::Util::SorbetRuntimeSupport.to_sorbet_type(item_type)] + end + + # @api private + # + # @return [generic] + protected def item_type = @item_type_fn.call + + # @api private + # + # @return [Boolean] + protected def nilable? = @nilable + + # @api private + # + # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] + # + # @param spec [Hash{Symbol=>Object}] . + # + # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const + # + # @option spec [Proc] :enum + # + # @option spec [Proc] :union + # + # @option spec [Boolean] :"nil?" + def initialize(type_info, spec = {}) + @item_type_fn = OpenAI::Internal::Type::Converter.type_info(type_info || spec) + @meta = OpenAI::Internal::Type::Converter.meta_info(type_info, spec) + @nilable = spec.fetch(:nil?, false) + end + + # @api private + # + # @param depth [Integer] + # + # @return [String] + def inspect(depth: 0) + items = OpenAI::Internal::Type::Converter.inspect(item_type, depth: depth.succ) + + "#{self.class}[#{[items, nilable? ? 'nil' : nil].compact.join(' | ')}]" + end + end + end + end +end diff --git a/lib/openai/internal/type/base_model.rb b/lib/openai/internal/type/base_model.rb new file mode 100644 index 00000000..791db452 --- /dev/null +++ b/lib/openai/internal/type/base_model.rb @@ -0,0 +1,531 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + module Type + # @abstract + class BaseModel + extend OpenAI::Internal::Type::Converter + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + class << self + # @api private + # + # Assumes superclass fields are totally defined before fields are accessed / + # defined on subclasses. + # + # @param child [Class] + def inherited(child) + super + child.known_fields.replace(known_fields.dup) + end + + # @api private + # + # @return [Hash{Symbol=>Hash{Symbol=>Object}}] + def known_fields = @known_fields ||= {} + + # @api private + # + # @return [Hash{Symbol=>Hash{Symbol=>Object}}] + def fields + known_fields.transform_values do |field| + {**field.except(:type_fn), type: field.fetch(:type_fn).call} + end + end + + # @api private + # + # @param name_sym [Symbol] + # + # @param required [Boolean] + # + # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] + # + # @param spec [Hash{Symbol=>Object}] . 
+ # + # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const + # + # @option spec [Proc] :enum + # + # @option spec [Proc] :union + # + # @option spec [Boolean] :"nil?" + private def add_field(name_sym, required:, type_info:, spec:) + meta = OpenAI::Internal::Type::Converter.meta_info(type_info, spec) + type_fn, info = + case type_info + in Proc | OpenAI::Internal::Type::Converter | Class + [OpenAI::Internal::Type::Converter.type_info({**spec, union: type_info}), spec] + in Hash + [OpenAI::Internal::Type::Converter.type_info(type_info), type_info] + end + + setter = :"#{name_sym}=" + api_name = info.fetch(:api_name, name_sym) + nilable = info.fetch(:nil?, false) + const = required && !nilable ? info.fetch(:const, OpenAI::Internal::OMIT) : OpenAI::Internal::OMIT + + [name_sym, setter].each { undef_method(_1) } if known_fields.key?(name_sym) + + known_fields[name_sym] = + { + mode: @mode, + api_name: api_name, + required: required, + nilable: nilable, + const: const, + type_fn: type_fn, + meta: meta + } + + define_method(setter) do |value| + target = type_fn.call + state = OpenAI::Internal::Type::Converter.new_coerce_state(translate_names: false) + coerced = OpenAI::Internal::Type::Converter.coerce(target, value, state: state) + status = @coerced.store(name_sym, state.fetch(:error) || true) + stored = + case [target, status] + in [OpenAI::Internal::Type::Converter | Symbol, true] + coerced + else + value + end + @data.store(name_sym, stored) + end + + # rubocop:disable Style/CaseEquality + # rubocop:disable Metrics/BlockLength + define_method(name_sym) do + target = type_fn.call + + case @coerced[name_sym] + in true | false if OpenAI::Internal::Type::Converter === target + @data.fetch(name_sym) + in ::StandardError => e + raise OpenAI::Errors::ConversionError.new( + on: self.class, + method: __method__, + target: target, + value: @data.fetch(name_sym), + cause: e + ) + else + Kernel.then do + value = @data.fetch(name_sym) { const == OpenAI::Internal::OMIT ? nil : const } + state = OpenAI::Internal::Type::Converter.new_coerce_state(translate_names: false) + if (nilable || !required) && value.nil? + nil + else + OpenAI::Internal::Type::Converter.coerce( + target, value, state: state + ) + end + rescue StandardError => e + raise OpenAI::Errors::ConversionError.new( + on: self.class, + method: __method__, + target: target, + value: value, + cause: e + ) + end + end + end + # rubocop:enable Metrics/BlockLength + # rubocop:enable Style/CaseEquality + end + + # @api private + # + # @param name_sym [Symbol] + # + # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] + # + # @param spec [Hash{Symbol=>Object}] . + # + # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const + # + # @option spec [Proc] :enum + # + # @option spec [Proc] :union + # + # @option spec [Boolean] :"nil?" + def required(name_sym, type_info, spec = {}) + add_field(name_sym, required: true, type_info: type_info, spec: spec) + end + + # @api private + # + # @param name_sym [Symbol] + # + # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] + # + # @param spec [Hash{Symbol=>Object}] . + # + # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const + # + # @option spec [Proc] :enum + # + # @option spec [Proc] :union + # + # @option spec [Boolean] :"nil?" 
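+          # @example
+          #   # A hypothetical model declaration (names are illustrative only):
+          #   class Pet < OpenAI::Internal::Type::BaseModel
+          #     required :name, String
+          #     optional :age, Integer, nil?: true
+          #   end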
+ def optional(name_sym, type_info, spec = {}) + add_field(name_sym, required: false, type_info: type_info, spec: spec) + end + + # @api private + # + # `request_only` attributes not excluded from `.#coerce` when receiving responses + # even if well behaved servers should not send them + # + # @param blk [Proc] + private def request_only(&blk) + @mode = :dump + blk.call + ensure + @mode = nil + end + + # @api private + # + # `response_only` attributes are omitted from `.#dump` when making requests + # + # @param blk [Proc] + private def response_only(&blk) + @mode = :coerce + blk.call + ensure + @mode = nil + end + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def ==(other) + other.is_a?(Class) && other <= OpenAI::Internal::Type::BaseModel && other.fields == fields + end + + # @api public + # + # @return [Integer] + def hash = fields.hash + end + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def ==(other) = self.class == other.class && @data == other.to_h + + # @api public + # + # @return [Integer] + def hash = [self.class, @data].hash + + class << self + # @api private + # + # @param value [OpenAI::Internal::Type::BaseModel, Hash{Object=>Object}, Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :translate_names + # + # @option state [Boolean] :strictness + # + # @option state [Hash{Symbol=>Object}] :exactness + # + # @option state [Class] :error + # + # @option state [Integer] :branched + # + # @return [self, Object] + def coerce(value, state:) + exactness = state.fetch(:exactness) + + if value.is_a?(self) + exactness[:yes] += 1 + return value + end + + unless (val = OpenAI::Internal::Util.coerce_hash(value)).is_a?(Hash) + exactness[:no] += 1 + state[:error] = TypeError.new("#{value.class} can't be coerced into #{Hash}") + return value + end + exactness[:yes] += 1 + + keys = val.keys.to_set + instance = new + data = instance.to_h + status = instance.instance_variable_get(:@coerced) + + # rubocop:disable Metrics/BlockLength + fields.each do |name, field| + mode, required, target = field.fetch_values(:mode, :required, :type) + api_name, nilable, const = field.fetch_values(:api_name, :nilable, :const) + src_name = state.fetch(:translate_names) ? api_name : name + + unless val.key?(src_name) + if required && mode != :dump && const == OpenAI::Internal::OMIT + exactness[nilable ? :maybe : :no] += 1 + else + exactness[:yes] += 1 + end + next + end + + item = val.fetch(src_name) + keys.delete(src_name) + + state[:error] = nil + converted = + if item.nil? && (nilable || !required) + exactness[nilable ? :yes : :maybe] += 1 + nil + else + coerced = OpenAI::Internal::Type::Converter.coerce(target, item, state: state) + case target + in OpenAI::Internal::Type::Converter | Symbol + coerced + else + item + end + end + + status.store(name, state.fetch(:error) || true) + data.store(name, converted) + end + # rubocop:enable Metrics/BlockLength + + keys.each { data.store(_1, val.fetch(_1)) } + instance + end + + # @api private + # + # @param value [self, Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :can_retry + # + # @return [Hash{Object=>Object}, Object] + def dump(value, state:) + unless (coerced = OpenAI::Internal::Util.coerce_hash(value)).is_a?(Hash) + return super + end + + acc = {} + + coerced.each do |key, val| + name = key.is_a?(String) ? 
key.to_sym : key + case (field = known_fields[name]) + in nil + acc.store(name, super(val, state: state)) + else + api_name, mode, type_fn = field.fetch_values(:api_name, :mode, :type_fn) + case mode + in :coerce + next + else + target = type_fn.call + acc.store(api_name, OpenAI::Internal::Type::Converter.dump(target, val, state: state)) + end + end + end + + known_fields.each_value do |field| + api_name, mode, const = field.fetch_values(:api_name, :mode, :const) + next if mode == :coerce || acc.key?(api_name) || const == OpenAI::Internal::OMIT + acc.store(api_name, const) + end + + acc + end + + # @api private + # + # @return [Object] + def to_sorbet_type + self + end + end + + class << self + # @api private + # + # @param model [OpenAI::Internal::Type::BaseModel] + # @param convert [Boolean] + # + # @return [Hash{Symbol=>Object}] + def recursively_to_h(model, convert:) + rec = ->(x) do + case x + in OpenAI::Internal::Type::BaseModel + if convert + fields = x.class.known_fields + x.to_h.to_h do |key, val| + [key, rec.call(fields.key?(key) ? x.public_send(key) : val)] + rescue OpenAI::Errors::ConversionError + [key, rec.call(val)] + end + else + rec.call(x.to_h) + end + in Hash + x.transform_values(&rec) + in Array + x.map(&rec) + else + x + end + end + rec.call(model) + end + end + + # @api public + # + # Returns the raw value associated with the given key, if found. Otherwise, nil is + # returned. + # + # It is valid to lookup keys that are not in the API spec, for example to access + # undocumented features. This method does not parse response data into + # higher-level types. Lookup by anything other than a Symbol is an ArgumentError. + # + # @param key [Symbol] + # + # @return [Object, nil] + def [](key) + unless key.instance_of?(Symbol) + raise ArgumentError.new("Expected symbol key for lookup, got #{key.inspect}") + end + + @data[key] + end + + # @api public + # + # Returns a Hash of the data underlying this object. O(1) + # + # Keys are Symbols and values are the raw values from the response. The return + # value indicates which values were ever set on the object. i.e. there will be a + # key in this hash if they ever were, even if the set value was nil. + # + # This method is not recursive. The returned value is shared by the object, so it + # should not be mutated. + # + # @return [Hash{Symbol=>Object}] + def to_h = @data + + alias_method :to_hash, :to_h + + # @api public + # + # In addition to the behaviour of `#to_h`, this method will recursively call + # `#to_h` on nested models. + # + # @return [Hash{Symbol=>Object}] + def deep_to_h = self.class.recursively_to_h(@data, convert: false) + + # @param keys [Array, nil] + # + # @return [Hash{Symbol=>Object}] + # + # @example + # # `comparison_filter` is a `OpenAI::ComparisonFilter` + # comparison_filter => { + # key: key, + # type: type, + # value: value + # } + def deconstruct_keys(keys) + (keys || self.class.known_fields.keys) + .filter_map do |k| + unless self.class.known_fields.key?(k) + next + end + + [k, public_send(k)] + end + .to_h + end + + # @api public + # + # @param a [Object] + # + # @return [String] + def to_json(*a) = OpenAI::Internal::Type::Converter.dump(self.class, self).to_json(*a) + + # @api public + # + # @param a [Object] + # + # @return [String] + def to_yaml(*a) = OpenAI::Internal::Type::Converter.dump(self.class, self).to_yaml(*a) + + # Create a new instance of a model. 
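+        #
+        # @example
+        #   # Hypothetical subclass (illustrative only); unknown keys are kept
+        #   # verbatim and can be read back with `#[]`:
+        #   pet = Pet.new(name: "Rex", kind: "dog")
+        #   pet[:kind] # => "dog"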
+ # + # @param data [Hash{Symbol=>Object}, self] + def initialize(data = {}) + @data = {} + @coerced = {} + OpenAI::Internal::Util.coerce_hash!(data).each do + if self.class.known_fields.key?(_1) + public_send(:"#{_1}=", _2) + else + @data.store(_1, _2) + @coerced.store(_1, false) + end + end + end + + class << self + # @api private + # + # @param depth [Integer] + # + # @return [String] + def inspect(depth: 0) + return super() if depth.positive? + + depth = depth.succ + deferred = fields.transform_values do |field| + type, required, nilable = field.fetch_values(:type, :required, :nilable) + inspected = [ + OpenAI::Internal::Type::Converter.inspect(type, depth: depth), + !required || nilable ? "nil" : nil + ].compact.join(" | ") + -> { inspected }.tap { _1.define_singleton_method(:inspect) { call } } + end + + "#{name}[#{deferred.inspect}]" + end + end + + # @api public + # + # @return [String] + def to_s = deep_to_h.to_s + + # @api private + # + # @return [String] + def inspect + converted = self.class.recursively_to_h(self, convert: true) + "#<#{self.class}:0x#{object_id.to_s(16)} #{converted}>" + end + + define_sorbet_constant!(:KnownField) do + T.type_alias { {mode: T.nilable(Symbol), required: T::Boolean, nilable: T::Boolean} } + end + end + end + end +end diff --git a/lib/openai/internal/type/base_page.rb b/lib/openai/internal/type/base_page.rb new file mode 100644 index 00000000..3ebca02d --- /dev/null +++ b/lib/openai/internal/type/base_page.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + module Type + # @api private + # + # @generic Elem + # + # This module provides a base implementation for paginated responses in the SDK. + module BasePage + # rubocop:disable Lint/UnusedMethodArgument + + # @api public + # + # @return [Boolean] + def next_page? = (raise NotImplementedError) + + # @api public + # + # @raise [OpenAI::Errors::APIError] + # @return [self] + def next_page = (raise NotImplementedError) + + # @api public + # + # @param blk [Proc] + # + # @yieldparam [generic] + # @return [void] + def auto_paging_each(&blk) = (raise NotImplementedError) + + # @return [Enumerable>] + def to_enum = super(:auto_paging_each) + + alias_method :enum_for, :to_enum + + # @api private + # + # @param client [OpenAI::Internal::Transport::BaseClient] + # @param req [Hash{Symbol=>Object}] + # @param headers [Hash{String=>String}, Net::HTTPHeader] + # @param page_data [Object] + def initialize(client:, req:, headers:, page_data:) + @client = client + @req = req + @model = req.fetch(:model) + super() + end + + # rubocop:enable Lint/UnusedMethodArgument + end + end + end +end diff --git a/lib/openai/internal/type/base_stream.rb b/lib/openai/internal/type/base_stream.rb new file mode 100644 index 00000000..3ebdf248 --- /dev/null +++ b/lib/openai/internal/type/base_stream.rb @@ -0,0 +1,92 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + module Type + # @api private + # + # @generic Elem + # + # This module provides a base implementation for streaming responses in the SDK. + # + # @see https://rubyapi.org/3.2/o/enumerable + module BaseStream + include Enumerable + + class << self + # Attempt to close the underlying transport when the stream itself is garbage + # collected. + # + # This should not be relied upon for resource clean up, as the garbage collector + # is not guaranteed to run. 
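+          #
+          # Callers that abandon a stream early should call `#close` themselves
+          # (for example from an `ensure` block); this finalizer is only a
+          # best-effort backstop.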
+ # + # @param stream [Enumerable] + # + # @return [Proc] + # + # @see https://rubyapi.org/3.2/o/objectspace#method-c-define_finalizer + def defer_closing(stream) = ->(_id) { OpenAI::Internal::Util.close_fused!(stream) } + end + + # @api public + # + # @return [void] + def close = OpenAI::Internal::Util.close_fused!(@iterator) + + # @api private + # + # @return [Enumerable>] + private def iterator = (raise NotImplementedError) + + # @api public + # + # @param blk [Proc] + # + # @yieldparam [generic] + # @return [void] + def each(&blk) + unless block_given? + raise ArgumentError.new("A block must be given to ##{__method__}") + end + @iterator.each(&blk) + end + + # @api public + # + # @return [Enumerator>] + def to_enum = @iterator + + alias_method :enum_for, :to_enum + + # @api private + # + # @param model [Class, OpenAI::Internal::Type::Converter] + # @param url [URI::Generic] + # @param status [Integer] + # @param response [Net::HTTPResponse] + # @param unwrap [Symbol, Integer, Array, Proc] + # @param stream [Enumerable] + def initialize(model:, url:, status:, response:, unwrap:, stream:) + @model = model + @url = url + @status = status + @response = response + @unwrap = unwrap + @stream = stream + @iterator = iterator + + ObjectSpace.define_finalizer(self, OpenAI::Internal::Type::BaseStream.defer_closing(@stream)) + end + + # @api private + # + # @return [String] + def inspect + model = OpenAI::Internal::Type::Converter.inspect(@model, depth: 1) + + "#<#{self.class}[#{model}]:0x#{object_id.to_s(16)}>" + end + end + end + end +end diff --git a/lib/openai/internal/type/boolean.rb b/lib/openai/internal/type/boolean.rb new file mode 100644 index 00000000..9c903929 --- /dev/null +++ b/lib/openai/internal/type/boolean.rb @@ -0,0 +1,77 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + module Type + # @api private + # + # @abstract + # + # Ruby has no Boolean class; this is something for models to refer to. + class Boolean + extend OpenAI::Internal::Type::Converter + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + private_class_method :new + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def self.===(other) = other == true || other == false + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def self.==(other) = other.is_a?(Class) && other <= OpenAI::Internal::Type::Boolean + + class << self + # @api private + # + # Coerce value to Boolean if possible, otherwise return the original value. + # + # @param value [Boolean, Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :translate_names + # + # @option state [Boolean] :strictness + # + # @option state [Hash{Symbol=>Object}] :exactness + # + # @option state [Class] :error + # + # @option state [Integer] :branched + # + # @return [Boolean, Object] + def coerce(value, state:) + state.fetch(:exactness)[value == true || value == false ? :yes : :no] += 1 + value + end + + # @!method dump(value, state:) + # @api private + # + # @param value [Boolean, Object] + # + # @param state [Hash{Symbol=>Object}] . 
+ # + # @option state [Boolean] :can_retry + # + # @return [Boolean, Object] + + # @api private + # + # @return [Object] + def to_sorbet_type + T::Boolean + end + end + end + end + end +end diff --git a/lib/openai/internal/type/converter.rb b/lib/openai/internal/type/converter.rb new file mode 100644 index 00000000..0b31dfcf --- /dev/null +++ b/lib/openai/internal/type/converter.rb @@ -0,0 +1,327 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + module Type + # @api private + module Converter + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + # rubocop:disable Lint/UnusedMethodArgument + + # @api private + # + # @param value [Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :translate_names + # + # @option state [Boolean] :strictness + # + # @option state [Hash{Symbol=>Object}] :exactness + # + # @option state [Class] :error + # + # @option state [Integer] :branched + # + # @return [Object] + def coerce(value, state:) = (raise NotImplementedError) + + # @api private + # + # @param value [Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :can_retry + # + # @return [Object] + def dump(value, state:) + case value + in Array + value.map { OpenAI::Internal::Type::Unknown.dump(_1, state: state) } + in Hash + value.transform_values { OpenAI::Internal::Type::Unknown.dump(_1, state: state) } + in OpenAI::Internal::Type::BaseModel + value.class.dump(value, state: state) + in StringIO + value.string + in Pathname | IO + state[:can_retry] = false if value.is_a?(IO) + OpenAI::FilePart.new(value) + in OpenAI::FilePart + state[:can_retry] = false if value.content.is_a?(IO) + value + else + value + end + end + + # @api private + # + # @param depth [Integer] + # + # @return [String] + def inspect(depth: 0) + super() + end + + # rubocop:enable Lint/UnusedMethodArgument + + class << self + # @api private + # + # @param spec [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] . + # + # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const + # + # @option spec [Proc] :enum + # + # @option spec [Proc] :union + # + # @option spec [Boolean] :"nil?" + # + # @return [Proc] + def type_info(spec) + case spec + in Proc + spec + in Hash + type_info(spec.slice(:const, :enum, :union).first&.last) + in true | false + -> { OpenAI::Internal::Type::Boolean } + in OpenAI::Internal::Type::Converter | Class | Symbol + -> { spec } + in NilClass | Integer | Float + -> { spec.class } + end + end + + # @api private + # + # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] . + # + # @option type_info [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const + # + # @option type_info [Proc] :enum + # + # @option type_info [Proc] :union + # + # @option type_info [Boolean] :"nil?" + # + # @param spec [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] . + # + # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const + # + # @option spec [Proc] :enum + # + # @option spec [Proc] :union + # + # @option spec [Boolean] :"nil?" + # + # @return [Hash{Symbol=>Object}] + def meta_info(type_info, spec) + [spec, type_info].grep(Hash).first.to_h.except(:const, :enum, :union, :nil?) 
+ end + + # @api private + # + # @param translate_names [Boolean] + # + # @return [Hash{Symbol=>Object}] + def new_coerce_state(translate_names: true) + { + translate_names: translate_names, + strictness: true, + exactness: {yes: 0, no: 0, maybe: 0}, + error: nil, + branched: 0 + } + end + + # @api private + # + # Based on `target`, transform `value` into `target`, to the extent possible: + # + # 1. if the given `value` conforms to `target` already, return the given `value` + # 2. if it's possible and safe to convert the given `value` to `target`, then the + # converted value + # 3. otherwise, the given `value` unaltered + # + # The coercion process is subject to improvement between minor release versions. + # See https://docs.pydantic.dev/latest/concepts/unions/#smart-mode + # + # @param target [OpenAI::Internal::Type::Converter, Class] + # + # @param value [Object] + # + # @param state [Hash{Symbol=>Object}] The `strictness` is one of `true`, `false`. This informs the coercion strategy + # when we have to decide between multiple possible conversion targets: + # + # - `true`: the conversion must be exact, with minimum coercion. + # - `false`: the conversion can be approximate, with some coercion. + # + # The `exactness` is `Hash` with keys being one of `yes`, `no`, or `maybe`. For + # any given conversion attempt, the exactness will be updated based on how closely + # the value recursively matches the target type: + # + # - `yes`: the value can be converted to the target type with minimum coercion. + # - `maybe`: the value can be converted to the target type with some reasonable + # coercion. + # - `no`: the value cannot be converted to the target type. + # + # See implementation below for more details. + # + # @option state [Boolean] :translate_names + # + # @option state [Boolean] :strictness + # + # @option state [Hash{Symbol=>Object}] :exactness + # + # @option state [Class] :error + # + # @option state [Integer] :branched + # + # @return [Object] + def coerce(target, value, state: OpenAI::Internal::Type::Converter.new_coerce_state) + # rubocop:disable Metrics/BlockNesting + exactness = state.fetch(:exactness) + + case target + in OpenAI::Internal::Type::Converter + return target.coerce(value, state: state) + in Class + if value.is_a?(target) + exactness[:yes] += 1 + return value + end + + case target + in -> { _1 <= NilClass } + exactness[value.nil? ? :yes : :maybe] += 1 + return nil + in -> { _1 <= Integer } + case value + in Integer + exactness[:yes] += 1 + return value + else + Kernel.then do + return Integer(value).tap { exactness[:maybe] += 1 } + rescue ArgumentError, TypeError => e + state[:error] = e + end + end + in -> { _1 <= Float } + if value.is_a?(Numeric) + exactness[:yes] += 1 + return Float(value) + else + Kernel.then do + return Float(value).tap { exactness[:maybe] += 1 } + rescue ArgumentError, TypeError => e + state[:error] = e + end + end + in -> { _1 <= String } + case value + in String | Symbol | Numeric + exactness[value.is_a?(Numeric) ? 
:maybe : :yes] += 1 + return value.to_s + in StringIO + exactness[:yes] += 1 + return value.string + else + state[:error] = TypeError.new("#{value.class} can't be coerced into #{String}") + end + in -> { _1 <= Date || _1 <= Time } + Kernel.then do + return target.parse(value).tap { exactness[:yes] += 1 } + rescue ArgumentError, TypeError => e + state[:error] = e + end + in -> { _1 <= StringIO } if value.is_a?(String) + exactness[:yes] += 1 + return StringIO.new(value.b) + else + end + in Symbol + case value + in Symbol | String + if value.to_sym == target + exactness[:yes] += 1 + return target + else + exactness[:maybe] += 1 + return value + end + else + message = "cannot convert non-matching #{value.class} into #{target.inspect}" + state[:error] = ArgumentError.new(message) + end + else + end + + exactness[:no] += 1 + value + # rubocop:enable Metrics/BlockNesting + end + + # @api private + # + # @param target [OpenAI::Internal::Type::Converter, Class] + # + # @param value [Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :can_retry + # + # @return [Object] + def dump(target, value, state: {can_retry: true}) + case target + in OpenAI::Internal::Type::Converter + target.dump(value, state: state) + else + OpenAI::Internal::Type::Unknown.dump(value, state: state) + end + end + + # @api private + # + # @param target [Object] + # @param depth [Integer] + # + # @return [String] + def inspect(target, depth:) + case target + in OpenAI::Internal::Type::Converter + target.inspect(depth: depth.succ) + else + target.inspect + end + end + end + + define_sorbet_constant!(:Input) do + T.type_alias { T.any(OpenAI::Internal::Type::Converter, T::Class[T.anything]) } + end + define_sorbet_constant!(:CoerceState) do + T.type_alias do + { + translate_names: T::Boolean, + strictness: T::Boolean, + exactness: {yes: Integer, no: Integer, maybe: Integer}, + error: T::Class[StandardError], + branched: Integer + } + end + end + define_sorbet_constant!(:DumpState) do + T.type_alias { {can_retry: T::Boolean} } + end + end + end + end +end diff --git a/lib/openai/internal/type/enum.rb b/lib/openai/internal/type/enum.rb new file mode 100644 index 00000000..70476264 --- /dev/null +++ b/lib/openai/internal/type/enum.rb @@ -0,0 +1,156 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + module Type + # @api private + # + # A value from among a specified list of options. OpenAPI enum values map to Ruby + # values in the SDK as follows: + # + # 1. boolean => true | false + # 2. integer => Integer + # 3. float => Float + # 4. string => Symbol + # + # We can therefore convert string values to Symbols, but can't convert other + # values safely. + # + # @example + # # `chat_model` is a `OpenAI::ChatModel` + # case chat_model + # when OpenAI::ChatModel::GPT_5 + # # ... + # when OpenAI::ChatModel::GPT_5_MINI + # # ... + # when OpenAI::ChatModel::GPT_5_NANO + # # ... + # else + # puts(chat_model) + # end + # + # @example + # case chat_model + # in :"gpt-5" + # # ... + # in :"gpt-5-mini" + # # ... + # in :"gpt-5-nano" + # # ... + # else + # puts(chat_model) + # end + module Enum + include OpenAI::Internal::Type::Converter + include OpenAI::Internal::Util::SorbetRuntimeSupport + + # All of the valid Symbol values for this enum. 
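+      #
+      # For example (illustrative; actual members depend on the enum), using
+      # `OpenAI::Audio::SpeechModel` as defined later in this change:
+      #
+      #   OpenAI::Audio::SpeechModel.values
+      #   # => [:"tts-1", :"tts-1-hd", :"gpt-4o-mini-tts"]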
+ # + # @return [Array] + def values = constants.map { const_get(_1) } + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def ===(other) = values.include?(other) + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def ==(other) + # rubocop:disable Style/CaseEquality + OpenAI::Internal::Type::Enum === other && other.values.to_set == values.to_set + # rubocop:enable Style/CaseEquality + end + + # @api public + # + # @return [Integer] + def hash = values.to_set.hash + + # @api private + # + # Unlike with primitives, `Enum` additionally validates that the value is a member + # of the enum. + # + # @param value [String, Symbol, Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :translate_names + # + # @option state [Boolean] :strictness + # + # @option state [Hash{Symbol=>Object}] :exactness + # + # @option state [Class] :error + # + # @option state [Integer] :branched + # + # @return [Symbol, Object] + def coerce(value, state:) + exactness = state.fetch(:exactness) + val = value.is_a?(String) ? value.to_sym : value + + if values.include?(val) + exactness[:yes] += 1 + val + elsif values.first&.class == val.class + exactness[:maybe] += 1 + value + else + exactness[:no] += 1 + state[:error] = TypeError.new("#{value.class} can't be coerced into #{self}") + value + end + end + + # @!method dump(value, state:) + # @api private + # + # @param value [Symbol, Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :can_retry + # + # @return [Symbol, Object] + + # @api private + # + # @return [Object] + def to_sorbet_type + types = values.map { OpenAI::Internal::Util::SorbetRuntimeSupport.to_sorbet_type(_1) }.uniq + case types + in [] + T.noreturn + in [type] + type + else + T.any(*types) + end + end + + # @api private + # + # @param depth [Integer] + # + # @return [String] + def inspect(depth: 0) + if depth.positive? + return is_a?(Module) ? super() : self.class.name + end + + members = values.map { OpenAI::Internal::Type::Converter.inspect(_1, depth: depth.succ) } + prefix = is_a?(Module) ? name : self.class.name + + "#{prefix}[#{members.join(' | ')}]" + end + end + end + end +end diff --git a/lib/openai/internal/type/file_input.rb b/lib/openai/internal/type/file_input.rb new file mode 100644 index 00000000..a5dcdef7 --- /dev/null +++ b/lib/openai/internal/type/file_input.rb @@ -0,0 +1,108 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + module Type + # @api private + # + # @abstract + # + # Either `Pathname` or `StringIO`, or `IO`, or + # `OpenAI::Internal::Type::FileInput`. + # + # Note: when `IO` is used, all retries are disabled, since many IO` streams are + # not rewindable. + class FileInput + extend OpenAI::Internal::Type::Converter + + private_class_method :new + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def self.===(other) + case other + in Pathname | StringIO | IO | String | OpenAI::FilePart + true + else + false + end + end + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def self.==(other) = other.is_a?(Class) && other <= OpenAI::Internal::Type::FileInput + + class << self + # @api private + # + # @param value [StringIO, String, Object] + # + # @param state [Hash{Symbol=>Object}] . 
+ # + # @option state [Boolean] :translate_names + # + # @option state [Boolean] :strictness + # + # @option state [Hash{Symbol=>Object}] :exactness + # + # @option state [Class] :error + # + # @option state [Integer] :branched + # + # @return [StringIO, Object] + def coerce(value, state:) + exactness = state.fetch(:exactness) + case value + in String + exactness[:yes] += 1 + StringIO.new(value) + in StringIO + exactness[:yes] += 1 + value + else + state[:error] = TypeError.new("#{value.class} can't be coerced into #{StringIO}") + exactness[:no] += 1 + value + end + end + + # @api private + # + # @param value [Pathname, StringIO, IO, String, Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :can_retry + # + # @return [Pathname, StringIO, IO, String, Object] + def dump(value, state:) + # rubocop:disable Lint/DuplicateBranch + case value + in IO + state[:can_retry] = false + in OpenAI::FilePart if value.content.is_a?(IO) + state[:can_retry] = false + else + end + # rubocop:enable Lint/DuplicateBranch + + value + end + + # @api private + # + # @return [Object] + def to_sorbet_type + T.any(Pathname, StringIO, IO, String, OpenAI::FilePart) + end + end + end + end + end +end diff --git a/lib/openai/internal/type/hash_of.rb b/lib/openai/internal/type/hash_of.rb new file mode 100644 index 00000000..4c4a00d0 --- /dev/null +++ b/lib/openai/internal/type/hash_of.rb @@ -0,0 +1,188 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + module Type + # @api private + # + # @abstract + # + # @generic Elem + # + # Hash of items of a given type. + class HashOf + include OpenAI::Internal::Type::Converter + include OpenAI::Internal::Util::SorbetRuntimeSupport + + private_class_method :new + + # @overload [](type_info, spec = {}) + # + # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] + # + # @param spec [Hash{Symbol=>Object}] . + # + # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const + # + # @option spec [Proc] :enum + # + # @option spec [Proc] :union + # + # @option spec [Boolean] :"nil?" + # + # @return [self] + def self.[](...) = new(...) + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def ===(other) + type = item_type + case other + in Hash + other.all? do |key, val| + case [key, val] + in [Symbol | String, ^type] + true + else + false + end + end + else + false + end + end + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def ==(other) + # rubocop:disable Layout/LineLength + other.is_a?(OpenAI::Internal::Type::HashOf) && other.nilable? == nilable? && other.item_type == item_type + # rubocop:enable Layout/LineLength + end + + # @api public + # + # @return [Integer] + def hash = [self.class, item_type].hash + + # @api private + # + # @param value [Hash{Object=>Object}, Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :translate_names + # + # @option state [Boolean] :strictness + # + # @option state [Hash{Symbol=>Object}] :exactness + # + # @option state [Class] :error + # + # @option state [Integer] :branched + # + # @return [Hash{Symbol=>Object}, Object] + def coerce(value, state:) + exactness = state.fetch(:exactness) + + unless value.is_a?(Hash) + exactness[:no] += 1 + state[:error] = TypeError.new("#{value.class} can't be coerced into #{Hash}") + return value + end + + target = item_type + exactness[:yes] += 1 + value + .to_h do |key, val| + k = key.is_a?(String) ? 
key.to_sym : key + v = + case [nilable?, val] + in [true, nil] + exactness[:yes] += 1 + nil + else + OpenAI::Internal::Type::Converter.coerce(target, val, state: state) + end + + exactness[:no] += 1 unless k.is_a?(Symbol) + [k, v] + end + end + + # @api private + # + # @param value [Hash{Object=>Object}, Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :can_retry + # + # @return [Hash{Symbol=>Object}, Object] + def dump(value, state:) + target = item_type + if value.is_a?(Hash) + value.transform_values do + OpenAI::Internal::Type::Converter.dump(target, _1, state: state) + end + else + super + end + end + + # @api private + # + # @return [Object] + def to_sorbet_type + T::Hash[OpenAI::Internal::Util::SorbetRuntimeSupport.to_sorbet_type(item_type)] + end + + # @api private + # + # @return [generic] + protected def item_type = @item_type_fn.call + + # @api private + # + # @return [Boolean] + protected def nilable? = @nilable + + # @api private + # + # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] + # + # @param spec [Hash{Symbol=>Object}] . + # + # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const + # + # @option spec [Proc] :enum + # + # @option spec [Proc] :union + # + # @option spec [Boolean] :"nil?" + def initialize(type_info, spec = {}) + @item_type_fn = OpenAI::Internal::Type::Converter.type_info(type_info || spec) + @meta = OpenAI::Internal::Type::Converter.meta_info(type_info, spec) + @nilable = spec.fetch(:nil?, false) + end + + # @api private + # + # @param depth [Integer] + # + # @return [String] + def inspect(depth: 0) + items = OpenAI::Internal::Type::Converter.inspect(item_type, depth: depth.succ) + + "#{self.class}[#{[items, nilable? ? 'nil' : nil].compact.join(' | ')}]" + end + end + end + end +end diff --git a/lib/openai/internal/type/request_parameters.rb b/lib/openai/internal/type/request_parameters.rb new file mode 100644 index 00000000..d2546558 --- /dev/null +++ b/lib/openai/internal/type/request_parameters.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + module Type + # @api private + module RequestParameters + # @!attribute request_options + # Options to specify HTTP behaviour for this request. + # + # @return [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + # @param mod [Module] + def self.included(mod) + raise ArgumentError.new(mod) unless mod <= OpenAI::Internal::Type::BaseModel + + mod.optional(:request_options, OpenAI::RequestOptions) + end + + # @api private + module Converter + # @api private + # + # @param params [Object] + # + # @return [Array(Object, Hash{Symbol=>Object})] + def dump_request(params) + state = {can_retry: true} + case (dumped = dump(params, state: state)) + in Hash + options = OpenAI::Internal::Util.coerce_hash!(dumped[:request_options]).to_h + request_options = state.fetch(:can_retry) ? 
options : {**options, max_retries: 0} + [dumped.except(:request_options), request_options] + else + [dumped, nil] + end + end + end + end + end + end +end diff --git a/lib/openai/internal/type/union.rb b/lib/openai/internal/type/union.rb new file mode 100644 index 00000000..0199d301 --- /dev/null +++ b/lib/openai/internal/type/union.rb @@ -0,0 +1,258 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + module Type + # @api private + # + # @example + # # `custom_tool_input_format` is a `OpenAI::CustomToolInputFormat` + # case custom_tool_input_format + # when OpenAI::CustomToolInputFormat::Text + # puts(custom_tool_input_format.type) + # when OpenAI::CustomToolInputFormat::Grammar + # puts(custom_tool_input_format.definition) + # else + # puts(custom_tool_input_format) + # end + # + # @example + # case custom_tool_input_format + # in {type: :text} + # # ... + # in {type: :grammar, definition: definition, syntax: syntax} + # puts(definition) + # else + # puts(custom_tool_input_format) + # end + module Union + include OpenAI::Internal::Type::Converter + include OpenAI::Internal::Util::SorbetRuntimeSupport + + # @api private + # + # All of the specified variant info for this union. + # + # @return [ArrayObject})>] + private def known_variants = (@known_variants ||= []) + + # @api private + # + # @return [ArrayObject})>] + protected def derefed_variants + known_variants.map { |key, variant_fn, meta| [key, variant_fn.call, meta] } + end + + # All of the specified variants for this union. + # + # @return [Array] + def variants = derefed_variants.map { _2 } + + # @api private + # + # @param property [Symbol] + private def discriminator(property) + case property + in Symbol + @discriminator = property + end + end + + # @api private + # + # @param key [Symbol, Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] + # + # @param spec [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] . + # + # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const + # + # @option spec [Proc] :enum + # + # @option spec [Proc] :union + # + # @option spec [Boolean] :"nil?" + private def variant(key, spec = nil) + meta = OpenAI::Internal::Type::Converter.meta_info(nil, spec) + variant_info = + case key + in Symbol + [key, OpenAI::Internal::Type::Converter.type_info(spec), meta] + in Proc | OpenAI::Internal::Type::Converter | Class | Hash + [nil, OpenAI::Internal::Type::Converter.type_info(key), meta] + end + + known_variants << variant_info + end + + # @api private + # + # @param value [Object] + # + # @return [OpenAI::Internal::Type::Converter, Class, nil] + private def resolve_variant(value) + case [@discriminator, value] + in [_, OpenAI::Internal::Type::BaseModel] + value.class + in [Symbol, Hash] + key = value.fetch(@discriminator) do + value.fetch(@discriminator.to_s, OpenAI::Internal::OMIT) + end + + return nil if key == OpenAI::Internal::OMIT + + key = key.to_sym if key.is_a?(String) + _, found = known_variants.find { |k,| k == key } + found&.call + else + nil + end + end + + # rubocop:disable Style/HashEachMethods + # rubocop:disable Style/CaseEquality + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def ===(other) + known_variants.any? 
do |_, variant_fn| + variant_fn.call === other + end + end + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def ==(other) + OpenAI::Internal::Type::Union === other && other.derefed_variants == derefed_variants + end + + # @api public + # + # @return [Integer] + def hash = variants.hash + + # @api private + # + # Tries to efficiently coerce the given value to one of the known variants. + # + # If the value cannot match any of the known variants, the coercion is considered + # non-viable and returns the original value. + # + # @param value [Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :translate_names + # + # @option state [Boolean] :strictness + # + # @option state [Hash{Symbol=>Object}] :exactness + # + # @option state [Class] :error + # + # @option state [Integer] :branched + # + # @return [Object] + def coerce(value, state:) + if (target = resolve_variant(value)) + return OpenAI::Internal::Type::Converter.coerce(target, value, state: state) + end + + strictness = state.fetch(:strictness) + exactness = state.fetch(:exactness) + + alternatives = [] + known_variants.each do |_, variant_fn| + target = variant_fn.call + exact = state[:exactness] = {yes: 0, no: 0, maybe: 0} + state[:branched] += 1 + + coerced = OpenAI::Internal::Type::Converter.coerce(target, value, state: state) + yes, no, maybe = exact.values + if (no + maybe).zero? || (!strictness && yes.positive?) + exact.each { exactness[_1] += _2 } + state[:exactness] = exactness + return coerced + elsif maybe.positive? + alternatives << [[-yes, -maybe, no], exact, coerced] + end + end + + case alternatives.sort_by!(&:first) + in [] + exactness[:no] += 1 + state[:error] = ArgumentError.new("no matching variant for #{value.inspect}") + value + in [[_, exact, coerced], *] + exact.each { exactness[_1] += _2 } + coerced + end + .tap { state[:exactness] = exactness } + ensure + state[:strictness] = strictness + end + + # @api private + # + # @param value [Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :can_retry + # + # @return [Object] + def dump(value, state:) + if (target = resolve_variant(value)) + return OpenAI::Internal::Type::Converter.dump(target, value, state: state) + end + + known_variants.each do + target = _2.call + return OpenAI::Internal::Type::Converter.dump(target, value, state: state) if target === value + end + + super + end + + # @api private + # + # @return [Object] + def to_sorbet_type + types = variants.map { OpenAI::Internal::Util::SorbetRuntimeSupport.to_sorbet_type(_1) }.uniq + case types + in [] + T.noreturn + in [type] + type + else + T.any(*types) + end + end + + # rubocop:enable Style/CaseEquality + # rubocop:enable Style/HashEachMethods + + # @api private + # + # @param depth [Integer] + # + # @return [String] + def inspect(depth: 0) + if depth.positive? + return is_a?(Module) ? super() : self.class.name + end + + members = variants.map { OpenAI::Internal::Type::Converter.inspect(_1, depth: depth.succ) } + prefix = is_a?(Module) ? name : self.class.name + + "#{prefix}[#{members.join(' | ')}]" + end + end + end + end +end diff --git a/lib/openai/internal/type/unknown.rb b/lib/openai/internal/type/unknown.rb new file mode 100644 index 00000000..76f3a043 --- /dev/null +++ b/lib/openai/internal/type/unknown.rb @@ -0,0 +1,81 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + module Type + # @api private + # + # @abstract + # + # When we don't know what to expect for the value. 
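+      #
+      # For example (illustrative): `Unknown` matches any value, and `coerce` passes
+      # values through unchanged:
+      #
+      #   OpenAI::Internal::Type::Unknown === 1      # => true
+      #   OpenAI::Internal::Type::Unknown === "text" # => true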
+ class Unknown + extend OpenAI::Internal::Type::Converter + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + # rubocop:disable Lint/UnusedMethodArgument + + private_class_method :new + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def self.===(other) = true + + # @api public + # + # @param other [Object] + # + # @return [Boolean] + def self.==(other) = other.is_a?(Class) && other <= OpenAI::Internal::Type::Unknown + + class << self + # @api private + # + # No coercion needed for Unknown type. + # + # @param value [Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :translate_names + # + # @option state [Boolean] :strictness + # + # @option state [Hash{Symbol=>Object}] :exactness + # + # @option state [Class] :error + # + # @option state [Integer] :branched + # + # @return [Object] + def coerce(value, state:) + state.fetch(:exactness)[:yes] += 1 + value + end + + # @!method dump(value, state:) + # @api private + # + # @param value [Object] + # + # @param state [Hash{Symbol=>Object}] . + # + # @option state [Boolean] :can_retry + # + # @return [Object] + + # @api private + # + # @return [Object] + def to_sorbet_type + T.anything + end + end + + # rubocop:enable Lint/UnusedMethodArgument + end + end + end +end diff --git a/lib/openai/internal/util.rb b/lib/openai/internal/util.rb new file mode 100644 index 00000000..bec08a64 --- /dev/null +++ b/lib/openai/internal/util.rb @@ -0,0 +1,914 @@ +# frozen_string_literal: true + +module OpenAI + module Internal + # @api private + module Util + # @api private + # + # @return [Float] + def self.monotonic_secs = Process.clock_gettime(Process::CLOCK_MONOTONIC) + + # @api private + # + # @param ns [Module, Class] + # + # @return [Enumerable] + def self.walk_namespaces(ns) + ns.constants(false).lazy.flat_map do + case (c = ns.const_get(_1, false)) + in Module | Class + walk_namespaces(c) + else + [] + end + end + .chain([ns]) + end + + class << self + # @api private + # + # @return [String] + def arch + case (arch = RbConfig::CONFIG["arch"])&.downcase + in nil + "unknown" + in /aarch64|arm64/ + "arm64" + in /x86_64/ + "x64" + in /arm/ + "arm" + else + "other:#{arch}" + end + end + + # @api private + # + # @return [String] + def os + case (host = RbConfig::CONFIG["host_os"])&.downcase + in nil + "Unknown" + in /linux/ + "Linux" + in /darwin/ + "MacOS" + in /freebsd/ + "FreeBSD" + in /openbsd/ + "OpenBSD" + in /mswin|mingw|cygwin|ucrt/ + "Windows" + else + "Other:#{host}" + end + end + end + + class << self + # @api private + # + # @param input [Object] + # + # @return [Boolean] + def primitive?(input) + case input + in true | false | Numeric | Symbol | String + true + else + false + end + end + + # @api private + # + # @param input [String, Boolean] + # + # @return [Boolean, Object] + def coerce_boolean(input) + case input.is_a?(String) ? 
input.downcase : input
+          in "true"
+            true
+          in "false"
+            false
+          else
+            input
+          end
+        end
+
+        # @api private
+        #
+        # @param input [String, Boolean]
+        #
+        # @raise [ArgumentError]
+        # @return [Boolean, nil]
+        def coerce_boolean!(input)
+          case coerce_boolean(input)
+          in true | false | nil => coerced
+            coerced
+          else
+            raise ArgumentError.new("Unable to coerce #{input.inspect} into boolean value")
+          end
+        end
+
+        # @api private
+        #
+        # @param input [String, Integer]
+        #
+        # @return [Integer, Object]
+        def coerce_integer(input)
+          Integer(input, exception: false) || input
+        end
+
+        # @api private
+        #
+        # @param input [String, Integer, Float]
+        #
+        # @return [Float, Object]
+        def coerce_float(input)
+          Float(input, exception: false) || input
+        end
+
+        # @api private
+        #
+        # @param input [Object]
+        #
+        # @return [Hash{Object=>Object}, Object]
+        def coerce_hash(input)
+          case input
+          in NilClass | Array | Set | Enumerator | StringIO | IO
+            input
+          else
+            input.respond_to?(:to_h) ? input.to_h : input
+          end
+        end
+
+        # @api private
+        #
+        # @param input [Object]
+        #
+        # @raise [ArgumentError]
+        # @return [Hash{Object=>Object}, nil]
+        def coerce_hash!(input)
+          case coerce_hash(input)
+          in Hash | nil => coerced
+            coerced
+          else
+            message = "Expected a #{Hash} or #{OpenAI::Internal::Type::BaseModel}, got #{input.inspect}"
+            raise ArgumentError.new(message)
+          end
+        end
+      end
+
+      class << self
+        # @api private
+        #
+        # @param lhs [Object]
+        # @param rhs [Object]
+        # @param concat [Boolean]
+        #
+        # @return [Object]
+        private def deep_merge_lr(lhs, rhs, concat: false)
+          case [lhs, rhs, concat]
+          in [Hash, Hash, _]
+            lhs.merge(rhs) { deep_merge_lr(_2, _3, concat: concat) }
+          in [Array, Array, true]
+            lhs.concat(rhs)
+          else
+            rhs
+          end
+        end
+
+        # @api private
+        #
+        # Recursively merge one hash with another. If the values at a given key are not
+        # both hashes, just take the new value.
+        #
+        # @param values [Array<Object>]
+        #
+        # @param sentinel [Object, nil] the value to return if no values are provided.
+        #
+        # @param concat [Boolean] whether to merge sequences by concatenation.
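+        #
+        # @example
+        #   # a sketch of the behaviour documented above
+        #   OpenAI::Internal::Util.deep_merge({a: {b: 1}}, {a: {c: 2}})
+        #   # => {a: {b: 1, c: 2}}
+        #
+        #   OpenAI::Internal::Util.deep_merge({a: [1]}, {a: [2]}, concat: true)
+        #   # => {a: [1, 2]}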
+ # + # @return [Object] + def deep_merge(*values, sentinel: nil, concat: false) + case values + in [value, *values] + values.reduce(value) do |acc, val| + deep_merge_lr(acc, val, concat: concat) + end + else + sentinel + end + end + + # @api private + # + # @param data [Hash{Symbol=>Object}, Array, Object] + # @param pick [Symbol, Integer, Array, Proc, nil] + # @param blk [Proc, nil] + # + # @return [Object, nil] + def dig(data, pick, &blk) + case [data, pick] + in [_, nil] + data + in [Hash, Symbol] | [Array, Integer] + data.fetch(pick) { blk&.call } + in [Hash | Array, Array] + pick.reduce(data) do |acc, key| + case acc + in Hash if acc.key?(key) + acc.fetch(key) + in Array if key.is_a?(Integer) && key < acc.length + acc[key] + else + return blk&.call + end + end + in [_, Proc] + pick.call(data) + else + blk&.call + end + end + end + + class << self + # @api private + # + # @param uri [URI::Generic] + # + # @return [String] + def uri_origin(uri) + "#{uri.scheme}://#{uri.host}#{":#{uri.port}" unless uri.port == uri.default_port}" + end + + # @api private + # + # @param path [String, Array] + # + # @return [String] + def interpolate_path(path) + case path + in String + path + in [] + "" + in [String => p, *interpolations] + encoded = interpolations.map { ERB::Util.url_encode(_1) } + format(p, *encoded) + end + end + end + + class << self + # @api private + # + # @param query [String, nil] + # + # @return [Hash{String=>Array}] + def decode_query(query) + CGI.parse(query.to_s) + end + + # @api private + # + # @param query [Hash{String=>Array, String, nil}, nil] + # + # @return [String, nil] + def encode_query(query) + query.to_h.empty? ? nil : URI.encode_www_form(query) + end + end + + class << self + # @api private + # + # @param url [URI::Generic, String] + # + # @return [Hash{Symbol=>String, Integer, nil}] + def parse_uri(url) + parsed = URI::Generic.component.zip(URI.split(url)).to_h + {**parsed, query: decode_query(parsed.fetch(:query))} + end + + # @api private + # + # @param parsed [Hash{Symbol=>String, Integer, nil}] . + # + # @option parsed [String, nil] :scheme + # + # @option parsed [String, nil] :host + # + # @option parsed [Integer, nil] :port + # + # @option parsed [String, nil] :path + # + # @option parsed [Hash{String=>Array}] :query + # + # @return [URI::Generic] + def unparse_uri(parsed) + URI::Generic.build(**parsed, query: encode_query(parsed.fetch(:query))) + end + + # @api private + # + # @param lhs [Hash{Symbol=>String, Integer, nil}] . + # + # @option lhs [String, nil] :scheme + # + # @option lhs [String, nil] :host + # + # @option lhs [Integer, nil] :port + # + # @option lhs [String, nil] :path + # + # @option lhs [Hash{String=>Array}] :query + # + # @param rhs [Hash{Symbol=>String, Integer, nil}] . + # + # @option rhs [String, nil] :scheme + # + # @option rhs [String, nil] :host + # + # @option rhs [Integer, nil] :port + # + # @option rhs [String, nil] :path + # + # @option rhs [Hash{String=>Array}] :query + # + # @return [URI::Generic] + def join_parsed_uri(lhs, rhs) + base_path, base_query = lhs.fetch_values(:path, :query) + slashed = base_path.end_with?("/") ? base_path : "#{base_path}/" + + parsed_path, parsed_query = parse_uri(rhs.fetch(:path)).fetch_values(:path, :query) + override = URI::Generic.build(**rhs.slice(:scheme, :host, :port), path: parsed_path) + + joined = URI.join(URI::Generic.build(lhs.except(:path, :query)), slashed, override) + query = deep_merge( + joined.path == base_path ? 
base_query : {}, + parsed_query, + rhs[:query].to_h, + concat: true + ) + + joined.query = encode_query(query) + joined + end + end + + class << self + # @api private + # + # @param headers [Hash{String=>String, Integer, Array, nil}] + # + # @return [Hash{String=>String}] + def normalized_headers(*headers) + {}.merge(*headers.compact).to_h do |key, val| + value = + case val + in Array + val.filter_map { _1&.to_s&.strip }.join(", ") + else + val&.to_s&.strip + end + [key.downcase, value] + end + end + end + + # @api private + # + # An adapter that satisfies the IO interface required by `::IO.copy_stream` + class ReadIOAdapter + # @api private + # + # @return [Boolean, nil] + def close? = @closing + + # @api private + def close + case @stream + in Enumerator + OpenAI::Internal::Util.close_fused!(@stream) + in IO if close? + @stream.close + else + end + end + + # @api private + # + # @param max_len [Integer, nil] + # + # @return [String] + private def read_enum(max_len) + case max_len + in nil + @stream.to_a.join + in Integer + @buf << @stream.next while @buf.length < max_len + @buf.slice!(..max_len) + end + rescue StopIteration + @stream = nil + @buf.slice!(0..) + end + + # @api private + # + # @param max_len [Integer, nil] + # @param out_string [String, nil] + # + # @return [String, nil] + def read(max_len = nil, out_string = nil) + case @stream + in nil + nil + in IO | StringIO + @stream.read(max_len, out_string) + in Enumerator + read = read_enum(max_len) + case out_string + in String + out_string.replace(read) + in nil + read + end + end + .tap(&@blk) + end + + # @api private + # + # @param src [String, Pathname, StringIO, Enumerable] + # @param blk [Proc] + # + # @yieldparam [String] + def initialize(src, &blk) + @stream = + case src + in String + StringIO.new(src) + in Pathname + @closing = true + src.open(binmode: true) + else + src + end + @buf = String.new + @blk = blk + end + end + + class << self + # @param blk [Proc] + # + # @yieldparam [Enumerator::Yielder] + # @return [Enumerable] + def writable_enum(&blk) + Enumerator.new do |y| + buf = String.new + y.define_singleton_method(:write) do + self << buf.replace(_1) + buf.bytesize + end + + blk.call(y) + end + end + end + + # @type [Regexp] + JSON_CONTENT = %r{^application/(?:vnd(?:\.[^.]+)*\+)?json(?!l)} + # @type [Regexp] + JSONL_CONTENT = %r{^application/(:?x-(?:n|l)djson)|(:?(?:x-)?jsonl)} + + class << self + # @api private + # + # @param y [Enumerator::Yielder] + # @param val [Object] + # @param closing [Array] + # @param content_type [String, nil] + private def write_multipart_content(y, val:, closing:, content_type: nil) + content_line = "Content-Type: %s\r\n\r\n" + + case val + in OpenAI::FilePart + return write_multipart_content( + y, + val: val.content, + closing: closing, + content_type: val.content_type + ) + in Pathname + y << format(content_line, content_type || "application/octet-stream") + io = val.open(binmode: true) + closing << io.method(:close) + IO.copy_stream(io, y) + in IO + y << format(content_line, content_type || "application/octet-stream") + IO.copy_stream(val, y) + in StringIO + y << format(content_line, content_type || "application/octet-stream") + y << val.string + in -> { primitive?(_1) } + y << format(content_line, content_type || "text/plain") + y << val.to_s + else + y << format(content_line, content_type || "application/json") + y << JSON.generate(val) + end + y << "\r\n" + end + + # @api private + # + # @param y [Enumerator::Yielder] + # @param boundary [String] + # @param key [Symbol, String] + 
# @param val [Object] + # @param closing [Array] + private def write_multipart_chunk(y, boundary:, key:, val:, closing:) + y << "--#{boundary}\r\n" + y << "Content-Disposition: form-data" + + unless key.nil? + name = ERB::Util.url_encode(key.to_s) + y << "; name=\"#{name}\"" + end + + case val + in OpenAI::FilePart unless val.filename.nil? + filename = ERB::Util.url_encode(val.filename) + y << "; filename=\"#{filename}\"" + in Pathname | IO + filename = ERB::Util.url_encode(::File.basename(val.to_path)) + y << "; filename=\"#{filename}\"" + else + end + y << "\r\n" + + write_multipart_content(y, val: val, closing: closing) + end + + # @api private + # + # https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.1.md#special-considerations-for-multipart-content + # + # @param body [Object] + # + # @return [Array(String, Enumerable)] + private def encode_multipart_streaming(body) + boundary = SecureRandom.urlsafe_base64(60) + + closing = [] + strio = writable_enum do |y| + case body + in Hash + body.each do |key, val| + case val + in Array if val.all? { primitive?(_1) } + val.each do |v| + write_multipart_chunk(y, boundary: boundary, key: key, val: v, closing: closing) + end + else + write_multipart_chunk(y, boundary: boundary, key: key, val: val, closing: closing) + end + end + else + write_multipart_chunk(y, boundary: boundary, key: nil, val: body, closing: closing) + end + y << "--#{boundary}--\r\n" + end + + fused_io = fused_enum(strio) { closing.each(&:call) } + [boundary, fused_io] + end + + # @api private + # + # @param headers [Hash{String=>String}] + # @param body [Object] + # + # @return [Object] + def encode_content(headers, body) + # rubocop:disable Style/CaseEquality + # rubocop:disable Layout/LineLength + content_type = headers["content-type"] + case [content_type, body] + in [OpenAI::Internal::Util::JSON_CONTENT, Hash | Array | -> { primitive?(_1) }] + [headers, JSON.generate(body)] + in [OpenAI::Internal::Util::JSONL_CONTENT, Enumerable] unless OpenAI::Internal::Type::FileInput === body + [headers, body.lazy.map { JSON.generate(_1) }] + in [%r{^multipart/form-data}, Hash | OpenAI::Internal::Type::FileInput] + boundary, strio = encode_multipart_streaming(body) + headers = {**headers, "content-type" => "#{content_type}; boundary=#{boundary}"} + [headers, strio] + in [_, Symbol | Numeric] + [headers, body.to_s] + in [_, StringIO] + [headers, body.string] + in [_, OpenAI::FilePart] + [headers, body.content] + else + [headers, body] + end + # rubocop:enable Layout/LineLength + # rubocop:enable Style/CaseEquality + end + + # @api private + # + # https://www.iana.org/assignments/character-sets/character-sets.xhtml + # + # @param content_type [String] + # @param text [String] + def force_charset!(content_type, text:) + charset = /charset=([^;\s]+)/.match(content_type)&.captures&.first + + return unless charset + + begin + encoding = Encoding.find(charset) + text.force_encoding(encoding) + rescue ArgumentError + nil + end + end + + # @api private + # + # Assumes each chunk in stream has `Encoding::BINARY`. 
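+      #
+      # @example
+      #   # a minimal sketch: a JSON body is joined, parsed, and symbolized
+      #   OpenAI::Internal::Util.decode_content(
+      #     {"content-type" => "application/json"},
+      #     stream: ["{\"a\":", "1}"]
+      #   )
+      #   # => {a: 1}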
+ # + # @param headers [Hash{String=>String}, Net::HTTPHeader] + # @param stream [Enumerable] + # @param suppress_error [Boolean] + # + # @raise [JSON::ParserError] + # @return [Object] + def decode_content(headers, stream:, suppress_error: false) + case (content_type = headers["content-type"]) + in OpenAI::Internal::Util::JSON_CONTENT + json = stream.to_a.join + begin + JSON.parse(json, symbolize_names: true) + rescue JSON::ParserError => e + raise e unless suppress_error + json + end + in OpenAI::Internal::Util::JSONL_CONTENT + lines = decode_lines(stream) + chain_fused(lines) do |y| + lines.each { y << JSON.parse(_1, symbolize_names: true) } + end + in %r{^text/event-stream} + lines = decode_lines(stream) + decode_sse(lines) + else + text = stream.to_a.join + force_charset!(content_type, text: text) + StringIO.new(text) + end + end + end + + class << self + # @api private + # + # https://doc.rust-lang.org/std/iter/trait.FusedIterator.html + # + # @param enum [Enumerable] + # @param external [Boolean] + # @param close [Proc] + # + # @return [Enumerable] + def fused_enum(enum, external: false, &close) + fused = false + iter = Enumerator.new do |y| + next if fused + + fused = true + if external + loop { y << enum.next } + else + enum.each(&y) + end + ensure + close&.call + close = nil + end + + iter.define_singleton_method(:rewind) do + fused = true + self + end + iter + end + + # @api private + # + # @param enum [Enumerable, nil] + def close_fused!(enum) + return unless enum.is_a?(Enumerator) + + # rubocop:disable Lint/UnreachableLoop + enum.rewind.each { break } + # rubocop:enable Lint/UnreachableLoop + end + + # @api private + # + # @param enum [Enumerable, nil] + # @param blk [Proc] + # + # @yieldparam [Enumerator::Yielder] + # @return [Enumerable] + def chain_fused(enum, &blk) + iter = Enumerator.new { blk.call(_1) } + fused_enum(iter) { close_fused!(enum) } + end + end + + class << self + # @api private + # + # Assumes Strings have been forced into having `Encoding::BINARY`. + # + # This decoder is responsible for reassembling lines split across multiple + # fragments. + # + # @param enum [Enumerable] + # + # @return [Enumerable] + def decode_lines(enum) + re = /(\r\n|\r|\n)/ + buffer = String.new + cr_seen = nil + + chain_fused(enum) do |y| + enum.each do |row| + offset = buffer.bytesize + buffer << row + while (match = re.match(buffer, cr_seen&.to_i || offset)) + case [match.captures.first, cr_seen] + in ["\r", nil] + cr_seen = match.end(1) + next + in ["\r" | "\r\n", Integer] + y << buffer.slice!(..(cr_seen.pred)) + else + y << buffer.slice!(..(match.end(1).pred)) + end + offset = 0 + cr_seen = nil + end + end + + y << buffer.slice!(..(cr_seen.pred)) unless cr_seen.nil? + y << buffer unless buffer.empty? + end + end + + # @api private + # + # https://html.spec.whatwg.org/multipage/server-sent-events.html#parsing-an-event-stream + # + # Assumes that `lines` has been decoded with `#decode_lines`. + # + # @param lines [Enumerable] + # + # @return [EnumerableObject}>] + def decode_sse(lines) + # rubocop:disable Metrics/BlockLength + chain_fused(lines) do |y| + blank = {event: nil, data: nil, id: nil, retry: nil} + current = {} + + lines.each do |line| + case line.sub(/\R$/, "") + in "" + next if current.empty? 
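+                # a blank line terminates the current event; emit the accumulated fields downstream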
+ y << {**blank, **current} + current = {} + in /^:/ + next + in /^([^:]+):\s?(.*)$/ + field, value = Regexp.last_match.captures + case field + in "event" + current.merge!(event: value) + in "data" + (current[:data] ||= String.new) << (value << "\n") + in "id" unless value.include?("\0") + current.merge!(id: value) + in "retry" if /^\d+$/ =~ value + current.merge!(retry: Integer(value)) + else + end + else + end + end + # rubocop:enable Metrics/BlockLength + + y << {**blank, **current} unless current.empty? + end + end + end + + # @api private + module SorbetRuntimeSupport + class MissingSorbetRuntimeError < ::RuntimeError + end + + # @api private + # + # @return [Hash{Symbol=>Object}] + private def sorbet_runtime_constants = @sorbet_runtime_constants ||= {} + + # @api private + # + # @param name [Symbol] + def const_missing(name) + super unless sorbet_runtime_constants.key?(name) + + unless Object.const_defined?(:T) + message = "Trying to access a Sorbet constant #{name.inspect} without `sorbet-runtime`." + raise MissingSorbetRuntimeError.new(message) + end + + sorbet_runtime_constants.fetch(name).call + end + + # @api private + # + # @param name [Symbol] + # + # @return [Boolean] + def sorbet_constant_defined?(name) = sorbet_runtime_constants.key?(name) + + # @api private + # + # @param name [Symbol] + # @param blk [Proc] + def define_sorbet_constant!(name, &blk) = sorbet_runtime_constants.store(name, blk) + + # @api private + # + # @return [Object] + def to_sorbet_type = raise NotImplementedError + + class << self + # @api private + # + # @param type [OpenAI::Internal::Util::SorbetRuntimeSupport, Object] + # + # @return [Object] + def to_sorbet_type(type) + case type + in OpenAI::Internal::Util::SorbetRuntimeSupport + type.to_sorbet_type + in Class | Module + type + in true | false + T::Boolean + else + type.class + end + end + end + end + + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + define_sorbet_constant!(:ParsedUri) do + T.type_alias do + { + scheme: T.nilable(String), + host: T.nilable(String), + port: T.nilable(Integer), + path: T.nilable(String), + query: T::Hash[String, T::Array[String]] + } + end + end + + define_sorbet_constant!(:ServerSentEvent) do + T.type_alias do + { + event: T.nilable(String), + data: T.nilable(String), + id: T.nilable(String), + retry: T.nilable(Integer) + } + end + end + end + end +end diff --git a/lib/openai/models.rb b/lib/openai/models.rb new file mode 100644 index 00000000..df4aaaa4 --- /dev/null +++ b/lib/openai/models.rb @@ -0,0 +1,259 @@ +# frozen_string_literal: true + +module OpenAI + [OpenAI::Internal::Type::BaseModel, *OpenAI::Internal::Type::BaseModel.subclasses].each do |cls| + cls.define_sorbet_constant!(:OrHash) { T.type_alias { T.any(cls, OpenAI::Internal::AnyHash) } } + end + + OpenAI::Internal::Util.walk_namespaces(OpenAI::Models).each do |mod| + case mod + in OpenAI::Internal::Type::Enum | OpenAI::Internal::Type::Union + mod.constants.each do |name| + case mod.const_get(name) + in true | false + mod.define_sorbet_constant!(:TaggedBoolean) { T.type_alias { T::Boolean } } + mod.define_sorbet_constant!(:OrBoolean) { T.type_alias { T::Boolean } } + in Integer + mod.define_sorbet_constant!(:TaggedInteger) { T.type_alias { Integer } } + mod.define_sorbet_constant!(:OrInteger) { T.type_alias { Integer } } + in Float + mod.define_sorbet_constant!(:TaggedFloat) { T.type_alias { Float } } + mod.define_sorbet_constant!(:OrFloat) { T.type_alias { Float } } + in Symbol + mod.define_sorbet_constant!(:TaggedSymbol) { T.type_alias { 
Symbol } } + mod.define_sorbet_constant!(:OrSymbol) { T.type_alias { T.any(Symbol, String) } } + else + end + end + else + end + end + + OpenAI::Internal::Util.walk_namespaces(OpenAI::Models) + .lazy + .grep(OpenAI::Internal::Type::Union) + .each do |mod| + const = :Variants + next if mod.sorbet_constant_defined?(const) + + mod.define_sorbet_constant!(const) { T.type_alias { mod.to_sorbet_type } } + end + + AllModels = OpenAI::Models::AllModels + + Audio = OpenAI::Models::Audio + + AudioModel = OpenAI::Models::AudioModel + + AudioResponseFormat = OpenAI::Models::AudioResponseFormat + + AutoFileChunkingStrategyParam = OpenAI::Models::AutoFileChunkingStrategyParam + + Batch = OpenAI::Models::Batch + + BatchCancelParams = OpenAI::Models::BatchCancelParams + + BatchCreateParams = OpenAI::Models::BatchCreateParams + + BatchError = OpenAI::Models::BatchError + + BatchListParams = OpenAI::Models::BatchListParams + + BatchRequestCounts = OpenAI::Models::BatchRequestCounts + + BatchRetrieveParams = OpenAI::Models::BatchRetrieveParams + + Beta = OpenAI::Models::Beta + + Chat = OpenAI::Models::Chat + + ChatModel = OpenAI::Models::ChatModel + + ComparisonFilter = OpenAI::Models::ComparisonFilter + + Completion = OpenAI::Models::Completion + + CompletionChoice = OpenAI::Models::CompletionChoice + + CompletionCreateParams = OpenAI::Models::CompletionCreateParams + + CompletionUsage = OpenAI::Models::CompletionUsage + + CompoundFilter = OpenAI::Models::CompoundFilter + + ContainerCreateParams = OpenAI::Models::ContainerCreateParams + + ContainerDeleteParams = OpenAI::Models::ContainerDeleteParams + + ContainerListParams = OpenAI::Models::ContainerListParams + + ContainerRetrieveParams = OpenAI::Models::ContainerRetrieveParams + + Containers = OpenAI::Models::Containers + + Conversations = OpenAI::Models::Conversations + + CreateEmbeddingResponse = OpenAI::Models::CreateEmbeddingResponse + + CustomToolInputFormat = OpenAI::Models::CustomToolInputFormat + + Embedding = OpenAI::Models::Embedding + + EmbeddingCreateParams = OpenAI::Models::EmbeddingCreateParams + + EmbeddingModel = OpenAI::Models::EmbeddingModel + + ErrorObject = OpenAI::Models::ErrorObject + + EvalCreateParams = OpenAI::Models::EvalCreateParams + + EvalCustomDataSourceConfig = OpenAI::Models::EvalCustomDataSourceConfig + + EvalDeleteParams = OpenAI::Models::EvalDeleteParams + + EvalListParams = OpenAI::Models::EvalListParams + + EvalRetrieveParams = OpenAI::Models::EvalRetrieveParams + + Evals = OpenAI::Models::Evals + + EvalStoredCompletionsDataSourceConfig = OpenAI::Models::EvalStoredCompletionsDataSourceConfig + + EvalUpdateParams = OpenAI::Models::EvalUpdateParams + + FileChunkingStrategy = OpenAI::Models::FileChunkingStrategy + + FileChunkingStrategyParam = OpenAI::Models::FileChunkingStrategyParam + + FileContent = OpenAI::Models::FileContent + + FileContentParams = OpenAI::Models::FileContentParams + + FileCreateParams = OpenAI::Models::FileCreateParams + + FileDeleted = OpenAI::Models::FileDeleted + + FileDeleteParams = OpenAI::Models::FileDeleteParams + + FileListParams = OpenAI::Models::FileListParams + + FileObject = OpenAI::Models::FileObject + + FilePurpose = OpenAI::Models::FilePurpose + + FileRetrieveParams = OpenAI::Models::FileRetrieveParams + + FineTuning = OpenAI::Models::FineTuning + + FunctionDefinition = OpenAI::Models::FunctionDefinition + + # @type [OpenAI::Internal::Type::Converter] + FunctionParameters = OpenAI::Models::FunctionParameters + + Graders = OpenAI::Models::Graders + + Image = OpenAI::Models::Image + + 
ImageCreateVariationParams = OpenAI::Models::ImageCreateVariationParams + + ImageEditCompletedEvent = OpenAI::Models::ImageEditCompletedEvent + + ImageEditParams = OpenAI::Models::ImageEditParams + + ImageEditPartialImageEvent = OpenAI::Models::ImageEditPartialImageEvent + + ImageEditStreamEvent = OpenAI::Models::ImageEditStreamEvent + + ImageGenCompletedEvent = OpenAI::Models::ImageGenCompletedEvent + + ImageGenerateParams = OpenAI::Models::ImageGenerateParams + + ImageGenPartialImageEvent = OpenAI::Models::ImageGenPartialImageEvent + + ImageGenStreamEvent = OpenAI::Models::ImageGenStreamEvent + + ImageModel = OpenAI::Models::ImageModel + + ImagesResponse = OpenAI::Models::ImagesResponse + + # @type [OpenAI::Internal::Type::Converter] + Metadata = OpenAI::Models::Metadata + + Model = OpenAI::Models::Model + + ModelDeleted = OpenAI::Models::ModelDeleted + + ModelDeleteParams = OpenAI::Models::ModelDeleteParams + + ModelListParams = OpenAI::Models::ModelListParams + + ModelRetrieveParams = OpenAI::Models::ModelRetrieveParams + + Moderation = OpenAI::Models::Moderation + + ModerationCreateParams = OpenAI::Models::ModerationCreateParams + + ModerationImageURLInput = OpenAI::Models::ModerationImageURLInput + + ModerationModel = OpenAI::Models::ModerationModel + + ModerationMultiModalInput = OpenAI::Models::ModerationMultiModalInput + + ModerationTextInput = OpenAI::Models::ModerationTextInput + + OtherFileChunkingStrategyObject = OpenAI::Models::OtherFileChunkingStrategyObject + + Reasoning = OpenAI::Models::Reasoning + + ReasoningEffort = OpenAI::Models::ReasoningEffort + + ResponseFormatJSONObject = OpenAI::Models::ResponseFormatJSONObject + + ResponseFormatJSONSchema = OpenAI::Models::ResponseFormatJSONSchema + + ResponseFormatText = OpenAI::Models::ResponseFormatText + + ResponseFormatTextGrammar = OpenAI::Models::ResponseFormatTextGrammar + + ResponseFormatTextPython = OpenAI::Models::ResponseFormatTextPython + + Responses = OpenAI::Models::Responses + + ResponsesModel = OpenAI::Models::ResponsesModel + + StaticFileChunkingStrategy = OpenAI::Models::StaticFileChunkingStrategy + + StaticFileChunkingStrategyObject = OpenAI::Models::StaticFileChunkingStrategyObject + + StaticFileChunkingStrategyObjectParam = OpenAI::Models::StaticFileChunkingStrategyObjectParam + + Upload = OpenAI::Models::Upload + + UploadCancelParams = OpenAI::Models::UploadCancelParams + + UploadCompleteParams = OpenAI::Models::UploadCompleteParams + + UploadCreateParams = OpenAI::Models::UploadCreateParams + + Uploads = OpenAI::Models::Uploads + + VectorStore = OpenAI::Models::VectorStore + + VectorStoreCreateParams = OpenAI::Models::VectorStoreCreateParams + + VectorStoreDeleted = OpenAI::Models::VectorStoreDeleted + + VectorStoreDeleteParams = OpenAI::Models::VectorStoreDeleteParams + + VectorStoreListParams = OpenAI::Models::VectorStoreListParams + + VectorStoreRetrieveParams = OpenAI::Models::VectorStoreRetrieveParams + + VectorStores = OpenAI::Models::VectorStores + + VectorStoreSearchParams = OpenAI::Models::VectorStoreSearchParams + + VectorStoreUpdateParams = OpenAI::Models::VectorStoreUpdateParams + + Webhooks = OpenAI::Models::Webhooks +end diff --git a/lib/openai/models/all_models.rb b/lib/openai/models/all_models.rb new file mode 100644 index 00000000..188ab131 --- /dev/null +++ b/lib/openai/models/all_models.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module AllModels + extend OpenAI::Internal::Type::Union + + variant String + + variant enum: -> { OpenAI::ChatModel } 
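+
+      # IDs listed in ResponsesOnlyModel below are usable only with the Responses API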
+ + variant enum: -> { OpenAI::AllModels::ResponsesOnlyModel } + + module ResponsesOnlyModel + extend OpenAI::Internal::Type::Enum + + O1_PRO = :"o1-pro" + O1_PRO_2025_03_19 = :"o1-pro-2025-03-19" + O3_PRO = :"o3-pro" + O3_PRO_2025_06_10 = :"o3-pro-2025-06-10" + O3_DEEP_RESEARCH = :"o3-deep-research" + O3_DEEP_RESEARCH_2025_06_26 = :"o3-deep-research-2025-06-26" + O4_MINI_DEEP_RESEARCH = :"o4-mini-deep-research" + O4_MINI_DEEP_RESEARCH_2025_06_26 = :"o4-mini-deep-research-2025-06-26" + COMPUTER_USE_PREVIEW = :"computer-use-preview" + COMPUTER_USE_PREVIEW_2025_03_11 = :"computer-use-preview-2025-03-11" + + # @!method self.values + # @return [Array] + end + + # @!method self.variants + # @return [Array(String, Symbol, OpenAI::Models::ChatModel, Symbol, OpenAI::Models::AllModels::ResponsesOnlyModel)] + end + end +end diff --git a/lib/openai/models/audio/speech_create_params.rb b/lib/openai/models/audio/speech_create_params.rb index fa2f4155..a260ccdd 100644 --- a/lib/openai/models/audio/speech_create_params.rb +++ b/lib/openai/models/audio/speech_create_params.rb @@ -3,10 +3,10 @@ module OpenAI module Models module Audio - class SpeechCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Audio::Speech#create + class SpeechCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute input # The text to generate audio for. The maximum length is 4096 characters. @@ -16,90 +16,133 @@ class SpeechCreateParams < OpenAI::BaseModel # @!attribute model # One of the available [TTS models](https://platform.openai.com/docs/models#tts): - # `tts-1` or `tts-1-hd` + # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. # # @return [String, Symbol, OpenAI::Models::Audio::SpeechModel] - required :model, union: -> { OpenAI::Models::Audio::SpeechCreateParams::Model } + required :model, union: -> { OpenAI::Audio::SpeechCreateParams::Model } # @!attribute voice # The voice to use when generating the audio. Supported voices are `alloy`, `ash`, - # `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the - # voices are available in the - # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). + # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + # `verse`. Previews of the voices are available in the + # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). + # + # @return [String, Symbol, OpenAI::Models::Audio::SpeechCreateParams::Voice] + required :voice, union: -> { OpenAI::Audio::SpeechCreateParams::Voice } + + # @!attribute instructions + # Control the voice of your generated audio with additional instructions. Does not + # work with `tts-1` or `tts-1-hd`. # - # @return [Symbol, OpenAI::Models::Audio::SpeechCreateParams::Voice] - required :voice, enum: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice } + # @return [String, nil] + optional :instructions, String - # @!attribute [r] response_format + # @!attribute response_format # The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, - # `wav`, and `pcm`. + # `wav`, and `pcm`. 
# # @return [Symbol, OpenAI::Models::Audio::SpeechCreateParams::ResponseFormat, nil] - optional :response_format, enum: -> { OpenAI::Models::Audio::SpeechCreateParams::ResponseFormat } - - # @!parse - # # @return [Symbol, OpenAI::Models::Audio::SpeechCreateParams::ResponseFormat] - # attr_writer :response_format + optional :response_format, enum: -> { OpenAI::Audio::SpeechCreateParams::ResponseFormat } - # @!attribute [r] speed + # @!attribute speed # The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is - # the default. + # the default. # # @return [Float, nil] optional :speed, Float - # @!parse - # # @return [Float] - # attr_writer :speed - - # @!parse - # # @param input [String] - # # @param model [String, Symbol, OpenAI::Models::Audio::SpeechModel] - # # @param voice [Symbol, OpenAI::Models::Audio::SpeechCreateParams::Voice] - # # @param response_format [Symbol, OpenAI::Models::Audio::SpeechCreateParams::ResponseFormat] - # # @param speed [Float] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(input:, model:, voice:, response_format: nil, speed: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!attribute stream_format + # The format to stream the audio in. Supported formats are `sse` and `audio`. + # `sse` is not supported for `tts-1` or `tts-1-hd`. + # + # @return [Symbol, OpenAI::Models::Audio::SpeechCreateParams::StreamFormat, nil] + optional :stream_format, enum: -> { OpenAI::Audio::SpeechCreateParams::StreamFormat } - # @abstract + # @!method initialize(input:, model:, voice:, instructions: nil, response_format: nil, speed: nil, stream_format: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Audio::SpeechCreateParams} for more details. + # + # @param input [String] The text to generate audio for. The maximum length is 4096 characters. + # + # @param model [String, Symbol, OpenAI::Models::Audio::SpeechModel] One of the available [TTS models](https://platform.openai.com/docs/models#tts): + # + # @param voice [String, Symbol, OpenAI::Models::Audio::SpeechCreateParams::Voice] The voice to use when generating the audio. Supported voices are `alloy`, `ash`, # + # @param instructions [String] Control the voice of your generated audio with additional instructions. Does not + # + # @param response_format [Symbol, OpenAI::Models::Audio::SpeechCreateParams::ResponseFormat] The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav + # + # @param speed [Float] The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is + # + # @param stream_format [Symbol, OpenAI::Models::Audio::SpeechCreateParams::StreamFormat] The format to stream the audio in. Supported formats are `sse` and `audio`. `sse + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # One of the available [TTS models](https://platform.openai.com/docs/models#tts): - # `tts-1` or `tts-1-hd` - class Model < OpenAI::Union + # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. + module Model + extend OpenAI::Internal::Type::Union + variant String - # One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1` or `tts-1-hd` - variant enum: -> { OpenAI::Models::Audio::SpeechModel } + # One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. 
+ variant enum: -> { OpenAI::Audio::SpeechModel } + + # @!method self.variants + # @return [Array(String, Symbol, OpenAI::Models::Audio::SpeechModel)] end - # @abstract - # # The voice to use when generating the audio. Supported voices are `alloy`, `ash`, - # `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the - # voices are available in the - # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). - class Voice < OpenAI::Enum + # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + # `verse`. Previews of the voices are available in the + # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). + module Voice + extend OpenAI::Internal::Type::Union + + variant String + + variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::ALLOY } + + variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::ASH } + + variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::BALLAD } + + variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::CORAL } + + variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::ECHO } + + variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::SAGE } + + variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::SHIMMER } + + variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::VERSE } + + # @!method self.variants + # @return [Array(String, Symbol)] + + define_sorbet_constant!(:Variants) do + T.type_alias { T.any(String, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol) } + end + + # @!group + ALLOY = :alloy ASH = :ash + BALLAD = :ballad CORAL = :coral ECHO = :echo - FABLE = :fable - ONYX = :onyx - NOVA = :nova SAGE = :sage SHIMMER = :shimmer + VERSE = :verse - finalize! + # @!endgroup end - # @abstract - # # The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, - # `wav`, and `pcm`. - class ResponseFormat < OpenAI::Enum + # `wav`, and `pcm`. + module ResponseFormat + extend OpenAI::Internal::Type::Enum + MP3 = :mp3 OPUS = :opus AAC = :aac @@ -107,7 +150,20 @@ class ResponseFormat < OpenAI::Enum WAV = :wav PCM = :pcm - finalize! + # @!method self.values + # @return [Array] + end + + # The format to stream the audio in. Supported formats are `sse` and `audio`. + # `sse` is not supported for `tts-1` or `tts-1-hd`. + module StreamFormat + extend OpenAI::Internal::Type::Enum + + SSE = :sse + AUDIO = :audio + + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/audio/speech_model.rb b/lib/openai/models/audio/speech_model.rb index 84765c9f..39245ea8 100644 --- a/lib/openai/models/audio/speech_model.rb +++ b/lib/openai/models/audio/speech_model.rb @@ -3,13 +3,15 @@ module OpenAI module Models module Audio - # @abstract - # - class SpeechModel < OpenAI::Enum + module SpeechModel + extend OpenAI::Internal::Type::Enum + TTS_1 = :"tts-1" TTS_1_HD = :"tts-1-hd" + GPT_4O_MINI_TTS = :"gpt-4o-mini-tts" - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/audio/transcription.rb b/lib/openai/models/audio/transcription.rb index 6bd2d97b..989688b9 100644 --- a/lib/openai/models/audio/transcription.rb +++ b/lib/openai/models/audio/transcription.rb @@ -3,22 +3,172 @@ module OpenAI module Models module Audio - class Transcription < OpenAI::BaseModel + class Transcription < OpenAI::Internal::Type::BaseModel # @!attribute text # The transcribed text. 
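+        #
+        #   Reading it back is plain attribute access (receiver name assumed
+        #   here for illustration):
+        #
+        #     transcription.text # => "Hello there."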
# # @return [String] required :text, String - # @!parse - # # Represents a transcription response returned by model, based on the provided - # # input. - # # - # # @param text [String] - # # - # def initialize(text:, **) = super + # @!attribute logprobs + # The log probabilities of the tokens in the transcription. Only returned with the + # models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added + # to the `include` array. + # + # @return [Array, nil] + optional :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::Transcription::Logprob] } + + # @!attribute usage + # Token usage statistics for the request. + # + # @return [OpenAI::Models::Audio::Transcription::Usage::Tokens, OpenAI::Models::Audio::Transcription::Usage::Duration, nil] + optional :usage, union: -> { OpenAI::Audio::Transcription::Usage } + + # @!method initialize(text:, logprobs: nil, usage: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Audio::Transcription} for more details. + # + # Represents a transcription response returned by model, based on the provided + # input. + # + # @param text [String] The transcribed text. + # + # @param logprobs [Array] The log probabilities of the tokens in the transcription. Only returned with the + # + # @param usage [OpenAI::Models::Audio::Transcription::Usage::Tokens, OpenAI::Models::Audio::Transcription::Usage::Duration] Token usage statistics for the request. + + class Logprob < OpenAI::Internal::Type::BaseModel + # @!attribute token + # The token in the transcription. + # + # @return [String, nil] + optional :token, String + + # @!attribute bytes + # The bytes of the token. + # + # @return [Array, nil] + optional :bytes, OpenAI::Internal::Type::ArrayOf[Float] + + # @!attribute logprob + # The log probability of the token. + # + # @return [Float, nil] + optional :logprob, Float + + # @!method initialize(token: nil, bytes: nil, logprob: nil) + # @param token [String] The token in the transcription. + # + # @param bytes [Array] The bytes of the token. + # + # @param logprob [Float] The log probability of the token. + end + + # Token usage statistics for the request. + # + # @see OpenAI::Models::Audio::Transcription#usage + module Usage + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Usage statistics for models billed by token usage. + variant :tokens, -> { OpenAI::Audio::Transcription::Usage::Tokens } + + # Usage statistics for models billed by audio input duration. + variant :duration, -> { OpenAI::Audio::Transcription::Usage::Duration } + + class Tokens < OpenAI::Internal::Type::BaseModel + # @!attribute input_tokens + # Number of input tokens billed for this request. + # + # @return [Integer] + required :input_tokens, Integer + + # @!attribute output_tokens + # Number of output tokens generated. + # + # @return [Integer] + required :output_tokens, Integer + + # @!attribute total_tokens + # Total number of tokens used (input + output). + # + # @return [Integer] + required :total_tokens, Integer + + # @!attribute type + # The type of the usage object. Always `tokens` for this variant. + # + # @return [Symbol, :tokens] + required :type, const: :tokens + + # @!attribute input_token_details + # Details about the input tokens billed for this request. 
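+          #
+          #   Illustrative accounting only (numbers assumed): a response with
+          #   `input_tokens: 45` and `output_tokens: 12` would carry
+          #   `total_tokens: 57`, i.e. input + output.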
+ # + # @return [OpenAI::Models::Audio::Transcription::Usage::Tokens::InputTokenDetails, nil] + optional :input_token_details, -> { OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails } + + # @!method initialize(input_tokens:, output_tokens:, total_tokens:, input_token_details: nil, type: :tokens) + # Usage statistics for models billed by token usage. + # + # @param input_tokens [Integer] Number of input tokens billed for this request. + # + # @param output_tokens [Integer] Number of output tokens generated. + # + # @param total_tokens [Integer] Total number of tokens used (input + output). + # + # @param input_token_details [OpenAI::Models::Audio::Transcription::Usage::Tokens::InputTokenDetails] Details about the input tokens billed for this request. + # + # @param type [Symbol, :tokens] The type of the usage object. Always `tokens` for this variant. + + # @see OpenAI::Models::Audio::Transcription::Usage::Tokens#input_token_details + class InputTokenDetails < OpenAI::Internal::Type::BaseModel + # @!attribute audio_tokens + # Number of audio tokens billed for this request. + # + # @return [Integer, nil] + optional :audio_tokens, Integer + + # @!attribute text_tokens + # Number of text tokens billed for this request. + # + # @return [Integer, nil] + optional :text_tokens, Integer + + # @!method initialize(audio_tokens: nil, text_tokens: nil) + # Details about the input tokens billed for this request. + # + # @param audio_tokens [Integer] Number of audio tokens billed for this request. + # + # @param text_tokens [Integer] Number of text tokens billed for this request. + end + end + + class Duration < OpenAI::Internal::Type::BaseModel + # @!attribute seconds + # Duration of the input audio in seconds. + # + # @return [Float] + required :seconds, Float + + # @!attribute type + # The type of the usage object. Always `duration` for this variant. + # + # @return [Symbol, :duration] + required :type, const: :duration + + # @!method initialize(seconds:, type: :duration) + # Usage statistics for models billed by audio input duration. + # + # @param seconds [Float] Duration of the input audio in seconds. + # + # @param type [Symbol, :duration] The type of the usage object. Always `duration` for this variant. + end - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method self.variants + # @return [Array(OpenAI::Models::Audio::Transcription::Usage::Tokens, OpenAI::Models::Audio::Transcription::Usage::Duration)] + end end end end diff --git a/lib/openai/models/audio/transcription_create_params.rb b/lib/openai/models/audio/transcription_create_params.rb index d0c79556..2ad4984e 100644 --- a/lib/openai/models/audio/transcription_create_params.rb +++ b/lib/openai/models/audio/transcription_create_params.rb @@ -3,134 +3,213 @@ module OpenAI module Models module Audio - class TranscriptionCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Audio::Transcriptions#create + # + # @see OpenAI::Resources::Audio::Transcriptions#create_streaming + class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute file # The audio file object (not file name) to transcribe, in one of these formats: - # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. 
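+      #
+      #   Any of the accepted shapes listed below can be passed; a sketch with
+      #   assumed file names (illustrative only):
+      #
+      #     file: Pathname("speech.mp3")
+      #     file: File.open("speech.mp3", "rb")
+      #     file: StringIO.new(raw_bytes) # raw_bytes: an assumed String of audio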
# - # @return [IO, StringIO] - required :file, IO + # @return [Pathname, StringIO, IO, String, OpenAI::FilePart] + required :file, OpenAI::Internal::Type::FileInput # @!attribute model - # ID of the model to use. Only `whisper-1` (which is powered by our open source - # Whisper V2 model) is currently available. + # ID of the model to use. The options are `gpt-4o-transcribe`, + # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + # Whisper V2 model). # # @return [String, Symbol, OpenAI::Models::AudioModel] - required :model, union: -> { OpenAI::Models::Audio::TranscriptionCreateParams::Model } + required :model, union: -> { OpenAI::Audio::TranscriptionCreateParams::Model } - # @!attribute [r] language + # @!attribute chunking_strategy + # Controls how the audio is cut into chunks. When set to `"auto"`, the server + # first normalizes loudness and then uses voice activity detection (VAD) to choose + # boundaries. `server_vad` object can be provided to tweak VAD detection + # parameters manually. If unset, the audio is transcribed as a single block. + # + # @return [Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil] + optional :chunking_strategy, + union: -> { OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy }, + nil?: true + + # @!attribute include + # Additional information to include in the transcription response. `logprobs` will + # return the log probabilities of the tokens in the response to understand the + # model's confidence in the transcription. `logprobs` only works with + # response_format set to `json` and only with the models `gpt-4o-transcribe` and + # `gpt-4o-mini-transcribe`. + # + # @return [Array, nil] + optional :include, -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Audio::TranscriptionInclude] } + + # @!attribute language # The language of the input audio. Supplying the input language in - # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) - # format will improve accuracy and latency. + # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + # format will improve accuracy and latency. # # @return [String, nil] optional :language, String - # @!parse - # # @return [String] - # attr_writer :language - - # @!attribute [r] prompt + # @!attribute prompt # An optional text to guide the model's style or continue a previous audio - # segment. The - # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - # should match the audio language. + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should match the audio language. # # @return [String, nil] optional :prompt, String - # @!parse - # # @return [String] - # attr_writer :prompt - - # @!attribute [r] response_format + # @!attribute response_format # The format of the output, in one of these options: `json`, `text`, `srt`, - # `verbose_json`, or `vtt`. + # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + # the only supported format is `json`. # # @return [Symbol, OpenAI::Models::AudioResponseFormat, nil] - optional :response_format, enum: -> { OpenAI::Models::AudioResponseFormat } - - # @!parse - # # @return [Symbol, OpenAI::Models::AudioResponseFormat] - # attr_writer :response_format + optional :response_format, enum: -> { OpenAI::AudioResponseFormat } - # @!attribute [r] temperature + # @!attribute temperature # The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the - # output more random, while lower values like 0.2 will make it more focused and - # deterministic. If set to 0, the model will use - # [log probability](https://en.wikipedia.org/wiki/Log_probability) to - # automatically increase the temperature until certain thresholds are hit. + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. # # @return [Float, nil] optional :temperature, Float - # @!parse - # # @return [Float] - # attr_writer :temperature - - # @!attribute [r] timestamp_granularities + # @!attribute timestamp_granularities # The timestamp granularities to populate for this transcription. - # `response_format` must be set `verbose_json` to use timestamp granularities. - # Either or both of these options are supported: `word`, or `segment`. Note: There - # is no additional latency for segment timestamps, but generating word timestamps - # incurs additional latency. + # `response_format` must be set `verbose_json` to use timestamp granularities. + # Either or both of these options are supported: `word`, or `segment`. Note: There + # is no additional latency for segment timestamps, but generating word timestamps + # incurs additional latency. # # @return [Array, nil] optional :timestamp_granularities, - -> { OpenAI::ArrayOf[enum: OpenAI::Models::Audio::TranscriptionCreateParams::TimestampGranularity] } - - # @!parse - # # @return [Array] - # attr_writer :timestamp_granularities - - # @!parse - # # @param file [IO, StringIO] - # # @param model [String, Symbol, OpenAI::Models::AudioModel] - # # @param language [String] - # # @param prompt [String] - # # @param response_format [Symbol, OpenAI::Models::AudioResponseFormat] - # # @param temperature [Float] - # # @param timestamp_granularities [Array] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize( - # file:, - # model:, - # language: nil, - # prompt: nil, - # response_format: nil, - # temperature: nil, - # timestamp_granularities: nil, - # request_options: {}, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract - # - # ID of the model to use. Only `whisper-1` (which is powered by our open source - # Whisper V2 model) is currently available. - class Model < OpenAI::Union + -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity] } + + # @!method initialize(file:, model:, chunking_strategy: nil, include: nil, language: nil, prompt: nil, response_format: nil, temperature: nil, timestamp_granularities: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Audio::TranscriptionCreateParams} for more details. + # + # @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The audio file object (not file name) to transcribe, in one of these formats: fl + # + # @param model [String, Symbol, OpenAI::Models::AudioModel] ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transc + # + # @param chunking_strategy [Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil] Controls how the audio is cut into chunks. 
When set to `"auto"`, the server firs + # + # @param include [Array] Additional information to include in the transcription response. + # + # @param language [String] The language of the input audio. Supplying the input language in [ISO-639-1](htt + # + # @param prompt [String] An optional text to guide the model's style or continue a previous audio segment + # + # @param response_format [Symbol, OpenAI::Models::AudioResponseFormat] The format of the output, in one of these options: `json`, `text`, `srt`, `verbo + # + # @param temperature [Float] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + # + # @param timestamp_granularities [Array] The timestamp granularities to populate for this transcription. `response_format + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + # ID of the model to use. The options are `gpt-4o-transcribe`, + # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + # Whisper V2 model). + module Model + extend OpenAI::Internal::Type::Union + variant String - # ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. - variant enum: -> { OpenAI::Models::AudioModel } + # ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source Whisper V2 model). + variant enum: -> { OpenAI::AudioModel } + + # @!method self.variants + # @return [Array(String, Symbol, OpenAI::Models::AudioModel)] end - # @abstract - # - class TimestampGranularity < OpenAI::Enum + # Controls how the audio is cut into chunks. When set to `"auto"`, the server + # first normalizes loudness and then uses voice activity detection (VAD) to choose + # boundaries. `server_vad` object can be provided to tweak VAD detection + # parameters manually. If unset, the audio is transcribed as a single block. + module ChunkingStrategy + extend OpenAI::Internal::Type::Union + + # Automatically set chunking parameters based on the audio. Must be set to `"auto"`. + variant const: :auto + + variant -> { OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig } + + class VadConfig < OpenAI::Internal::Type::BaseModel + # @!attribute type + # Must be set to `server_vad` to enable manual chunking using server side VAD. + # + # @return [Symbol, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type] + required :type, enum: -> { OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type } + + # @!attribute prefix_padding_ms + # Amount of audio to include before the VAD detected speech (in milliseconds). + # + # @return [Integer, nil] + optional :prefix_padding_ms, Integer + + # @!attribute silence_duration_ms + # Duration of silence to detect speech stop (in milliseconds). With shorter values + # the model will respond more quickly, but may jump in on short pauses from the + # user. + # + # @return [Integer, nil] + optional :silence_duration_ms, Integer + + # @!attribute threshold + # Sensitivity threshold (0.0 to 1.0) for voice activity detection. A higher + # threshold will require louder audio to activate the model, and thus might + # perform better in noisy environments. 
+ # + # @return [Float, nil] + optional :threshold, Float + + # @!method initialize(type:, prefix_padding_ms: nil, silence_duration_ms: nil, threshold: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig} + # for more details. + # + # @param type [Symbol, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type] Must be set to `server_vad` to enable manual chunking using server side VAD. + # + # @param prefix_padding_ms [Integer] Amount of audio to include before the VAD detected speech (in + # + # @param silence_duration_ms [Integer] Duration of silence to detect speech stop (in milliseconds). + # + # @param threshold [Float] Sensitivity threshold (0.0 to 1.0) for voice activity detection. A + + # Must be set to `server_vad` to enable manual chunking using server side VAD. + # + # @see OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig#type + module Type + extend OpenAI::Internal::Type::Enum + + SERVER_VAD = :server_vad + + # @!method self.values + # @return [Array] + end + end + + # @!method self.variants + # @return [Array(Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig)] + end + + module TimestampGranularity + extend OpenAI::Internal::Type::Enum + WORD = :word SEGMENT = :segment - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/audio/transcription_create_response.rb b/lib/openai/models/audio/transcription_create_response.rb index 1ff3100f..0bbe16b7 100644 --- a/lib/openai/models/audio/transcription_create_response.rb +++ b/lib/openai/models/audio/transcription_create_response.rb @@ -3,16 +3,23 @@ module OpenAI module Models module Audio - # @abstract - # # Represents a transcription response returned by model, based on the provided - # input. - class TranscriptionCreateResponse < OpenAI::Union + # input. + # + # @see OpenAI::Resources::Audio::Transcriptions#create + # + # @see OpenAI::Resources::Audio::Transcriptions#create_streaming + module TranscriptionCreateResponse + extend OpenAI::Internal::Type::Union + # Represents a transcription response returned by model, based on the provided input. - variant -> { OpenAI::Models::Audio::Transcription } + variant -> { OpenAI::Audio::Transcription } # Represents a verbose json transcription response returned by model, based on the provided input. 
- variant -> { OpenAI::Models::Audio::TranscriptionVerbose } + variant -> { OpenAI::Audio::TranscriptionVerbose } + + # @!method self.variants + # @return [Array(OpenAI::Models::Audio::Transcription, OpenAI::Models::Audio::TranscriptionVerbose)] end end end diff --git a/lib/openai/models/audio/transcription_include.rb b/lib/openai/models/audio/transcription_include.rb new file mode 100644 index 00000000..2351452b --- /dev/null +++ b/lib/openai/models/audio/transcription_include.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Audio + module TranscriptionInclude + extend OpenAI::Internal::Type::Enum + + LOGPROBS = :logprobs + + # @!method self.values + # @return [Array] + end + end + end +end diff --git a/lib/openai/models/audio/transcription_segment.rb b/lib/openai/models/audio/transcription_segment.rb index b219af36..3ca8d867 100644 --- a/lib/openai/models/audio/transcription_segment.rb +++ b/lib/openai/models/audio/transcription_segment.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Audio - class TranscriptionSegment < OpenAI::BaseModel + class TranscriptionSegment < OpenAI::Internal::Type::BaseModel # @!attribute id # Unique identifier of the segment. # @@ -12,14 +12,14 @@ class TranscriptionSegment < OpenAI::BaseModel # @!attribute avg_logprob # Average logprob of the segment. If the value is lower than -1, consider the - # logprobs failed. + # logprobs failed. # # @return [Float] required :avg_logprob, Float # @!attribute compression_ratio # Compression ratio of the segment. If the value is greater than 2.4, consider the - # compression failed. + # compression failed. # # @return [Float] required :compression_ratio, Float @@ -32,7 +32,7 @@ class TranscriptionSegment < OpenAI::BaseModel # @!attribute no_speech_prob # Probability of no speech in the segment. If the value is higher than 1.0 and the - # `avg_logprob` is below -1, consider this segment silent. + # `avg_logprob` is below -1, consider this segment silent. # # @return [Float] required :no_speech_prob, Float @@ -65,37 +65,31 @@ class TranscriptionSegment < OpenAI::BaseModel # Array of token IDs for the text content. # # @return [Array] - required :tokens, OpenAI::ArrayOf[Integer] + required :tokens, OpenAI::Internal::Type::ArrayOf[Integer] - # @!parse - # # @param id [Integer] - # # @param avg_logprob [Float] - # # @param compression_ratio [Float] - # # @param end_ [Float] - # # @param no_speech_prob [Float] - # # @param seek [Integer] - # # @param start [Float] - # # @param temperature [Float] - # # @param text [String] - # # @param tokens [Array] - # # - # def initialize( - # id:, - # avg_logprob:, - # compression_ratio:, - # end_:, - # no_speech_prob:, - # seek:, - # start:, - # temperature:, - # text:, - # tokens:, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, avg_logprob:, compression_ratio:, end_:, no_speech_prob:, seek:, start:, temperature:, text:, tokens:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Audio::TranscriptionSegment} for more details. + # + # @param id [Integer] Unique identifier of the segment. + # + # @param avg_logprob [Float] Average logprob of the segment. If the value is lower than -1, consider the logp + # + # @param compression_ratio [Float] Compression ratio of the segment. If the value is greater than 2.4, consider the + # + # @param end_ [Float] End time of the segment in seconds. 
+ # + # @param no_speech_prob [Float] Probability of no speech in the segment. If the value is higher than 1.0 and the + # + # @param seek [Integer] Seek offset of the segment. + # + # @param start [Float] Start time of the segment in seconds. + # + # @param temperature [Float] Temperature parameter used for generating the segment. + # + # @param text [String] Text content of the segment. + # + # @param tokens [Array] Array of token IDs for the text content. end end end diff --git a/lib/openai/models/audio/transcription_stream_event.rb b/lib/openai/models/audio/transcription_stream_event.rb new file mode 100644 index 00000000..2112080e --- /dev/null +++ b/lib/openai/models/audio/transcription_stream_event.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Audio + # Emitted when there is an additional text delta. This is also the first event + # emitted when the transcription starts. Only emitted when you + # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + # with the `Stream` parameter set to `true`. + module TranscriptionStreamEvent + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Emitted when there is an additional text delta. This is also the first event emitted when the transcription starts. Only emitted when you [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with the `Stream` parameter set to `true`. + variant :"transcript.text.delta", -> { OpenAI::Audio::TranscriptionTextDeltaEvent } + + # Emitted when the transcription is complete. Contains the complete transcription text. Only emitted when you [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with the `Stream` parameter set to `true`. + variant :"transcript.text.done", -> { OpenAI::Audio::TranscriptionTextDoneEvent } + + # @!method self.variants + # @return [Array(OpenAI::Models::Audio::TranscriptionTextDeltaEvent, OpenAI::Models::Audio::TranscriptionTextDoneEvent)] + end + end + end +end diff --git a/lib/openai/models/audio/transcription_text_delta_event.rb b/lib/openai/models/audio/transcription_text_delta_event.rb new file mode 100644 index 00000000..4c54ea63 --- /dev/null +++ b/lib/openai/models/audio/transcription_text_delta_event.rb @@ -0,0 +1,75 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Audio + class TranscriptionTextDeltaEvent < OpenAI::Internal::Type::BaseModel + # @!attribute delta + # The text delta that was additionally transcribed. + # + # @return [String] + required :delta, String + + # @!attribute type + # The type of the event. Always `transcript.text.delta`. + # + # @return [Symbol, :"transcript.text.delta"] + required :type, const: :"transcript.text.delta" + + # @!attribute logprobs + # The log probabilities of the delta. Only included if you + # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + # with the `include[]` parameter set to `logprobs`. + # + # @return [Array, nil] + optional :logprobs, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob] } + + # @!method initialize(delta:, logprobs: nil, type: :"transcript.text.delta") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Audio::TranscriptionTextDeltaEvent} for more details. + # + # Emitted when there is an additional text delta. 
This is also the first event + # emitted when the transcription starts. Only emitted when you + # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + # with the `Stream` parameter set to `true`. + # + # @param delta [String] The text delta that was additionally transcribed. + # + # @param logprobs [Array] The log probabilities of the delta. Only included if you [create a transcription + # + # @param type [Symbol, :"transcript.text.delta"] The type of the event. Always `transcript.text.delta`. + + class Logprob < OpenAI::Internal::Type::BaseModel + # @!attribute token + # The token that was used to generate the log probability. + # + # @return [String, nil] + optional :token, String + + # @!attribute bytes + # The bytes that were used to generate the log probability. + # + # @return [Array, nil] + optional :bytes, OpenAI::Internal::Type::ArrayOf[Integer] + + # @!attribute logprob + # The log probability of the token. + # + # @return [Float, nil] + optional :logprob, Float + + # @!method initialize(token: nil, bytes: nil, logprob: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Audio::TranscriptionTextDeltaEvent::Logprob} for more details. + # + # @param token [String] The token that was used to generate the log probability. + # + # @param bytes [Array] The bytes that were used to generate the log probability. + # + # @param logprob [Float] The log probability of the token. + end + end + end + end +end diff --git a/lib/openai/models/audio/transcription_text_done_event.rb b/lib/openai/models/audio/transcription_text_done_event.rb new file mode 100644 index 00000000..f49f062a --- /dev/null +++ b/lib/openai/models/audio/transcription_text_done_event.rb @@ -0,0 +1,152 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Audio + class TranscriptionTextDoneEvent < OpenAI::Internal::Type::BaseModel + # @!attribute text + # The text that was transcribed. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the event. Always `transcript.text.done`. + # + # @return [Symbol, :"transcript.text.done"] + required :type, const: :"transcript.text.done" + + # @!attribute logprobs + # The log probabilities of the individual tokens in the transcription. Only + # included if you + # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + # with the `include[]` parameter set to `logprobs`. + # + # @return [Array, nil] + optional :logprobs, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob] } + + # @!attribute usage + # Usage statistics for models billed by token usage. + # + # @return [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage, nil] + optional :usage, -> { OpenAI::Audio::TranscriptionTextDoneEvent::Usage } + + # @!method initialize(text:, logprobs: nil, usage: nil, type: :"transcript.text.done") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Audio::TranscriptionTextDoneEvent} for more details. + # + # Emitted when the transcription is complete. Contains the complete transcription + # text. Only emitted when you + # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + # with the `Stream` parameter set to `true`. + # + # @param text [String] The text that was transcribed. + # + # @param logprobs [Array] The log probabilities of the individual tokens in the transcription. 
Only includ + # + # @param usage [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage] Usage statistics for models billed by token usage. + # + # @param type [Symbol, :"transcript.text.done"] The type of the event. Always `transcript.text.done`. + + class Logprob < OpenAI::Internal::Type::BaseModel + # @!attribute token + # The token that was used to generate the log probability. + # + # @return [String, nil] + optional :token, String + + # @!attribute bytes + # The bytes that were used to generate the log probability. + # + # @return [Array, nil] + optional :bytes, OpenAI::Internal::Type::ArrayOf[Integer] + + # @!attribute logprob + # The log probability of the token. + # + # @return [Float, nil] + optional :logprob, Float + + # @!method initialize(token: nil, bytes: nil, logprob: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Audio::TranscriptionTextDoneEvent::Logprob} for more details. + # + # @param token [String] The token that was used to generate the log probability. + # + # @param bytes [Array] The bytes that were used to generate the log probability. + # + # @param logprob [Float] The log probability of the token. + end + + # @see OpenAI::Models::Audio::TranscriptionTextDoneEvent#usage + class Usage < OpenAI::Internal::Type::BaseModel + # @!attribute input_tokens + # Number of input tokens billed for this request. + # + # @return [Integer] + required :input_tokens, Integer + + # @!attribute output_tokens + # Number of output tokens generated. + # + # @return [Integer] + required :output_tokens, Integer + + # @!attribute total_tokens + # Total number of tokens used (input + output). + # + # @return [Integer] + required :total_tokens, Integer + + # @!attribute type + # The type of the usage object. Always `tokens` for this variant. + # + # @return [Symbol, :tokens] + required :type, const: :tokens + + # @!attribute input_token_details + # Details about the input tokens billed for this request. + # + # @return [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails, nil] + optional :input_token_details, -> { OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails } + + # @!method initialize(input_tokens:, output_tokens:, total_tokens:, input_token_details: nil, type: :tokens) + # Usage statistics for models billed by token usage. + # + # @param input_tokens [Integer] Number of input tokens billed for this request. + # + # @param output_tokens [Integer] Number of output tokens generated. + # + # @param total_tokens [Integer] Total number of tokens used (input + output). + # + # @param input_token_details [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails] Details about the input tokens billed for this request. + # + # @param type [Symbol, :tokens] The type of the usage object. Always `tokens` for this variant. + + # @see OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage#input_token_details + class InputTokenDetails < OpenAI::Internal::Type::BaseModel + # @!attribute audio_tokens + # Number of audio tokens billed for this request. + # + # @return [Integer, nil] + optional :audio_tokens, Integer + + # @!attribute text_tokens + # Number of text tokens billed for this request. + # + # @return [Integer, nil] + optional :text_tokens, Integer + + # @!method initialize(audio_tokens: nil, text_tokens: nil) + # Details about the input tokens billed for this request. + # + # @param audio_tokens [Integer] Number of audio tokens billed for this request. 
+ # + # @param text_tokens [Integer] Number of text tokens billed for this request. + end + end + end + end + end +end diff --git a/lib/openai/models/audio/transcription_verbose.rb b/lib/openai/models/audio/transcription_verbose.rb index 3b66f702..678f54dd 100644 --- a/lib/openai/models/audio/transcription_verbose.rb +++ b/lib/openai/models/audio/transcription_verbose.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Audio - class TranscriptionVerbose < OpenAI::BaseModel + class TranscriptionVerbose < OpenAI::Internal::Type::BaseModel # @!attribute duration # The duration of the input audio. # @@ -22,39 +22,61 @@ class TranscriptionVerbose < OpenAI::BaseModel # @return [String] required :text, String - # @!attribute [r] segments + # @!attribute segments # Segments of the transcribed text and their corresponding details. # # @return [Array, nil] - optional :segments, -> { OpenAI::ArrayOf[OpenAI::Models::Audio::TranscriptionSegment] } + optional :segments, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionSegment] } - # @!parse - # # @return [Array] - # attr_writer :segments + # @!attribute usage + # Usage statistics for models billed by audio input duration. + # + # @return [OpenAI::Models::Audio::TranscriptionVerbose::Usage, nil] + optional :usage, -> { OpenAI::Audio::TranscriptionVerbose::Usage } - # @!attribute [r] words + # @!attribute words # Extracted words and their corresponding timestamps. # # @return [Array, nil] - optional :words, -> { OpenAI::ArrayOf[OpenAI::Models::Audio::TranscriptionWord] } - - # @!parse - # # @return [Array] - # attr_writer :words - - # @!parse - # # Represents a verbose json transcription response returned by model, based on the - # # provided input. - # # - # # @param duration [Float] - # # @param language [String] - # # @param text [String] - # # @param segments [Array] - # # @param words [Array] - # # - # def initialize(duration:, language:, text:, segments: nil, words: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + optional :words, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionWord] } + + # @!method initialize(duration:, language:, text:, segments: nil, usage: nil, words: nil) + # Represents a verbose json transcription response returned by model, based on the + # provided input. + # + # @param duration [Float] The duration of the input audio. + # + # @param language [String] The language of the input audio. + # + # @param text [String] The transcribed text. + # + # @param segments [Array] Segments of the transcribed text and their corresponding details. + # + # @param usage [OpenAI::Models::Audio::TranscriptionVerbose::Usage] Usage statistics for models billed by audio input duration. + # + # @param words [Array] Extracted words and their corresponding timestamps. + + # @see OpenAI::Models::Audio::TranscriptionVerbose#usage + class Usage < OpenAI::Internal::Type::BaseModel + # @!attribute seconds + # Duration of the input audio in seconds. + # + # @return [Float] + required :seconds, Float + + # @!attribute type + # The type of the usage object. Always `duration` for this variant. + # + # @return [Symbol, :duration] + required :type, const: :duration + + # @!method initialize(seconds:, type: :duration) + # Usage statistics for models billed by audio input duration. + # + # @param seconds [Float] Duration of the input audio in seconds. + # + # @param type [Symbol, :duration] The type of the usage object. Always `duration` for this variant. 
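+          #
+          #   A hypothetical consumer sketch (receiver name assumed):
+          #
+          #     if verbose.usage&.type == :duration
+          #       puts "billed for #{verbose.usage.seconds} seconds of audio"
+          #     end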
+ end end end end diff --git a/lib/openai/models/audio/transcription_word.rb b/lib/openai/models/audio/transcription_word.rb index f331b86c..b9e5da59 100644 --- a/lib/openai/models/audio/transcription_word.rb +++ b/lib/openai/models/audio/transcription_word.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Audio - class TranscriptionWord < OpenAI::BaseModel + class TranscriptionWord < OpenAI::Internal::Type::BaseModel # @!attribute end_ # End time of the word in seconds. # @@ -22,14 +22,12 @@ class TranscriptionWord < OpenAI::BaseModel # @return [String] required :word, String - # @!parse - # # @param end_ [Float] - # # @param start [Float] - # # @param word [String] - # # - # def initialize(end_:, start:, word:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(end_:, start:, word:) + # @param end_ [Float] End time of the word in seconds. + # + # @param start [Float] Start time of the word in seconds. + # + # @param word [String] The text content of the word. end end end diff --git a/lib/openai/models/audio/translation.rb b/lib/openai/models/audio/translation.rb index 7b9597ca..635498f8 100644 --- a/lib/openai/models/audio/translation.rb +++ b/lib/openai/models/audio/translation.rb @@ -3,18 +3,14 @@ module OpenAI module Models module Audio - class Translation < OpenAI::BaseModel + class Translation < OpenAI::Internal::Type::BaseModel # @!attribute text # # @return [String] required :text, String - # @!parse - # # @param text [String] - # # - # def initialize(text:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(text:) + # @param text [String] end end end diff --git a/lib/openai/models/audio/translation_create_params.rb b/lib/openai/models/audio/translation_create_params.rb index 556b8419..35e3dd1b 100644 --- a/lib/openai/models/audio/translation_create_params.rb +++ b/lib/openai/models/audio/translation_create_params.rb @@ -3,84 +3,94 @@ module OpenAI module Models module Audio - class TranslationCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Audio::Translations#create + class TranslationCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute file # The audio file object (not file name) translate, in one of these formats: flac, - # mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + # mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. # - # @return [IO, StringIO] - required :file, IO + # @return [Pathname, StringIO, IO, String, OpenAI::FilePart] + required :file, OpenAI::Internal::Type::FileInput # @!attribute model # ID of the model to use. Only `whisper-1` (which is powered by our open source - # Whisper V2 model) is currently available. + # Whisper V2 model) is currently available. # # @return [String, Symbol, OpenAI::Models::AudioModel] - required :model, union: -> { OpenAI::Models::Audio::TranslationCreateParams::Model } + required :model, union: -> { OpenAI::Audio::TranslationCreateParams::Model } - # @!attribute [r] prompt + # @!attribute prompt # An optional text to guide the model's style or continue a previous audio - # segment. The - # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - # should be in English. + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should be in English. 
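+      #
+      #   An assumed example value (illustrative only):
+      #
+      #     prompt: "Technical glossary: GPU, latency, throughput."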
# # @return [String, nil] optional :prompt, String - # @!parse - # # @return [String] - # attr_writer :prompt - - # @!attribute [r] response_format + # @!attribute response_format # The format of the output, in one of these options: `json`, `text`, `srt`, - # `verbose_json`, or `vtt`. + # `verbose_json`, or `vtt`. # - # @return [Symbol, OpenAI::Models::AudioResponseFormat, nil] - optional :response_format, enum: -> { OpenAI::Models::AudioResponseFormat } - - # @!parse - # # @return [Symbol, OpenAI::Models::AudioResponseFormat] - # attr_writer :response_format + # @return [Symbol, OpenAI::Models::Audio::TranslationCreateParams::ResponseFormat, nil] + optional :response_format, enum: -> { OpenAI::Audio::TranslationCreateParams::ResponseFormat } - # @!attribute [r] temperature + # @!attribute temperature # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the - # output more random, while lower values like 0.2 will make it more focused and - # deterministic. If set to 0, the model will use - # [log probability](https://en.wikipedia.org/wiki/Log_probability) to - # automatically increase the temperature until certain thresholds are hit. + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. # # @return [Float, nil] optional :temperature, Float - # @!parse - # # @return [Float] - # attr_writer :temperature - - # @!parse - # # @param file [IO, StringIO] - # # @param model [String, Symbol, OpenAI::Models::AudioModel] - # # @param prompt [String] - # # @param response_format [Symbol, OpenAI::Models::AudioResponseFormat] - # # @param temperature [Float] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(file:, model:, prompt: nil, response_format: nil, temperature: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(file:, model:, prompt: nil, response_format: nil, temperature: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Audio::TranslationCreateParams} for more details. + # + # @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The audio file object (not file name) translate, in one of these formats: flac, + # + # @param model [String, Symbol, OpenAI::Models::AudioModel] ID of the model to use. Only `whisper-1` (which is powered by our open source Wh + # + # @param prompt [String] An optional text to guide the model's style or continue a previous audio segment + # + # @param response_format [Symbol, OpenAI::Models::Audio::TranslationCreateParams::ResponseFormat] The format of the output, in one of these options: `json`, `text`, `srt`, `verbo # + # @param temperature [Float] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # ID of the model to use. Only `whisper-1` (which is powered by our open source - # Whisper V2 model) is currently available. - class Model < OpenAI::Union + # Whisper V2 model) is currently available. + module Model + extend OpenAI::Internal::Type::Union + variant String # ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. 
- variant enum: -> { OpenAI::Models::AudioModel } + variant enum: -> { OpenAI::AudioModel } + + # @!method self.variants + # @return [Array(String, Symbol, OpenAI::Models::AudioModel)] + end + + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. + module ResponseFormat + extend OpenAI::Internal::Type::Enum + + JSON = :json + TEXT = :text + SRT = :srt + VERBOSE_JSON = :verbose_json + VTT = :vtt + + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/audio/translation_create_response.rb b/lib/openai/models/audio/translation_create_response.rb index 278a37e5..7e056468 100644 --- a/lib/openai/models/audio/translation_create_response.rb +++ b/lib/openai/models/audio/translation_create_response.rb @@ -3,12 +3,16 @@ module OpenAI module Models module Audio - # @abstract - # - class TranslationCreateResponse < OpenAI::Union - variant -> { OpenAI::Models::Audio::Translation } + # @see OpenAI::Resources::Audio::Translations#create + module TranslationCreateResponse + extend OpenAI::Internal::Type::Union - variant -> { OpenAI::Models::Audio::TranslationVerbose } + variant -> { OpenAI::Audio::Translation } + + variant -> { OpenAI::Audio::TranslationVerbose } + + # @!method self.variants + # @return [Array(OpenAI::Models::Audio::Translation, OpenAI::Models::Audio::TranslationVerbose)] end end end diff --git a/lib/openai/models/audio/translation_verbose.rb b/lib/openai/models/audio/translation_verbose.rb index eb6ecfdd..1bb16b1e 100644 --- a/lib/openai/models/audio/translation_verbose.rb +++ b/lib/openai/models/audio/translation_verbose.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Audio - class TranslationVerbose < OpenAI::BaseModel + class TranslationVerbose < OpenAI::Internal::Type::BaseModel # @!attribute duration # The duration of the input audio. # @@ -22,25 +22,20 @@ class TranslationVerbose < OpenAI::BaseModel # @return [String] required :text, String - # @!attribute [r] segments + # @!attribute segments # Segments of the translated text and their corresponding details. # # @return [Array, nil] - optional :segments, -> { OpenAI::ArrayOf[OpenAI::Models::Audio::TranscriptionSegment] } + optional :segments, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionSegment] } - # @!parse - # # @return [Array] - # attr_writer :segments - - # @!parse - # # @param duration [Float] - # # @param language [String] - # # @param text [String] - # # @param segments [Array] - # # - # def initialize(duration:, language:, text:, segments: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(duration:, language:, text:, segments: nil) + # @param duration [Float] The duration of the input audio. + # + # @param language [String] The language of the output translation (always `english`). + # + # @param text [String] The translated text. + # + # @param segments [Array] Segments of the translated text and their corresponding details. end end end diff --git a/lib/openai/models/audio_model.rb b/lib/openai/models/audio_model.rb index 1043030f..8e0e194e 100644 --- a/lib/openai/models/audio_model.rb +++ b/lib/openai/models/audio_model.rb @@ -2,12 +2,15 @@ module OpenAI module Models - # @abstract - # - class AudioModel < OpenAI::Enum + module AudioModel + extend OpenAI::Internal::Type::Enum + WHISPER_1 = :"whisper-1" + GPT_4O_TRANSCRIBE = :"gpt-4o-transcribe" + GPT_4O_MINI_TRANSCRIBE = :"gpt-4o-mini-transcribe" - finalize! 
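+      # Enum members resolve to plain symbols; an illustrative lookup:
+      #
+      #   OpenAI::Models::AudioModel::WHISPER_1 # => :"whisper-1"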
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/audio_response_format.rb b/lib/openai/models/audio_response_format.rb index 8b92a3b9..5644ca89 100644 --- a/lib/openai/models/audio_response_format.rb +++ b/lib/openai/models/audio_response_format.rb @@ -2,18 +2,20 @@ module OpenAI module Models - # @abstract - # # The format of the output, in one of these options: `json`, `text`, `srt`, - # `verbose_json`, or `vtt`. - class AudioResponseFormat < OpenAI::Enum + # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + # the only supported format is `json`. + module AudioResponseFormat + extend OpenAI::Internal::Type::Enum + JSON = :json TEXT = :text SRT = :srt VERBOSE_JSON = :verbose_json VTT = :vtt - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/auto_file_chunking_strategy_param.rb b/lib/openai/models/auto_file_chunking_strategy_param.rb index 124d43f7..0bb7685f 100644 --- a/lib/openai/models/auto_file_chunking_strategy_param.rb +++ b/lib/openai/models/auto_file_chunking_strategy_param.rb @@ -2,22 +2,18 @@ module OpenAI module Models - class AutoFileChunkingStrategyParam < OpenAI::BaseModel + class AutoFileChunkingStrategyParam < OpenAI::Internal::Type::BaseModel # @!attribute type # Always `auto`. # # @return [Symbol, :auto] required :type, const: :auto - # @!parse - # # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of - # # `800` and `chunk_overlap_tokens` of `400`. - # # - # # @param type [Symbol, :auto] - # # - # def initialize(type: :auto, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(type: :auto) + # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + # `800` and `chunk_overlap_tokens` of `400`. + # + # @param type [Symbol, :auto] Always `auto`. end end end diff --git a/lib/openai/models/batch.rb b/lib/openai/models/batch.rb index 91778516..b8dffe10 100644 --- a/lib/openai/models/batch.rb +++ b/lib/openai/models/batch.rb @@ -2,7 +2,8 @@ module OpenAI module Models - class Batch < OpenAI::BaseModel + # @see OpenAI::Resources::Batches#create + class Batch < OpenAI::Internal::Type::BaseModel # @!attribute id # # @return [String] @@ -42,192 +43,140 @@ class Batch < OpenAI::BaseModel # The current status of the batch. # # @return [Symbol, OpenAI::Models::Batch::Status] - required :status, enum: -> { OpenAI::Models::Batch::Status } + required :status, enum: -> { OpenAI::Batch::Status } - # @!attribute [r] cancelled_at + # @!attribute cancelled_at # The Unix timestamp (in seconds) for when the batch was cancelled. # # @return [Integer, nil] optional :cancelled_at, Integer - # @!parse - # # @return [Integer] - # attr_writer :cancelled_at - - # @!attribute [r] cancelling_at + # @!attribute cancelling_at # The Unix timestamp (in seconds) for when the batch started cancelling. # # @return [Integer, nil] optional :cancelling_at, Integer - # @!parse - # # @return [Integer] - # attr_writer :cancelling_at - - # @!attribute [r] completed_at + # @!attribute completed_at # The Unix timestamp (in seconds) for when the batch was completed. # # @return [Integer, nil] optional :completed_at, Integer - # @!parse - # # @return [Integer] - # attr_writer :completed_at - - # @!attribute [r] error_file_id + # @!attribute error_file_id # The ID of the file containing the outputs of requests with errors. 
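+      #
+      #   Illustrative only: a partially failed batch exposes both files, e.g.
+      #   `batch.error_file_id` for the errored requests alongside
+      #   `batch.output_file_id` for the successes.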
# # @return [String, nil] optional :error_file_id, String - # @!parse - # # @return [String] - # attr_writer :error_file_id - - # @!attribute [r] errors + # @!attribute errors # # @return [OpenAI::Models::Batch::Errors, nil] - optional :errors, -> { OpenAI::Models::Batch::Errors } - - # @!parse - # # @return [OpenAI::Models::Batch::Errors] - # attr_writer :errors + optional :errors, -> { OpenAI::Batch::Errors } - # @!attribute [r] expired_at + # @!attribute expired_at # The Unix timestamp (in seconds) for when the batch expired. # # @return [Integer, nil] optional :expired_at, Integer - # @!parse - # # @return [Integer] - # attr_writer :expired_at - - # @!attribute [r] expires_at + # @!attribute expires_at # The Unix timestamp (in seconds) for when the batch will expire. # # @return [Integer, nil] optional :expires_at, Integer - # @!parse - # # @return [Integer] - # attr_writer :expires_at - - # @!attribute [r] failed_at + # @!attribute failed_at # The Unix timestamp (in seconds) for when the batch failed. # # @return [Integer, nil] optional :failed_at, Integer - # @!parse - # # @return [Integer] - # attr_writer :failed_at - - # @!attribute [r] finalizing_at + # @!attribute finalizing_at # The Unix timestamp (in seconds) for when the batch started finalizing. # # @return [Integer, nil] optional :finalizing_at, Integer - # @!parse - # # @return [Integer] - # attr_writer :finalizing_at - - # @!attribute [r] in_progress_at + # @!attribute in_progress_at # The Unix timestamp (in seconds) for when the batch started processing. # # @return [Integer, nil] optional :in_progress_at, Integer - # @!parse - # # @return [Integer] - # attr_writer :in_progress_at - # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true - # @!attribute [r] output_file_id + # @!attribute output_file_id # The ID of the file containing the outputs of successfully executed requests. # # @return [String, nil] optional :output_file_id, String - # @!parse - # # @return [String] - # attr_writer :output_file_id - - # @!attribute [r] request_counts + # @!attribute request_counts # The request counts for different statuses within the batch. 
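+      #
+      #   Illustrative numbers only: a finished batch might report
+      #   `total: 100, completed: 97, failed: 3`.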
# # @return [OpenAI::Models::BatchRequestCounts, nil] - optional :request_counts, -> { OpenAI::Models::BatchRequestCounts } - - # @!parse - # # @return [OpenAI::Models::BatchRequestCounts] - # attr_writer :request_counts - - # @!parse - # # @param id [String] - # # @param completion_window [String] - # # @param created_at [Integer] - # # @param endpoint [String] - # # @param input_file_id [String] - # # @param status [Symbol, OpenAI::Models::Batch::Status] - # # @param cancelled_at [Integer] - # # @param cancelling_at [Integer] - # # @param completed_at [Integer] - # # @param error_file_id [String] - # # @param errors [OpenAI::Models::Batch::Errors] - # # @param expired_at [Integer] - # # @param expires_at [Integer] - # # @param failed_at [Integer] - # # @param finalizing_at [Integer] - # # @param in_progress_at [Integer] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param output_file_id [String] - # # @param request_counts [OpenAI::Models::BatchRequestCounts] - # # @param object [Symbol, :batch] - # # - # def initialize( - # id:, - # completion_window:, - # created_at:, - # endpoint:, - # input_file_id:, - # status:, - # cancelled_at: nil, - # cancelling_at: nil, - # completed_at: nil, - # error_file_id: nil, - # errors: nil, - # expired_at: nil, - # expires_at: nil, - # failed_at: nil, - # finalizing_at: nil, - # in_progress_at: nil, - # metadata: nil, - # output_file_id: nil, - # request_counts: nil, - # object: :batch, - # ** - # ) - # super - # end + optional :request_counts, -> { OpenAI::BatchRequestCounts } - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(id:, completion_window:, created_at:, endpoint:, input_file_id:, status:, cancelled_at: nil, cancelling_at: nil, completed_at: nil, error_file_id: nil, errors: nil, expired_at: nil, expires_at: nil, failed_at: nil, finalizing_at: nil, in_progress_at: nil, metadata: nil, output_file_id: nil, request_counts: nil, object: :batch) + # Some parameter documentation has been truncated, see {OpenAI::Models::Batch} + # for more details. + # + # @param id [String] + # + # @param completion_window [String] The time frame within which the batch should be processed. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the batch was created. + # + # @param endpoint [String] The OpenAI API endpoint used by the batch. + # + # @param input_file_id [String] The ID of the input file for the batch. + # + # @param status [Symbol, OpenAI::Models::Batch::Status] The current status of the batch. # + # @param cancelled_at [Integer] The Unix timestamp (in seconds) for when the batch was cancelled. + # + # @param cancelling_at [Integer] The Unix timestamp (in seconds) for when the batch started cancelling. + # + # @param completed_at [Integer] The Unix timestamp (in seconds) for when the batch was completed. + # + # @param error_file_id [String] The ID of the file containing the outputs of requests with errors. + # + # @param errors [OpenAI::Models::Batch::Errors] + # + # @param expired_at [Integer] The Unix timestamp (in seconds) for when the batch expired. + # + # @param expires_at [Integer] The Unix timestamp (in seconds) for when the batch will expire. + # + # @param failed_at [Integer] The Unix timestamp (in seconds) for when the batch failed. + # + # @param finalizing_at [Integer] The Unix timestamp (in seconds) for when the batch started finalizing. + # + # @param in_progress_at [Integer] The Unix timestamp (in seconds) for when the batch started processing.
+ # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param output_file_id [String] The ID of the file containing the outputs of successfully executed requests. + # + # @param request_counts [OpenAI::Models::BatchRequestCounts] The request counts for different statuses within the batch. + # + # @param object [Symbol, :batch] The object type, which is always `batch`. + # The current status of the batch. - class Status < OpenAI::Enum + # + # @see OpenAI::Models::Batch#status + module Status + extend OpenAI::Internal::Type::Enum + VALIDATING = :validating FAILED = :failed IN_PROGRESS = :in_progress @@ -237,36 +186,27 @@ class Status < OpenAI::Enum CANCELLING = :cancelling CANCELLED = :cancelled - finalize! + # @!method self.values + # @return [Array] end - class Errors < OpenAI::BaseModel - # @!attribute [r] data + # @see OpenAI::Models::Batch#errors + class Errors < OpenAI::Internal::Type::BaseModel + # @!attribute data # # @return [Array, nil] - optional :data, -> { OpenAI::ArrayOf[OpenAI::Models::BatchError] } - - # @!parse - # # @return [Array] - # attr_writer :data + optional :data, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::BatchError] } - # @!attribute [r] object + # @!attribute object # The object type, which is always `list`. # # @return [String, nil] optional :object, String - # @!parse - # # @return [String] - # attr_writer :object - - # @!parse - # # @param data [Array] - # # @param object [String] - # # - # def initialize(data: nil, object: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data: nil, object: nil) + # @param data [Array] + # + # @param object [String] The object type, which is always `list`. end end end diff --git a/lib/openai/models/batch_cancel_params.rb b/lib/openai/models/batch_cancel_params.rb index 3e1d39cd..9068ce31 100644 --- a/lib/openai/models/batch_cancel_params.rb +++ b/lib/openai/models/batch_cancel_params.rb @@ -2,17 +2,13 @@ module OpenAI module Models - class BatchCancelParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Batches#cancel + class BatchCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/batch_create_params.rb b/lib/openai/models/batch_create_params.rb index 8fa58e3f..bdb51f2e 100644 --- a/lib/openai/models/batch_create_params.rb +++ b/lib/openai/models/batch_create_params.rb @@ -2,85 +2,128 @@ module OpenAI module Models - class BatchCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Batches#create + class BatchCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute completion_window # The time frame within which the batch should be processed. Currently only `24h` - # is supported. + # is supported. 
# # @return [Symbol, OpenAI::Models::BatchCreateParams::CompletionWindow] - required :completion_window, enum: -> { OpenAI::Models::BatchCreateParams::CompletionWindow } + required :completion_window, enum: -> { OpenAI::BatchCreateParams::CompletionWindow } # @!attribute endpoint # The endpoint to be used for all requests in the batch. Currently - # `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. - # Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 - # embedding inputs across all requests in the batch. + # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + # are supported. Note that `/v1/embeddings` batches are also restricted to a + # maximum of 50,000 embedding inputs across all requests in the batch. # # @return [Symbol, OpenAI::Models::BatchCreateParams::Endpoint] - required :endpoint, enum: -> { OpenAI::Models::BatchCreateParams::Endpoint } + required :endpoint, enum: -> { OpenAI::BatchCreateParams::Endpoint } # @!attribute input_file_id # The ID of an uploaded file that contains requests for the new batch. # - # See [upload file](https://platform.openai.com/docs/api-reference/files/create) - # for how to upload a file. + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. # - # Your input file must be formatted as a - # [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), - # and must be uploaded with the purpose `batch`. The file can contain up to 50,000 - # requests, and can be up to 200 MB in size. + # Your input file must be formatted as a + # [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), + # and must be uploaded with the purpose `batch`. The file can contain up to 50,000 + # requests, and can be up to 200 MB in size. # # @return [String] required :input_file_id, String # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true - - # @!parse - # # @param completion_window [Symbol, OpenAI::Models::BatchCreateParams::CompletionWindow] - # # @param endpoint [Symbol, OpenAI::Models::BatchCreateParams::Endpoint] - # # @param input_file_id [String] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(completion_window:, endpoint:, input_file_id:, metadata: nil, request_options: {}, **) = super + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!attribute output_expires_after + # The expiration policy for the output and/or error files that are generated for a + # batch.
+ # + # @return [OpenAI::Models::BatchCreateParams::OutputExpiresAfter, nil] + optional :output_expires_after, -> { OpenAI::BatchCreateParams::OutputExpiresAfter } - # @abstract + # @!method initialize(completion_window:, endpoint:, input_file_id:, metadata: nil, output_expires_after: nil, request_options: {}) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::BatchCreateParams} for more details. + # + # @param completion_window [Symbol, OpenAI::Models::BatchCreateParams::CompletionWindow] The time frame within which the batch should be processed. Currently only `24h` + # + # @param endpoint [Symbol, OpenAI::Models::BatchCreateParams::Endpoint] The endpoint to be used for all requests in the batch. Currently `/v1/responses` # + # @param input_file_id [String] The ID of an uploaded file that contains requests for the new batch. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param output_expires_after [OpenAI::Models::BatchCreateParams::OutputExpiresAfter] The expiration policy for the output and/or error files that are generated for a + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # The time frame within which the batch should be processed. Currently only `24h` - # is supported. - class CompletionWindow < OpenAI::Enum - NUMBER_24H = :"24h" + # is supported. + module CompletionWindow + extend OpenAI::Internal::Type::Enum + + COMPLETION_WINDOW_24H = :"24h" - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # - # The endpoint to be used for all requests in the batch. Currently - # `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. - # Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 - # embedding inputs across all requests in the batch. - class Endpoint < OpenAI::Enum + # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + # are supported. Note that `/v1/embeddings` batches are also restricted to a + # maximum of 50,000 embedding inputs across all requests in the batch. + module Endpoint + extend OpenAI::Internal::Type::Enum + + V1_RESPONSES = :"/v1/responses" V1_CHAT_COMPLETIONS = :"/v1/chat/completions" V1_EMBEDDINGS = :"/v1/embeddings" V1_COMPLETIONS = :"/v1/completions" - finalize! + # @!method self.values + # @return [Array] + end + + class OutputExpiresAfter < OpenAI::Internal::Type::BaseModel + # @!attribute anchor + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `created_at`. Note that the anchor is the file creation time, not the time the + # batch is created. + # + # @return [Symbol, :created_at] + required :anchor, const: :created_at + + # @!attribute seconds + # The number of seconds after the anchor time that the file will expire. Must be + # between 3600 (1 hour) and 2592000 (30 days). + # + # @return [Integer] + required :seconds, Integer + + # @!method initialize(seconds:, anchor: :created_at) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::BatchCreateParams::OutputExpiresAfter} for more details. + # + # The expiration policy for the output and/or error files that are generated for a + # batch. + # + # @param seconds [Integer] The number of seconds after the anchor time that the file will expire. Must be b + # + # @param anchor [Symbol, :created_at] Anchor timestamp after which the expiration policy applies.
Supported anchors: ` end end end diff --git a/lib/openai/models/batch_error.rb b/lib/openai/models/batch_error.rb index 5333a1ab..538e1183 100644 --- a/lib/openai/models/batch_error.rb +++ b/lib/openai/models/batch_error.rb @@ -2,48 +2,39 @@ module OpenAI module Models - class BatchError < OpenAI::BaseModel - # @!attribute [r] code + class BatchError < OpenAI::Internal::Type::BaseModel + # @!attribute code # An error code identifying the error type. # # @return [String, nil] optional :code, String - # @!parse - # # @return [String] - # attr_writer :code - # @!attribute line # The line number of the input file where the error occurred, if applicable. # # @return [Integer, nil] optional :line, Integer, nil?: true - # @!attribute [r] message + # @!attribute message # A human-readable message providing more details about the error. # # @return [String, nil] optional :message, String - # @!parse - # # @return [String] - # attr_writer :message - # @!attribute param # The name of the parameter that caused the error, if applicable. # # @return [String, nil] optional :param, String, nil?: true - # @!parse - # # @param code [String] - # # @param line [Integer, nil] - # # @param message [String] - # # @param param [String, nil] - # # - # def initialize(code: nil, line: nil, message: nil, param: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(code: nil, line: nil, message: nil, param: nil) + # @param code [String] An error code identifying the error type. + # + # @param line [Integer, nil] The line number of the input file where the error occurred, if applicable. + # + # @param message [String] A human-readable message providing more details about the error. + # + # @param param [String, nil] The name of the parameter that caused the error, if applicable. end end end diff --git a/lib/openai/models/batch_list_params.rb b/lib/openai/models/batch_list_params.rb index 368bc08b..1a01f061 100644 --- a/lib/openai/models/batch_list_params.rb +++ b/lib/openai/models/batch_list_params.rb @@ -2,43 +2,36 @@ module OpenAI module Models - class BatchListParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Batches#list + class BatchListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!attribute [r] after + # @!attribute after # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. # # @return [String, nil] optional :after, String - # @!parse - # # @return [String] - # attr_writer :after - - # @!attribute [r] limit + # @!attribute limit # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. 
# # @return [Integer, nil] optional :limit, Integer - # @!parse - # # @return [Integer] - # attr_writer :limit - - # @!parse - # # @param after [String] - # # @param limit [Integer] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(after: nil, limit: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(after: nil, limit: nil, request_options: {}) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::BatchListParams} for more details. + # + # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place + # + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1 + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/batch_request_counts.rb b/lib/openai/models/batch_request_counts.rb index 2029a404..e8e7caf0 100644 --- a/lib/openai/models/batch_request_counts.rb +++ b/lib/openai/models/batch_request_counts.rb @@ -2,7 +2,7 @@ module OpenAI module Models - class BatchRequestCounts < OpenAI::BaseModel + class BatchRequestCounts < OpenAI::Internal::Type::BaseModel # @!attribute completed # Number of requests that have been completed successfully. # @@ -21,16 +21,14 @@ class BatchRequestCounts < OpenAI::BaseModel # @return [Integer] required :total, Integer - # @!parse - # # The request counts for different statuses within the batch. - # # - # # @param completed [Integer] - # # @param failed [Integer] - # # @param total [Integer] - # # - # def initialize(completed:, failed:, total:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(completed:, failed:, total:) + # The request counts for different statuses within the batch. + # + # @param completed [Integer] Number of requests that have been completed successfully. + # + # @param failed [Integer] Number of requests that have failed. + # + # @param total [Integer] Total number of requests in the batch.
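The reworked batch surface above lends itself to a short usage sketch. The following is a hypothetical example, not part of this diff: the client construction and the file ID are assumptions, while the method name, parameter names, and enum values come from the `@see OpenAI::Resources::Batches#create` tag and the docs above.

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"]) # assumed setup

batch = client.batches.create(
  endpoint: :"/v1/responses",   # newly supported endpoint
  completion_window: :"24h",
  input_file_id: "file-abc123", # placeholder ID
  output_expires_after: {anchor: :created_at, seconds: 86_400} # new param
)

# Enums are now plain modules exposing `self.values`.
puts OpenAI::Models::Batch::Status.values.include?(batch.status) # => true

# With the `attr_writer`s gone, unset optional attributes simply read as nil.
puts batch.request_counts&.total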
end end end diff --git a/lib/openai/models/batch_retrieve_params.rb b/lib/openai/models/batch_retrieve_params.rb index 252ed4ca..a03157a4 100644 --- a/lib/openai/models/batch_retrieve_params.rb +++ b/lib/openai/models/batch_retrieve_params.rb @@ -2,17 +2,13 @@ module OpenAI module Models - class BatchRetrieveParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Batches#retrieve + class BatchRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/beta/assistant.rb b/lib/openai/models/beta/assistant.rb index c7a250d4..7bbd1868 100644 --- a/lib/openai/models/beta/assistant.rb +++ b/lib/openai/models/beta/assistant.rb @@ -3,7 +3,8 @@ module OpenAI module Models module Beta - class Assistant < OpenAI::BaseModel + # @see OpenAI::Resources::Beta::Assistants#create + class Assistant < OpenAI::Internal::Type::BaseModel # @!attribute id # The identifier, which can be referenced in API endpoints. # @@ -24,28 +25,28 @@ class Assistant < OpenAI::BaseModel # @!attribute instructions # The system instructions that the assistant uses. The maximum length is 256,000 - # characters. + # characters. # # @return [String, nil] required :instructions, String, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - required :metadata, OpenAI::HashOf[String], nil?: true + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute model # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. # # @return [String] required :model, String @@ -64,175 +65,151 @@ class Assistant < OpenAI::BaseModel # @!attribute tools # A list of tool enabled on the assistant. There can be a maximum of 128 tools per - # assistant. Tools can be of types `code_interpreter`, `file_search`, or - # `function`. + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. 
# # @return [Array] - required :tools, -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool] } + required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool] } # @!attribute response_format # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. - # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. # # @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] - optional :response_format, union: -> { OpenAI::Models::Beta::AssistantResponseFormatOption }, nil?: true + optional :response_format, union: -> { OpenAI::Beta::AssistantResponseFormatOption }, nil?: true # @!attribute temperature # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. # # @return [Float, nil] optional :temperature, Float, nil?: true # @!attribute tool_resources # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. 
For example, the `code_interpreter` tool requires - a list of file IDs, while the `file_search` tool requires a list of vector store - IDs. + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. # # @return [OpenAI::Models::Beta::Assistant::ToolResources, nil] - optional :tool_resources, -> { OpenAI::Models::Beta::Assistant::ToolResources }, nil?: true + optional :tool_resources, -> { OpenAI::Beta::Assistant::ToolResources }, nil?: true # @!attribute top_p # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or temperature but not both. + # We generally recommend altering this or temperature but not both. # # @return [Float, nil] optional :top_p, Float, nil?: true - # @!parse - # # Represents an `assistant` that can call the model and use tools. - # # - # # @param id [String] - # # @param created_at [Integer] - # # @param description [String, nil] - # # @param instructions [String, nil] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param model [String] - # # @param name [String, nil] - # # @param tools [Array] - # # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] - # # @param temperature [Float, nil] - # # @param tool_resources [OpenAI::Models::Beta::Assistant::ToolResources, nil] - # # @param top_p [Float, nil] - # # @param object [Symbol, :assistant] - # # - # def initialize( - # id:, - # created_at:, - # description:, - # instructions:, - # metadata:, - # model:, - # name:, - # tools:, - # response_format: nil, - # temperature: nil, - # tool_resources: nil, - # top_p: nil, - # object: :assistant, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class ToolResources < OpenAI::BaseModel - # @!attribute [r] code_interpreter + # @!method initialize(id:, created_at:, description:, instructions:, metadata:, model:, name:, tools:, response_format: nil, temperature: nil, tool_resources: nil, top_p: nil, object: :assistant) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::Assistant} for more details. + # + # Represents an `assistant` that can call the model and use tools. + # + # @param id [String] The identifier, which can be referenced in API endpoints. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the assistant was created. + # + # @param description [String, nil] The description of the assistant. The maximum length is 512 characters. + # + # @param instructions [String, nil] The system instructions that the assistant uses. The maximum length is 256,000 c + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param model [String] ID of the model to use. You can use the [List models](https://platform.openai.co + # + # @param name [String, nil] The name of the assistant. The maximum length is 256 characters.
+ # + # @param tools [Array] A list of tools enabled on the assistant. There can be a maximum of 128 tools per + # + # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https: + # + # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m + # + # @param tool_resources [OpenAI::Models::Beta::Assistant::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe + # + # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the + # + # @param object [Symbol, :assistant] The object type, which is always `assistant`. + + # @see OpenAI::Models::Beta::Assistant#tool_resources + class ToolResources < OpenAI::Internal::Type::BaseModel + # @!attribute code_interpreter # # @return [OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter, nil] - optional :code_interpreter, -> { OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter } - - # @!parse - # # @return [OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter] - # attr_writer :code_interpreter + optional :code_interpreter, -> { OpenAI::Beta::Assistant::ToolResources::CodeInterpreter } - # @!attribute [r] file_search + # @!attribute file_search # # @return [OpenAI::Models::Beta::Assistant::ToolResources::FileSearch, nil] - optional :file_search, -> { OpenAI::Models::Beta::Assistant::ToolResources::FileSearch } - - # @!parse - # # @return [OpenAI::Models::Beta::Assistant::ToolResources::FileSearch] - # attr_writer :file_search - - # @!parse - # # A set of resources that are used by the assistant's tools. The resources are - # # specific to the type of tool. For example, the `code_interpreter` tool requires - # # a list of file IDs, while the `file_search` tool requires a list of vector store - # # IDs. - # # - # # @param code_interpreter [OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter] - # # @param file_search [OpenAI::Models::Beta::Assistant::ToolResources::FileSearch] - # # - # def initialize(code_interpreter: nil, file_search: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class CodeInterpreter < OpenAI::BaseModel - # @!attribute [r] file_ids + optional :file_search, -> { OpenAI::Beta::Assistant::ToolResources::FileSearch } + + # @!method initialize(code_interpreter: nil, file_search: nil) + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. + # + # @param code_interpreter [OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter] + # @param file_search [OpenAI::Models::Beta::Assistant::ToolResources::FileSearch] + + # @see OpenAI::Models::Beta::Assistant::ToolResources#code_interpreter + class CodeInterpreter < OpenAI::Internal::Type::BaseModel + # @!attribute file_ids # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter`` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool.
# # @return [Array, nil] - optional :file_ids, OpenAI::ArrayOf[String] - # @!parse - # # @return [Array] - # attr_writer :file_ids - - # @!parse - # # @param file_ids [Array] - # # - # def initialize(file_ids: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] + # @!method initialize(file_ids: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter} for more + # details. + # + # @param file_ids [Array] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made end - class FileSearch < OpenAI::BaseModel - # @!attribute [r] vector_store_ids + # @see OpenAI::Models::Beta::Assistant::ToolResources#file_search + class FileSearch < OpenAI::Internal::Type::BaseModel + # @!attribute vector_store_ids # The ID of the - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this assistant. There can be a maximum of 1 vector store attached to - # the assistant. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. # # @return [Array, nil] - optional :vector_store_ids, OpenAI::ArrayOf[String] - # @!parse - # # @return [Array] - # attr_writer :vector_store_ids - - # @!parse - # # @param vector_store_ids [Array] - # # - # def initialize(vector_store_ids: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(vector_store_ids: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::Assistant::ToolResources::FileSearch} for more details. + # + # @param vector_store_ids [Array] The ID of the [vector store](https://platform.openai.com/docs/api-reference/vect end end end diff --git a/lib/openai/models/beta/assistant_create_params.rb b/lib/openai/models/beta/assistant_create_params.rb index de543e84..3fe326c7 100644 --- a/lib/openai/models/beta/assistant_create_params.rb +++ b/lib/openai/models/beta/assistant_create_params.rb @@ -3,20 +3,20 @@ module OpenAI module Models module Beta - class AssistantCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Assistants#create + class AssistantCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute model # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. # # @return [String, Symbol, OpenAI::Models::ChatModel] - required :model, union: -> { OpenAI::Models::Beta::AssistantCreateParams::Model } + required :model, union: -> { OpenAI::Beta::AssistantCreateParams::Model } # @!attribute description # The description of the assistant. The maximum length is 512 characters.
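Because `model` is typed as a union of a raw String and the `ChatModel` enum (see the `Model` module later in this file), either spelling should coerce to the same wire value. A minimal sketch, assuming a configured client and that the enum defines a `GPT_4O` constant:

# Both forms should be accepted by the `model` union.
client.beta.assistants.create(model: "gpt-4o")
client.beta.assistants.create(model: OpenAI::Models::ChatModel::GPT_4O)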
@@ -26,21 +26,21 @@ class AssistantCreateParams < OpenAI::BaseModel # @!attribute instructions # The system instructions that the assistant uses. The maximum length is 256,000 - # characters. + # characters. # # @return [String, nil] optional :instructions, String, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute name # The name of the assistant. The maximum length is 256 characters. @@ -49,301 +49,263 @@ class AssistantCreateParams < OpenAI::BaseModel optional :name, String, nil?: true # @!attribute reasoning_effort - # **o-series models only** - # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] - optional :reasoning_effort, enum: -> { OpenAI::Models::ReasoningEffort }, nil?: true + optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true # @!attribute response_format # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. 
# - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. # # @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] - optional :response_format, union: -> { OpenAI::Models::Beta::AssistantResponseFormatOption }, nil?: true + optional :response_format, union: -> { OpenAI::Beta::AssistantResponseFormatOption }, nil?: true # @!attribute temperature # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. # # @return [Float, nil] optional :temperature, Float, nil?: true # @!attribute tool_resources # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. # # @return [OpenAI::Models::Beta::AssistantCreateParams::ToolResources, nil] - optional :tool_resources, -> { OpenAI::Models::Beta::AssistantCreateParams::ToolResources }, nil?: true + optional :tool_resources, -> { OpenAI::Beta::AssistantCreateParams::ToolResources }, nil?: true - # @!attribute [r] tools + # @!attribute tools # A list of tool enabled on the assistant. There can be a maximum of 128 tools per - # assistant. Tools can be of types `code_interpreter`, `file_search`, or - # `function`. + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. # # @return [Array, nil] - optional :tools, -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool] } - - # @!parse - # # @return [Array] - # attr_writer :tools + optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool] } # @!attribute top_p # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. 
So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or temperature but not both. + # We generally recommend altering this or temperature but not both. # # @return [Float, nil] optional :top_p, Float, nil?: true - # @!parse - # # @param model [String, Symbol, OpenAI::Models::ChatModel] - # # @param description [String, nil] - # # @param instructions [String, nil] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param name [String, nil] - # # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] - # # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] - # # @param temperature [Float, nil] - # # @param tool_resources [OpenAI::Models::Beta::AssistantCreateParams::ToolResources, nil] - # # @param tools [Array] - # # @param top_p [Float, nil] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize( - # model:, - # description: nil, - # instructions: nil, - # metadata: nil, - # name: nil, - # reasoning_effort: nil, - # response_format: nil, - # temperature: nil, - # tool_resources: nil, - # tools: nil, - # top_p: nil, - # request_options: {}, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(model:, description: nil, instructions: nil, metadata: nil, name: nil, reasoning_effort: nil, response_format: nil, temperature: nil, tool_resources: nil, tools: nil, top_p: nil, request_options: {}) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::AssistantCreateParams} for more details. + # + # @param model [String, Symbol, OpenAI::Models::ChatModel] ID of the model to use. You can use the [List models](https://platform.openai.co + # + # @param description [String, nil] The description of the assistant. The maximum length is 512 characters. + # + # @param instructions [String, nil] The system instructions that the assistant uses. The maximum length is 256,000 c + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param name [String, nil] The name of the assistant. The maximum length is 256 characters. + # + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for + # + # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https: + # + # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # + # @param tool_resources [OpenAI::Models::Beta::AssistantCreateParams::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe + # + # @param tools [Array] A list of tools enabled on the assistant. There can be a maximum of 128 tools per + # + # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # ID of the model to use.
You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. - class Model < OpenAI::Union + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + module Model + extend OpenAI::Internal::Type::Union + variant String # ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) for descriptions of them. - variant enum: -> { OpenAI::Models::ChatModel } + variant enum: -> { OpenAI::ChatModel } + + # @!method self.variants + # @return [Array(String, Symbol, OpenAI::Models::ChatModel)] end - class ToolResources < OpenAI::BaseModel - # @!attribute [r] code_interpreter + class ToolResources < OpenAI::Internal::Type::BaseModel + # @!attribute code_interpreter # # @return [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter, nil] - optional :code_interpreter, - -> { OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter } - - # @!parse - # # @return [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter] - # attr_writer :code_interpreter + optional :code_interpreter, -> { OpenAI::Beta::AssistantCreateParams::ToolResources::CodeInterpreter } - # @!attribute [r] file_search + # @!attribute file_search # # @return [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch, nil] - optional :file_search, -> { OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch } - - # @!parse - # # @return [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch] - # attr_writer :file_search - - # @!parse - # # A set of resources that are used by the assistant's tools. The resources are - # # specific to the type of tool. For example, the `code_interpreter` tool requires - # # a list of file IDs, while the `file_search` tool requires a list of vector store - # # IDs. - # # - # # @param code_interpreter [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter] - # # @param file_search [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch] - # # - # def initialize(code_interpreter: nil, file_search: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class CodeInterpreter < OpenAI::BaseModel - # @!attribute [r] file_ids + optional :file_search, -> { OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch } + + # @!method initialize(code_interpreter: nil, file_search: nil) + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. 
+ # + # @param code_interpreter [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter] + # @param file_search [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch] + + # @see OpenAI::Models::Beta::AssistantCreateParams::ToolResources#code_interpreter + class CodeInterpreter < OpenAI::Internal::Type::BaseModel + # @!attribute file_ids # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. # # @return [Array, nil] - optional :file_ids, OpenAI::ArrayOf[String] - # @!parse - # # @return [Array] - # attr_writer :file_ids - - # @!parse - # # @param file_ids [Array] - # # - # def initialize(file_ids: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] + # @!method initialize(file_ids: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter} + # for more details. + # + # @param file_ids [Array] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made end - class FileSearch < OpenAI::BaseModel - # @!attribute [r] vector_store_ids + # @see OpenAI::Models::Beta::AssistantCreateParams::ToolResources#file_search + class FileSearch < OpenAI::Internal::Type::BaseModel + # @!attribute vector_store_ids # The - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this assistant. There can be a maximum of 1 vector store attached to - # the assistant. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. # # @return [Array, nil] - optional :vector_store_ids, OpenAI::ArrayOf[String] - - # @!parse - # # @return [Array] - # attr_writer :vector_store_ids + optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String] - # @!attribute [r] vector_stores + # @!attribute vector_stores # A helper to create a - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # with file_ids and attach it to this assistant. There can be a maximum of 1 - # vector store attached to the assistant. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this assistant. There can be a maximum of 1 + # vector store attached to the assistant. # # @return [Array, nil] optional :vector_stores, - -> { OpenAI::ArrayOf[OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore] } + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore] } - # @!parse - # # @return [Array] - # attr_writer :vector_stores - - # @!parse - # # @param vector_store_ids [Array] - # # @param vector_stores [Array] - # # - # def initialize(vector_store_ids: nil, vector_stores: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(vector_store_ids: nil, vector_stores: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch} for + # more details.
+ # + # @param vector_store_ids [Array] The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/ + # + # @param vector_stores [Array] A helper to create a [vector store](https://platform.openai.com/docs/api-referen - class VectorStore < OpenAI::BaseModel - # @!attribute [r] chunking_strategy + class VectorStore < OpenAI::Internal::Type::BaseModel + # @!attribute chunking_strategy # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. + # strategy. # # @return [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static, nil] optional :chunking_strategy, - union: -> { OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy } + union: -> { OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy } - # @!parse - # # @return [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static] - # attr_writer :chunking_strategy - - # @!attribute [r] file_ids + # @!attribute file_ids # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to - # add to the vector store. There can be a maximum of 10000 files in a vector - # store. + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. # # @return [Array, nil] - optional :file_ids, OpenAI::ArrayOf[String] - - # @!parse - # # @return [Array] - # attr_writer :file_ids + optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true - # @!parse - # # @param chunking_strategy [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static] - # # @param file_ids [Array] - # # @param metadata [Hash{Symbol=>String}, nil] - # # - # def initialize(chunking_strategy: nil, file_ids: nil, metadata: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(chunking_strategy: nil, file_ids: nil, metadata: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore} + # for more details.
+ # + # @param chunking_strategy [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static] The chunking strategy used to chunk the file(s). If not set, will use the `auto` # + # @param file_ids [Array] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to ad + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. - class ChunkingStrategy < OpenAI::Union + # strategy. + # + # @see OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore#chunking_strategy + module ChunkingStrategy + extend OpenAI::Internal::Type::Union + discriminator :type # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. variant :auto, - -> { OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto } + -> { OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto } variant :static, - -> { OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static } + -> { OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static } - class Auto < OpenAI::BaseModel + class Auto < OpenAI::Internal::Type::BaseModel # @!attribute type # Always `auto`. # # @return [Symbol, :auto] required :type, const: :auto - # @!parse - # # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of - # # `800` and `chunk_overlap_tokens` of `400`. - # # - # # @param type [Symbol, :auto] - # # - # def initialize(type: :auto, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(type: :auto) + # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + # `800` and `chunk_overlap_tokens` of `400`. + # + # @param type [Symbol, :auto] Always `auto`. end - class Static < OpenAI::BaseModel + class Static < OpenAI::Internal::Type::BaseModel # @!attribute static # # @return [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static] required :static, - -> { OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static } + -> { OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static } # @!attribute type # Always `static`. @@ -351,39 +313,41 @@ class Static < OpenAI::BaseModel # @return [Symbol, :static] required :type, const: :static - # @!parse - # # @param static [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static] - # # @param type [Symbol, :static] - # # - # def initialize(static:, type: :static, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(static:, type: :static) + # @param static [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static] + # + # @param type [Symbol, :static] Always `static`. 
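Editor's note: the nested param models above coerce from plain hashes, so a create call can pass `tool_resources` literally. A hypothetical usage sketch follows; the client construction, model name, file ID, and metadata are illustrative placeholders, and `client.beta.assistants.create` is the resource method these params are defined for per the `@see` tags in this diff.

```ruby
require "openai"

# Placeholder key handling; the real client may also read OPENAI_API_KEY itself.
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

assistant = client.beta.assistants.create(
  model: "gpt-4o",
  tools: [{type: :file_search}],
  tool_resources: {
    file_search: {
      vector_stores: [
        {
          file_ids: ["file-abc123"], # up to 10000 files per vector store
          # `auto` is the default; `static` pins the chunk sizing explicitly.
          chunking_strategy: {
            type: :static,
            static: {chunk_overlap_tokens: 400, max_chunk_size_tokens: 800}
          },
          metadata: {project: "docs-search"} # up to 16 string key-value pairs
        }
      ]
    }
  }
)
```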
diff --git a/lib/openai/models/beta/assistant_delete_params.rb b/lib/openai/models/beta/assistant_delete_params.rb
index 9262c91e..6200b148 100644
--- a/lib/openai/models/beta/assistant_delete_params.rb
+++ b/lib/openai/models/beta/assistant_delete_params.rb
@@ -3,17 +3,13 @@
 module OpenAI
   module Models
     module Beta
-      class AssistantDeleteParams < OpenAI::BaseModel
-        # @!parse
-        #   extend OpenAI::RequestParameters::Converter
-        include OpenAI::RequestParameters
+      # @see OpenAI::Resources::Beta::Assistants#delete
+      class AssistantDeleteParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters

-        # @!parse
-        #   # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-        #   #
-        #   def initialize(request_options: {}, **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(request_options: {})
+        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
       end
     end
   end
diff --git a/lib/openai/models/beta/assistant_deleted.rb b/lib/openai/models/beta/assistant_deleted.rb
index 1e02cf88..9183aceb 100644
--- a/lib/openai/models/beta/assistant_deleted.rb
+++ b/lib/openai/models/beta/assistant_deleted.rb
@@ -3,7 +3,8 @@
 module OpenAI
   module Models
     module Beta
-      class AssistantDeleted < OpenAI::BaseModel
+      # @see OpenAI::Resources::Beta::Assistants#delete
+      class AssistantDeleted < OpenAI::Internal::Type::BaseModel
         # @!attribute id
         #
         #   @return [String]
@@ -12,21 +13,17 @@ class AssistantDeleted < OpenAI::BaseModel
         # @!attribute deleted
         #
         #   @return [Boolean]
-        required :deleted, OpenAI::BooleanModel
+        required :deleted, OpenAI::Internal::Type::Boolean

         # @!attribute object
         #
         #   @return [Symbol, :"assistant.deleted"]
         required :object, const: :"assistant.deleted"

-        # @!parse
-        #   # @param id [String]
-        #   # @param deleted [Boolean]
-        #   # @param object [Symbol, :"assistant.deleted"]
-        #   #
-        #   def initialize(id:, deleted:, object: :"assistant.deleted", **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(id:, deleted:, object: :"assistant.deleted")
+        #   @param id [String]
+        #   @param deleted [Boolean]
+        #   @param object [Symbol, :"assistant.deleted"]
       end
     end
   end
diff --git a/lib/openai/models/beta/assistant_list_params.rb b/lib/openai/models/beta/assistant_list_params.rb
index 2deef264..752af1a6 100644
--- a/lib/openai/models/beta/assistant_list_params.rb
+++ b/lib/openai/models/beta/assistant_list_params.rb
@@ -3,79 +3,67 @@
 module OpenAI
   module Models
     module Beta
-      class AssistantListParams < OpenAI::BaseModel
-        # @!parse
-        #   extend OpenAI::RequestParameters::Converter
-        include OpenAI::RequestParameters
+      # @see OpenAI::Resources::Beta::Assistants#list
+      class AssistantListParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters

-        # @!attribute [r] after
+        # @!attribute after
         #   A cursor for use in pagination. `after` is an object ID that defines your place
-        # in the list. For instance, if you make a list request and receive 100 objects,
-        # ending with obj_foo, your subsequent call can include after=obj_foo in order to
-        # fetch the next page of the list.
+        #   in the list. For instance, if you make a list request and receive 100 objects,
+        #   ending with obj_foo, your subsequent call can include after=obj_foo in order to
+        #   fetch the next page of the list.
         #
         #   @return [String, nil]
         optional :after, String

-        # @!parse
-        #   # @return [String]
-        #   attr_writer :after
-
-        # @!attribute [r] before
+        # @!attribute before
         #   A cursor for use in pagination. `before` is an object ID that defines your place
-        # in the list. For instance, if you make a list request and receive 100 objects,
-        # starting with obj_foo, your subsequent call can include before=obj_foo in order
-        # to fetch the previous page of the list.
+        #   in the list. For instance, if you make a list request and receive 100 objects,
+        #   starting with obj_foo, your subsequent call can include before=obj_foo in order
+        #   to fetch the previous page of the list.
         #
         #   @return [String, nil]
         optional :before, String

-        # @!parse
-        #   # @return [String]
-        #   attr_writer :before
-
-        # @!attribute [r] limit
+        # @!attribute limit
         #   A limit on the number of objects to be returned. Limit can range between 1 and
-        # 100, and the default is 20.
+        #   100, and the default is 20.
         #
         #   @return [Integer, nil]
         optional :limit, Integer

-        # @!parse
-        #   # @return [Integer]
-        #   attr_writer :limit
-
-        # @!attribute [r] order
+        # @!attribute order
         #   Sort order by the `created_at` timestamp of the objects. `asc` for ascending
-        # order and `desc` for descending order.
+        #   order and `desc` for descending order.
         #
         #   @return [Symbol, OpenAI::Models::Beta::AssistantListParams::Order, nil]
-        optional :order, enum: -> { OpenAI::Models::Beta::AssistantListParams::Order }
-
-        # @!parse
-        #   # @return [Symbol, OpenAI::Models::Beta::AssistantListParams::Order]
-        #   attr_writer :order
-
-        # @!parse
-        #   # @param after [String]
-        #   # @param before [String]
-        #   # @param limit [Integer]
-        #   # @param order [Symbol, OpenAI::Models::Beta::AssistantListParams::Order]
-        #   # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-        #   #
-        #   def initialize(after: nil, before: nil, limit: nil, order: nil, request_options: {}, **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        optional :order, enum: -> { OpenAI::Beta::AssistantListParams::Order }

-        # @abstract
+        # @!method initialize(after: nil, before: nil, limit: nil, order: nil, request_options: {})
+        #   Some parameter documentation has been truncated, see
+        #   {OpenAI::Models::Beta::AssistantListParams} for more details.
         #
+        #   @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
+        #
+        #   @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place
+        #
+        #   @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
+        #
+        #   @param order [Symbol, OpenAI::Models::Beta::AssistantListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
+        #
+        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

         #   Sort order by the `created_at` timestamp of the objects. `asc` for ascending
-        # order and `desc` for descending order.
-        class Order < OpenAI::Enum
+        #   order and `desc` for descending order.
+        module Order
+          extend OpenAI::Internal::Type::Enum

           ASC = :asc
           DESC = :desc

-          finalize!
+          # @!method self.values
+          #   @return [Array<Symbol>]
         end
       end
     end
   end
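Editor's note: a cursor-pagination sketch built on the params above. It assumes the list call returns a page object exposing the fetched `data`, which is how cursor-paged endpoints are typically surfaced in this SDK; `client` carries over from the earlier sketch.

```ruby
# First page: at most 20 assistants, newest first.
page = client.beta.assistants.list(limit: 20, order: :desc)
page.data.each { |assistant| puts assistant.id }

# Next page: pass the last object ID as the `after` cursor.
client.beta.assistants.list(after: page.data.last.id, limit: 20, order: :desc)
```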
diff --git a/lib/openai/models/beta/assistant_response_format_option.rb b/lib/openai/models/beta/assistant_response_format_option.rb
index 18671049..7541c8b2 100644
--- a/lib/openai/models/beta/assistant_response_format_option.rb
+++ b/lib/openai/models/beta/assistant_response_format_option.rb
@@ -3,44 +3,47 @@
 module OpenAI
   module Models
     module Beta
-      # @abstract
-      #
       # Specifies the format that the model must output. Compatible with
-      # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
-      # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
-      # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+      #   [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+      #   [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+      #   and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
       #
-      # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-      # Outputs which ensures the model will match your supplied JSON schema. Learn more
-      # in the
-      # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+      #   Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+      #   Outputs which ensures the model will match your supplied JSON schema. Learn more
+      #   in the
+      #   [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
       #
-      # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-      # message the model generates is valid JSON.
+      #   Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+      #   message the model generates is valid JSON.
       #
-      # **Important:** when using JSON mode, you **must** also instruct the model to
-      # produce JSON yourself via a system or user message. Without this, the model may
-      # generate an unending stream of whitespace until the generation reaches the token
-      # limit, resulting in a long-running and seemingly "stuck" request. Also note that
-      # the message content may be partially cut off if `finish_reason="length"`, which
-      # indicates the generation exceeded `max_tokens` or the conversation exceeded the
-      # max context length.
-      class AssistantResponseFormatOption < OpenAI::Union
+      #   **Important:** when using JSON mode, you **must** also instruct the model to
+      #   produce JSON yourself via a system or user message. Without this, the model may
+      #   generate an unending stream of whitespace until the generation reaches the token
+      #   limit, resulting in a long-running and seemingly "stuck" request. Also note that
+      #   the message content may be partially cut off if `finish_reason="length"`, which
+      #   indicates the generation exceeded `max_tokens` or the conversation exceeded the
+      #   max context length.
+      module AssistantResponseFormatOption
+        extend OpenAI::Internal::Type::Union

         # `auto` is the default value
         variant const: :auto

         # Default response format. Used to generate text responses.
-        variant -> { OpenAI::Models::ResponseFormatText }
+        variant -> { OpenAI::ResponseFormatText }

         # JSON object response format. An older method of generating JSON responses.
         # Using `json_schema` is recommended for models that support it. Note that the
         # model will not generate JSON without a system or user message instructing it
         # to do so.
-        variant -> { OpenAI::Models::ResponseFormatJSONObject }
+        variant -> { OpenAI::ResponseFormatJSONObject }

         # JSON Schema response format. Used to generate structured JSON responses.
         # Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
-        variant -> { OpenAI::Models::ResponseFormatJSONSchema }
+        variant -> { OpenAI::ResponseFormatJSONSchema }
+
+        # @!method self.variants
+        #   @return [Array(Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema)]
       end
     end
   end
 end
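Editor's note: the union above admits four shapes. A sketch of each accepted value follows; the `json_schema` payload (name, schema keys) is an assumption based on the Structured Outputs guide linked above, not on this diff.

```ruby
# `auto` is the default value.
response_format = :auto

# Plain text, and JSON mode (JSON mode still requires prompting for JSON).
response_format = {type: :text}
response_format = {type: :json_object}

# Structured Outputs: the model must match the supplied JSON schema.
response_format = {
  type: :json_schema,
  json_schema: {
    name: "reply",
    schema: {type: "object", properties: {answer: {type: "string"}}}
  }
}
```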
diff --git a/lib/openai/models/beta/assistant_retrieve_params.rb b/lib/openai/models/beta/assistant_retrieve_params.rb
index d61babe9..852988c7 100644
--- a/lib/openai/models/beta/assistant_retrieve_params.rb
+++ b/lib/openai/models/beta/assistant_retrieve_params.rb
@@ -3,17 +3,13 @@
 module OpenAI
   module Models
     module Beta
-      class AssistantRetrieveParams < OpenAI::BaseModel
-        # @!parse
-        #   extend OpenAI::RequestParameters::Converter
-        include OpenAI::RequestParameters
+      # @see OpenAI::Resources::Beta::Assistants#retrieve
+      class AssistantRetrieveParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters

-        # @!parse
-        #   # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-        #   #
-        #   def initialize(request_options: {}, **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(request_options: {})
+        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
       end
     end
   end
diff --git a/lib/openai/models/beta/assistant_stream_event.rb b/lib/openai/models/beta/assistant_stream_event.rb
index 80632016..a4113b4e 100644
--- a/lib/openai/models/beta/assistant_stream_event.rb
+++ b/lib/openai/models/beta/assistant_stream_event.rb
@@ -3,728 +3,733 @@
 module OpenAI
   module Models
     module Beta
-      # @abstract
-      #
       # Represents an event emitted when streaming a Run.
       #
-      # Each event in a server-sent events stream has an `event` and `data` property:
+      #   Each event in a server-sent events stream has an `event` and `data` property:
       #
-      # ```
-      # event: thread.created
-      # data: {"id": "thread_123", "object": "thread", ...}
-      # ```
+      #   ```
+      #   event: thread.created
+      #   data: {"id": "thread_123", "object": "thread", ...}
+      #   ```
       #
-      # We emit events whenever a new object is created, transitions to a new state, or
-      # is being streamed in parts (deltas). For example, we emit `thread.run.created`
-      # when a new run is created, `thread.run.completed` when a run completes, and so
-      # on. When an Assistant chooses to create a message during a run, we emit a
-      # `thread.message.created event`, a `thread.message.in_progress` event, many
-      # `thread.message.delta` events, and finally a `thread.message.completed` event.
+      #   We emit events whenever a new object is created, transitions to a new state, or
+      #   is being streamed in parts (deltas). For example, we emit `thread.run.created`
+      #   when a new run is created, `thread.run.completed` when a run completes, and so
+      #   on. When an Assistant chooses to create a message during a run, we emit a
+      #   `thread.message.created event`, a `thread.message.in_progress` event, many
+      #   `thread.message.delta` events, and finally a `thread.message.completed` event.
       #
-      # We may add additional events over time, so we recommend handling unknown events
-      # gracefully in your code. See the
-      # [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview)
-      # to learn how to integrate the Assistants API with streaming.
-      class AssistantStreamEvent < OpenAI::Union
+      #   We may add additional events over time, so we recommend handling unknown events
+      #   gracefully in your code. See the
+      #   [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview)
+      #   to learn how to integrate the Assistants API with streaming.
+      module AssistantStreamEvent
+        extend OpenAI::Internal::Type::Union

         discriminator :event

         # Occurs when a new [thread](https://platform.openai.com/docs/api-reference/threads/object) is created.
-        variant :"thread.created", -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated }
+        variant :"thread.created", -> { OpenAI::Beta::AssistantStreamEvent::ThreadCreated }

         # Occurs when a new [run](https://platform.openai.com/docs/api-reference/runs/object) is created.
-        variant :"thread.run.created", -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated }
+        variant :"thread.run.created", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunCreated }

         # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `queued` status.
-        variant :"thread.run.queued", -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued }
+        variant :"thread.run.queued", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunQueued }

         # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to an `in_progress` status.
-        variant :"thread.run.in_progress", -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress }
+        variant :"thread.run.in_progress", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunInProgress }

         # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `requires_action` status.
-        variant :"thread.run.requires_action",
-                -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction }
+        variant :"thread.run.requires_action", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunRequiresAction }

         # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is completed.
-        variant :"thread.run.completed", -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted }
+        variant :"thread.run.completed", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunCompleted }

         # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) ends with status `incomplete`.
-        variant :"thread.run.incomplete", -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete }
+        variant :"thread.run.incomplete", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunIncomplete }

         # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) fails.
-        variant :"thread.run.failed", -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed }
+        variant :"thread.run.failed", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunFailed }

         # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `cancelling` status.
-        variant :"thread.run.cancelling", -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling }
+        variant :"thread.run.cancelling", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunCancelling }

         # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is cancelled.
-        variant :"thread.run.cancelled", -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled }
+        variant :"thread.run.cancelled", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunCancelled }

         # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) expires.
-        variant :"thread.run.expired", -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired }
+        variant :"thread.run.expired", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunExpired }

         # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is created.
-        variant :"thread.run.step.created",
-                -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated }
+        variant :"thread.run.step.created", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunStepCreated }

         # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) moves to an `in_progress` state.
-        variant :"thread.run.step.in_progress",
-                -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress }
+        variant :"thread.run.step.in_progress", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunStepInProgress }

         # Occurs when parts of a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) are being streamed.
-        variant :"thread.run.step.delta", -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta }
+        variant :"thread.run.step.delta", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunStepDelta }

         # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is completed.
-        variant :"thread.run.step.completed",
-                -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted }
+        variant :"thread.run.step.completed", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunStepCompleted }

         # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) fails.
-        variant :"thread.run.step.failed", -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed }
+        variant :"thread.run.step.failed", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunStepFailed }

         # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is cancelled.
-        variant :"thread.run.step.cancelled",
-                -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled }
+        variant :"thread.run.step.cancelled", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunStepCancelled }

         # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) expires.
-        variant :"thread.run.step.expired",
-                -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired }
+        variant :"thread.run.step.expired", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunStepExpired }

         # Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is created.
-        variant :"thread.message.created", -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated }
+        variant :"thread.message.created", -> { OpenAI::Beta::AssistantStreamEvent::ThreadMessageCreated }

         # Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) moves to an `in_progress` state.
-        variant :"thread.message.in_progress",
-                -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress }
+        variant :"thread.message.in_progress", -> { OpenAI::Beta::AssistantStreamEvent::ThreadMessageInProgress }

         # Occurs when parts of a [Message](https://platform.openai.com/docs/api-reference/messages/object) are being streamed.
-        variant :"thread.message.delta", -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta }
+        variant :"thread.message.delta", -> { OpenAI::Beta::AssistantStreamEvent::ThreadMessageDelta }

         # Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is completed.
-        variant :"thread.message.completed",
-                -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted }
+        variant :"thread.message.completed", -> { OpenAI::Beta::AssistantStreamEvent::ThreadMessageCompleted }

         # Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) ends before it is completed.
-        variant :"thread.message.incomplete",
-                -> { OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete }
+        variant :"thread.message.incomplete", -> { OpenAI::Beta::AssistantStreamEvent::ThreadMessageIncomplete }

         # Occurs when an [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs. This can happen due to an internal server error or a timeout.
-        variant :error, -> { OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent }
+        variant :error, -> { OpenAI::Beta::AssistantStreamEvent::ErrorEvent }

-        class ThreadCreated < OpenAI::BaseModel
+        class ThreadCreated < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a thread that contains
-          # [messages](https://platform.openai.com/docs/api-reference/messages).
+          #   [messages](https://platform.openai.com/docs/api-reference/messages).
           #
           #   @return [OpenAI::Models::Beta::Thread]
-          required :data, -> { OpenAI::Models::Beta::Thread }
+          required :data, -> { OpenAI::Beta::Thread }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.created"]
           required :event, const: :"thread.created"

-          # @!attribute [r] enabled
+          # @!attribute enabled
           #   Whether to enable input audio transcription.
           #
           #   @return [Boolean, nil]
-          optional :enabled, OpenAI::BooleanModel
-
-          # @!parse
-          #   # @return [Boolean]
-          #   attr_writer :enabled
-
-          # @!parse
-          #   # Occurs when a new
-          #   # [thread](https://platform.openai.com/docs/api-reference/threads/object) is
-          #   # created.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Thread]
-          #   # @param enabled [Boolean]
-          #   # @param event [Symbol, :"thread.created"]
-          #   #
-          #   def initialize(data:, enabled: nil, event: :"thread.created", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          optional :enabled, OpenAI::Internal::Type::Boolean
+
+          # @!method initialize(data:, enabled: nil, event: :"thread.created")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated} for more details.
+          #
+          #   Occurs when a new
+          #   [thread](https://platform.openai.com/docs/api-reference/threads/object) is
+          #   created.
+          #
+          #   @param data [OpenAI::Models::Beta::Thread] Represents a thread that contains [messages](https://platform.openai.com/docs/ap
+          #
+          #   @param enabled [Boolean] Whether to enable input audio transcription.
+          #
+          #   @param event [Symbol, :"thread.created"]
         end

-        class ThreadRunCreated < OpenAI::BaseModel
+        class ThreadRunCreated < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents an execution run on a
-          # [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Run]
-          required :data, -> { OpenAI::Models::Beta::Threads::Run }
+          required :data, -> { OpenAI::Beta::Threads::Run }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.created"]
           required :event, const: :"thread.run.created"

-          # @!parse
-          #   # Occurs when a new
-          #   # [run](https://platform.openai.com/docs/api-reference/runs/object) is created.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Run]
-          #   # @param event [Symbol, :"thread.run.created"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.created", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.created")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated} for more details.
+          #
+          #   Occurs when a new
+          #   [run](https://platform.openai.com/docs/api-reference/runs/object) is created.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
+          #
+          #   @param event [Symbol, :"thread.run.created"]
         end

-        class ThreadRunQueued < OpenAI::BaseModel
+        class ThreadRunQueued < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents an execution run on a
-          # [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Run]
-          required :data, -> { OpenAI::Models::Beta::Threads::Run }
+          required :data, -> { OpenAI::Beta::Threads::Run }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.queued"]
           required :event, const: :"thread.run.queued"

-          # @!parse
-          #   # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-          #   # moves to a `queued` status.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Run]
-          #   # @param event [Symbol, :"thread.run.queued"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.queued", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.queued")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued} for more details.
+          #
+          #   Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+          #   moves to a `queued` status.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
+          #
+          #   @param event [Symbol, :"thread.run.queued"]
         end

-        class ThreadRunInProgress < OpenAI::BaseModel
+        class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents an execution run on a
-          # [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Run]
-          required :data, -> { OpenAI::Models::Beta::Threads::Run }
+          required :data, -> { OpenAI::Beta::Threads::Run }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.in_progress"]
           required :event, const: :"thread.run.in_progress"

-          # @!parse
-          #   # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-          #   # moves to an `in_progress` status.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Run]
-          #   # @param event [Symbol, :"thread.run.in_progress"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.in_progress", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.in_progress")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress} for more
+          #   details.
+          #
+          #   Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+          #   moves to an `in_progress` status.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
+          #
+          #   @param event [Symbol, :"thread.run.in_progress"]
         end

-        class ThreadRunRequiresAction < OpenAI::BaseModel
+        class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents an execution run on a
-          # [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Run]
-          required :data, -> { OpenAI::Models::Beta::Threads::Run }
+          required :data, -> { OpenAI::Beta::Threads::Run }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.requires_action"]
           required :event, const: :"thread.run.requires_action"

-          # @!parse
-          #   # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-          #   # moves to a `requires_action` status.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Run]
-          #   # @param event [Symbol, :"thread.run.requires_action"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.requires_action", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.requires_action")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction} for more
+          #   details.
+          #
+          #   Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+          #   moves to a `requires_action` status.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
+          #
+          #   @param event [Symbol, :"thread.run.requires_action"]
         end

-        class ThreadRunCompleted < OpenAI::BaseModel
+        class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents an execution run on a
-          # [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Run]
-          required :data, -> { OpenAI::Models::Beta::Threads::Run }
+          required :data, -> { OpenAI::Beta::Threads::Run }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.completed"]
           required :event, const: :"thread.run.completed"

-          # @!parse
-          #   # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-          #   # is completed.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Run]
-          #   # @param event [Symbol, :"thread.run.completed"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.completed", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.completed")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted} for more
+          #   details.
+          #
+          #   Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+          #   is completed.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
+          #
+          #   @param event [Symbol, :"thread.run.completed"]
         end

-        class ThreadRunIncomplete < OpenAI::BaseModel
+        class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents an execution run on a
-          # [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Run]
-          required :data, -> { OpenAI::Models::Beta::Threads::Run }
+          required :data, -> { OpenAI::Beta::Threads::Run }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.incomplete"]
           required :event, const: :"thread.run.incomplete"

-          # @!parse
-          #   # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-          #   # ends with status `incomplete`.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Run]
-          #   # @param event [Symbol, :"thread.run.incomplete"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.incomplete", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.incomplete")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete} for more
+          #   details.
+          #
+          #   Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+          #   ends with status `incomplete`.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
+          #
+          #   @param event [Symbol, :"thread.run.incomplete"]
         end

-        class ThreadRunFailed < OpenAI::BaseModel
+        class ThreadRunFailed < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents an execution run on a
-          # [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Run]
-          required :data, -> { OpenAI::Models::Beta::Threads::Run }
+          required :data, -> { OpenAI::Beta::Threads::Run }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.failed"]
           required :event, const: :"thread.run.failed"

-          # @!parse
-          #   # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-          #   # fails.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Run]
-          #   # @param event [Symbol, :"thread.run.failed"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.failed", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.failed")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed} for more details.
+          #
+          #   Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+          #   fails.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
+          #
+          #   @param event [Symbol, :"thread.run.failed"]
         end

-        class ThreadRunCancelling < OpenAI::BaseModel
+        class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents an execution run on a
-          # [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Run]
-          required :data, -> { OpenAI::Models::Beta::Threads::Run }
+          required :data, -> { OpenAI::Beta::Threads::Run }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.cancelling"]
           required :event, const: :"thread.run.cancelling"

-          # @!parse
-          #   # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-          #   # moves to a `cancelling` status.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Run]
-          #   # @param event [Symbol, :"thread.run.cancelling"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.cancelling", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.cancelling")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling} for more
+          #   details.
+          #
+          #   Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+          #   moves to a `cancelling` status.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
+          #
+          #   @param event [Symbol, :"thread.run.cancelling"]
         end

-        class ThreadRunCancelled < OpenAI::BaseModel
+        class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents an execution run on a
-          # [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Run]
-          required :data, -> { OpenAI::Models::Beta::Threads::Run }
+          required :data, -> { OpenAI::Beta::Threads::Run }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.cancelled"]
           required :event, const: :"thread.run.cancelled"

-          # @!parse
-          #   # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-          #   # is cancelled.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Run]
-          #   # @param event [Symbol, :"thread.run.cancelled"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.cancelled", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.cancelled")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled} for more
+          #   details.
+          #
+          #   Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+          #   is cancelled.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
+          #
+          #   @param event [Symbol, :"thread.run.cancelled"]
         end

-        class ThreadRunExpired < OpenAI::BaseModel
+        class ThreadRunExpired < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents an execution run on a
-          # [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Run]
-          required :data, -> { OpenAI::Models::Beta::Threads::Run }
+          required :data, -> { OpenAI::Beta::Threads::Run }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.expired"]
           required :event, const: :"thread.run.expired"

-          # @!parse
-          #   # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-          #   # expires.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Run]
-          #   # @param event [Symbol, :"thread.run.expired"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.expired", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.expired")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired} for more details.
+          #
+          #   Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+          #   expires.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
+          #
+          #   @param event [Symbol, :"thread.run.expired"]
         end

-        class ThreadRunStepCreated < OpenAI::BaseModel
+        class ThreadRunStepCreated < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a step in execution of a run.
           #
           #   @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
-          required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStep }
+          required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.step.created"]
           required :event, const: :"thread.run.step.created"

-          # @!parse
-          #   # Occurs when a
-          #   # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
-          #   # is created.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep]
-          #   # @param event [Symbol, :"thread.run.step.created"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.step.created", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.step.created")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated} for more
+          #   details.
+          #
+          #   Occurs when a
+          #   [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
+          #   is created.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
+          #
+          #   @param event [Symbol, :"thread.run.step.created"]
         end

-        class ThreadRunStepInProgress < OpenAI::BaseModel
+        class ThreadRunStepInProgress < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a step in execution of a run.
           #
           #   @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
-          required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStep }
+          required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.step.in_progress"]
           required :event, const: :"thread.run.step.in_progress"

-          # @!parse
-          #   # Occurs when a
-          #   # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
-          #   # moves to an `in_progress` state.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep]
-          #   # @param event [Symbol, :"thread.run.step.in_progress"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.step.in_progress", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.step.in_progress")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress} for more
+          #   details.
+          #
+          #   Occurs when a
+          #   [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
+          #   moves to an `in_progress` state.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
+          #
+          #   @param event [Symbol, :"thread.run.step.in_progress"]
         end

-        class ThreadRunStepDelta < OpenAI::BaseModel
+        class ThreadRunStepDelta < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a run step delta i.e. any changed fields on a run step during
-          # streaming.
+          #   streaming.
           #
           #   @return [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent]
-          required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent }
+          required :data, -> { OpenAI::Beta::Threads::Runs::RunStepDeltaEvent }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.step.delta"]
           required :event, const: :"thread.run.step.delta"

-          # @!parse
-          #   # Occurs when parts of a
-          #   # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
-          #   # are being streamed.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent]
-          #   # @param event [Symbol, :"thread.run.step.delta"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.step.delta", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.step.delta")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta} for more
+          #   details.
+          #
+          #   Occurs when parts of a
+          #   [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
+          #   are being streamed.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent] Represents a run step delta i.e. any changed fields on a run step during streami
+          #
+          #   @param event [Symbol, :"thread.run.step.delta"]
         end

-        class ThreadRunStepCompleted < OpenAI::BaseModel
+        class ThreadRunStepCompleted < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a step in execution of a run.
           #
           #   @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
-          required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStep }
+          required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.step.completed"]
           required :event, const: :"thread.run.step.completed"

-          # @!parse
-          #   # Occurs when a
-          #   # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
-          #   # is completed.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep]
-          #   # @param event [Symbol, :"thread.run.step.completed"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.step.completed", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.step.completed")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted} for more
+          #   details.
+          #
+          #   Occurs when a
+          #   [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
+          #   is completed.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
+          #
+          #   @param event [Symbol, :"thread.run.step.completed"]
         end

-        class ThreadRunStepFailed < OpenAI::BaseModel
+        class ThreadRunStepFailed < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a step in execution of a run.
           #
           #   @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
-          required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStep }
+          required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.step.failed"]
           required :event, const: :"thread.run.step.failed"

-          # @!parse
-          #   # Occurs when a
-          #   # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
-          #   # fails.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep]
-          #   # @param event [Symbol, :"thread.run.step.failed"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.step.failed", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.step.failed")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed} for more
+          #   details.
+          #
+          #   Occurs when a
+          #   [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
+          #   fails.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
+          #
+          #   @param event [Symbol, :"thread.run.step.failed"]
         end

-        class ThreadRunStepCancelled < OpenAI::BaseModel
+        class ThreadRunStepCancelled < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a step in execution of a run.
           #
           #   @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
-          required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStep }
+          required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.step.cancelled"]
           required :event, const: :"thread.run.step.cancelled"

-          # @!parse
-          #   # Occurs when a
-          #   # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
-          #   # is cancelled.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep]
-          #   # @param event [Symbol, :"thread.run.step.cancelled"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.step.cancelled", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.step.cancelled")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled} for more
+          #   details.
+          #
+          #   Occurs when a
+          #   [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
+          #   is cancelled.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
+          #
+          #   @param event [Symbol, :"thread.run.step.cancelled"]
         end

-        class ThreadRunStepExpired < OpenAI::BaseModel
+        class ThreadRunStepExpired < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a step in execution of a run.
           #
           #   @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
-          required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStep }
+          required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.run.step.expired"]
           required :event, const: :"thread.run.step.expired"
-          # @!parse
-          #   # Occurs when a
-          #   # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
-          #   # expires.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep]
-          #   # @param event [Symbol, :"thread.run.step.expired"]
-          #   #
-          #   def initialize(data:, event: :"thread.run.step.expired", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.run.step.expired")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired} for more
+          #   details.
+          #
+          #   Occurs when a
+          #   [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
+          #   expires.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
+          #
+          #   @param event [Symbol, :"thread.run.step.expired"]
         end

-        class ThreadMessageCreated < OpenAI::BaseModel
+        class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a message within a
-          # [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Message]
-          required :data, -> { OpenAI::Models::Beta::Threads::Message }
+          required :data, -> { OpenAI::Beta::Threads::Message }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.message.created"]
           required :event, const: :"thread.message.created"

-          # @!parse
-          #   # Occurs when a
-          #   # [message](https://platform.openai.com/docs/api-reference/messages/object) is
-          #   # created.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Message]
-          #   # @param event [Symbol, :"thread.message.created"]
-          #   #
-          #   def initialize(data:, event: :"thread.message.created", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.message.created")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated} for more
+          #   details.
+          #
+          #   Occurs when a
+          #   [message](https://platform.openai.com/docs/api-reference/messages/object) is
+          #   created.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
+          #
+          #   @param event [Symbol, :"thread.message.created"]
         end

-        class ThreadMessageInProgress < OpenAI::BaseModel
+        class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a message within a
-          # [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Message]
-          required :data, -> { OpenAI::Models::Beta::Threads::Message }
+          required :data, -> { OpenAI::Beta::Threads::Message }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.message.in_progress"]
           required :event, const: :"thread.message.in_progress"

-          # @!parse
-          #   # Occurs when a
-          #   # [message](https://platform.openai.com/docs/api-reference/messages/object) moves
-          #   # to an `in_progress` state.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Message]
-          #   # @param event [Symbol, :"thread.message.in_progress"]
-          #   #
-          #   def initialize(data:, event: :"thread.message.in_progress", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.message.in_progress")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress} for more
+          #   details.
+          #
+          #   Occurs when a
+          #   [message](https://platform.openai.com/docs/api-reference/messages/object) moves
+          #   to an `in_progress` state.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
+          #
+          #   @param event [Symbol, :"thread.message.in_progress"]
         end

-        class ThreadMessageDelta < OpenAI::BaseModel
+        class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a message delta i.e. any changed fields on a message during
-          # streaming.
+          #   streaming.
           #
           #   @return [OpenAI::Models::Beta::Threads::MessageDeltaEvent]
-          required :data, -> { OpenAI::Models::Beta::Threads::MessageDeltaEvent }
+          required :data, -> { OpenAI::Beta::Threads::MessageDeltaEvent }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.message.delta"]
           required :event, const: :"thread.message.delta"

-          # @!parse
-          #   # Occurs when parts of a
-          #   # [Message](https://platform.openai.com/docs/api-reference/messages/object) are
-          #   # being streamed.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::MessageDeltaEvent]
-          #   # @param event [Symbol, :"thread.message.delta"]
-          #   #
-          #   def initialize(data:, event: :"thread.message.delta", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.message.delta")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta} for more
+          #   details.
+          #
+          #   Occurs when parts of a
+          #   [Message](https://platform.openai.com/docs/api-reference/messages/object) are
+          #   being streamed.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::MessageDeltaEvent] Represents a message delta i.e. any changed fields on a message during streaming
+          #
+          #   @param event [Symbol, :"thread.message.delta"]
         end

-        class ThreadMessageCompleted < OpenAI::BaseModel
+        class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a message within a
-          # [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Message]
-          required :data, -> { OpenAI::Models::Beta::Threads::Message }
+          required :data, -> { OpenAI::Beta::Threads::Message }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.message.completed"]
           required :event, const: :"thread.message.completed"

-          # @!parse
-          #   # Occurs when a
-          #   # [message](https://platform.openai.com/docs/api-reference/messages/object) is
-          #   # completed.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Message]
-          #   # @param event [Symbol, :"thread.message.completed"]
-          #   #
-          #   def initialize(data:, event: :"thread.message.completed", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.message.completed")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted} for more
+          #   details.
+          #
+          #   Occurs when a
+          #   [message](https://platform.openai.com/docs/api-reference/messages/object) is
+          #   completed.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
+          #
+          #   @param event [Symbol, :"thread.message.completed"]
         end

-        class ThreadMessageIncomplete < OpenAI::BaseModel
+        class ThreadMessageIncomplete < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a message within a
-          # [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Message]
-          required :data, -> { OpenAI::Models::Beta::Threads::Message }
+          required :data, -> { OpenAI::Beta::Threads::Message }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.message.incomplete"]
           required :event, const: :"thread.message.incomplete"

-          # @!parse
-          #   # Occurs when a
-          #   # [message](https://platform.openai.com/docs/api-reference/messages/object) ends
-          #   # before it is completed.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Message]
-          #   # @param event [Symbol, :"thread.message.incomplete"]
-          #   #
-          #   def initialize(data:, event: :"thread.message.incomplete", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.message.incomplete")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete} for more
+          #   details.
+          #
+          #   Occurs when a
+          #   [message](https://platform.openai.com/docs/api-reference/messages/object) ends
+          #   before it is completed.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
+          #
+          #   @param event [Symbol, :"thread.message.incomplete"]
         end

-        class ErrorEvent < OpenAI::BaseModel
+        class ErrorEvent < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #
           #   @return [OpenAI::Models::ErrorObject]
-          required :data, -> { OpenAI::Models::ErrorObject }
+          required :data, -> { OpenAI::ErrorObject }

           # @!attribute event
           #
           #   @return [Symbol, :error]
           required :event, const: :error

-          # @!parse
-          #   # Occurs when an
-          #   # [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs.
-          #   # This can happen due to an internal server error or a timeout.
-          #   #
-          #   # @param data [OpenAI::Models::ErrorObject]
-          #   # @param event [Symbol, :error]
-          #   #
-          #   def initialize(data:, event: :error, **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :error)
+          #   Occurs when an
+          #   [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs.
+          #   This can happen due to an internal server error or a timeout.
+          #
+          #   @param data [OpenAI::Models::ErrorObject]
+          #   @param event [Symbol, :error]
         end
+
+        # @!method self.variants
+        #   @return [Array(OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete, OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent)]
       end
     end
   end
+          #
+          #   @param data [OpenAI::Models::ErrorObject]
+          #   @param event [Symbol, :error]
         end
+
+        # @!method self.variants
+        #   @return [Array(OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete, OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent)]
       end
     end
   end
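The variant classes above all expose `data` and `event` readers, so a consumer can dispatch on the `event` discriminator. A minimal sketch, assuming `events` is an Enumerable of already-deserialized stream event instances (the reader methods follow the attribute definitions in this file; the handler name is hypothetical):

# Dispatch assistant stream events on their `event` discriminator symbol.
def handle_assistant_events(events)
  events.each do |ev|
    case ev.event
    when :"thread.message.delta"
      # ev.data is an OpenAI::Models::Beta::Threads::MessageDeltaEvent
      print "."
    when :error
      # ev.data is an OpenAI::Models::ErrorObject (server error or timeout)
      warn "stream error: #{ev.data.inspect}"
    end
  end
end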
diff --git a/lib/openai/models/beta/assistant_tool.rb b/lib/openai/models/beta/assistant_tool.rb
index 7cd4e9b8..495ff39a 100644
--- a/lib/openai/models/beta/assistant_tool.rb
+++ b/lib/openai/models/beta/assistant_tool.rb
@@ -3,16 +3,19 @@
 module OpenAI
   module Models
     module Beta
-      # @abstract
-      #
-      class AssistantTool < OpenAI::Union
+      module AssistantTool
+        extend OpenAI::Internal::Type::Union
+
         discriminator :type

-        variant :code_interpreter, -> { OpenAI::Models::Beta::CodeInterpreterTool }
+        variant :code_interpreter, -> { OpenAI::Beta::CodeInterpreterTool }
+
+        variant :file_search, -> { OpenAI::Beta::FileSearchTool }

-        variant :file_search, -> { OpenAI::Models::Beta::FileSearchTool }
+        variant :function, -> { OpenAI::Beta::FunctionTool }

-        variant :function, -> { OpenAI::Models::Beta::FunctionTool }
+        # @!method self.variants
+        #   @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool)]
       end
     end
   end
diff --git a/lib/openai/models/beta/assistant_tool_choice.rb b/lib/openai/models/beta/assistant_tool_choice.rb
index 9aba0349..a4fc7a03 100644
--- a/lib/openai/models/beta/assistant_tool_choice.rb
+++ b/lib/openai/models/beta/assistant_tool_choice.rb
@@ -3,42 +3,38 @@
 module OpenAI
   module Models
     module Beta
-      class AssistantToolChoice < OpenAI::BaseModel
+      class AssistantToolChoice < OpenAI::Internal::Type::BaseModel
         # @!attribute type
         #   The type of the tool. If type is `function`, the function name must be set
         #
         #   @return [Symbol, OpenAI::Models::Beta::AssistantToolChoice::Type]
-        required :type, enum: -> { OpenAI::Models::Beta::AssistantToolChoice::Type }
+        required :type, enum: -> { OpenAI::Beta::AssistantToolChoice::Type }

-        # @!attribute [r] function
+        # @!attribute function
         #
         #   @return [OpenAI::Models::Beta::AssistantToolChoiceFunction, nil]
-        optional :function, -> { OpenAI::Models::Beta::AssistantToolChoiceFunction }
+        optional :function, -> { OpenAI::Beta::AssistantToolChoiceFunction }

-        # @!parse
-        #   # @return [OpenAI::Models::Beta::AssistantToolChoiceFunction]
-        #   attr_writer :function
-
-        # @!parse
-        #   # Specifies a tool the model should use. Use to force the model to call a specific
-        #   #   tool.
-        #   #
-        #   # @param type [Symbol, OpenAI::Models::Beta::AssistantToolChoice::Type]
-        #   # @param function [OpenAI::Models::Beta::AssistantToolChoiceFunction]
-        #   #
-        #   def initialize(type:, function: nil, **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
-
-        # @abstract
+        # @!method initialize(type:, function: nil)
+        #   Specifies a tool the model should use. Use to force the model to call a specific
+        #   tool.
+        #
+        #   @param type [Symbol, OpenAI::Models::Beta::AssistantToolChoice::Type] The type of the tool. If type is `function`, the function name must be set
+        #
+        #   @param function [OpenAI::Models::Beta::AssistantToolChoiceFunction]
+
         #   The type of the tool. If type is `function`, the function name must be set
-        class Type < OpenAI::Enum
+        #
+        # @see OpenAI::Models::Beta::AssistantToolChoice#type
+        module Type
+          extend OpenAI::Internal::Type::Enum
+
           FUNCTION = :function
           CODE_INTERPRETER = :code_interpreter
           FILE_SEARCH = :file_search

-          finalize!
+          # @!method self.values
+          #   @return [Array<Symbol>]
         end
       end
     end
diff --git a/lib/openai/models/beta/assistant_tool_choice_function.rb b/lib/openai/models/beta/assistant_tool_choice_function.rb
index 4e127718..8440fb98 100644
--- a/lib/openai/models/beta/assistant_tool_choice_function.rb
+++ b/lib/openai/models/beta/assistant_tool_choice_function.rb
@@ -3,19 +3,15 @@
 module OpenAI
   module Models
     module Beta
-      class AssistantToolChoiceFunction < OpenAI::BaseModel
+      class AssistantToolChoiceFunction < OpenAI::Internal::Type::BaseModel
         # @!attribute name
         #   The name of the function to call.
         #
         #   @return [String]
         required :name, String

-        # @!parse
-        #   # @param name [String]
-        #   #
-        #   def initialize(name:, **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(name:)
+        #   @param name [String] The name of the function to call.
       end
     end
   end
diff --git a/lib/openai/models/beta/assistant_tool_choice_option.rb b/lib/openai/models/beta/assistant_tool_choice_option.rb
index 84736979..8bfdb818 100644
--- a/lib/openai/models/beta/assistant_tool_choice_option.rb
+++ b/lib/openai/models/beta/assistant_tool_choice_option.rb
@@ -3,35 +3,39 @@
 module OpenAI
   module Models
     module Beta
-      # @abstract
-      #
       # Controls which (if any) tool is called by the model. `none` means the model will
-      #   not call any tools and instead generates a message. `auto` is the default value
-      #   and means the model can pick between generating a message or calling one or more
-      #   tools. `required` means the model must call one or more tools before responding
-      #   to the user. Specifying a particular tool like `{"type": "file_search"}` or
-      #   `{"type": "function", "function": {"name": "my_function"}}` forces the model to
-      #   call that tool.
-      class AssistantToolChoiceOption < OpenAI::Union
+      # not call any tools and instead generates a message. `auto` is the default value
+      # and means the model can pick between generating a message or calling one or more
+      # tools. `required` means the model must call one or more tools before responding
+      # to the user. Specifying a particular tool like `{"type": "file_search"}` or
+      # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+      # call that tool.
+      module AssistantToolChoiceOption
+        extend OpenAI::Internal::Type::Union
+
         # `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user.
-        variant enum: -> { OpenAI::Models::Beta::AssistantToolChoiceOption::Auto }
+        variant enum: -> { OpenAI::Beta::AssistantToolChoiceOption::Auto }

         # Specifies a tool the model should use. Use to force the model to call a specific tool.
-        variant -> { OpenAI::Models::Beta::AssistantToolChoice }
+        variant -> { OpenAI::Beta::AssistantToolChoice }

-        # @abstract
-        #
         # `none` means the model will not call any tools and instead generates a message.
-        #   `auto` means the model can pick between generating a message or calling one or
-        #   more tools. `required` means the model must call one or more tools before
-        #   responding to the user.
-        class Auto < OpenAI::Enum
+        # `auto` means the model can pick between generating a message or calling one or
+        # more tools. `required` means the model must call one or more tools before
+        # responding to the user.
+        module Auto
+          extend OpenAI::Internal::Type::Enum
+
           NONE = :none
           AUTO = :auto
           REQUIRED = :required

-          finalize!
+          # @!method self.values
+          #   @return [Array<Symbol>]
         end
+
+        # @!method self.variants
+        #   @return [Array(Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice)]
       end
     end
   end
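Since the union accepts either a bare mode symbol or a structured tool choice, both request shapes look like the sketch below. The keyword names are taken from the `@!method initialize` documentation above; `my_function` stands in for a real function name:

# Let the model decide whether to call tools:
tool_choice = :auto

# Force a call to one specific function tool:
tool_choice = OpenAI::Models::Beta::AssistantToolChoice.new(
  type: :function,
  function: OpenAI::Models::Beta::AssistantToolChoiceFunction.new(name: "my_function")
)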
diff --git a/lib/openai/models/beta/assistant_update_params.rb b/lib/openai/models/beta/assistant_update_params.rb
index 5cedcb79..75ec6a82 100644
--- a/lib/openai/models/beta/assistant_update_params.rb
+++ b/lib/openai/models/beta/assistant_update_params.rb
@@ -3,10 +3,10 @@
 module OpenAI
   module Models
     module Beta
-      class AssistantUpdateParams < OpenAI::BaseModel
-        # @!parse
-        #   extend OpenAI::RequestParameters::Converter
-        include OpenAI::RequestParameters
+      # @see OpenAI::Resources::Beta::Assistants#update
+      class AssistantUpdateParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters

         # @!attribute description
         #   The description of the assistant. The maximum length is 512 characters.
@@ -16,35 +16,31 @@ class AssistantUpdateParams < OpenAI::BaseModel
         # @!attribute instructions
         #   The system instructions that the assistant uses. The maximum length is 256,000
-        #     characters.
+        #   characters.
         #
         #   @return [String, nil]
         optional :instructions, String, nil?: true

         # @!attribute metadata
         #   Set of 16 key-value pairs that can be attached to an object. This can be useful
-        #     for storing additional information about the object in a structured format, and
-        #     querying for objects via API or the dashboard.
+        #   for storing additional information about the object in a structured format, and
+        #   querying for objects via API or the dashboard.
         #
-        #     Keys are strings with a maximum length of 64 characters. Values are strings with
-        #     a maximum length of 512 characters.
+        #   Keys are strings with a maximum length of 64 characters. Values are strings with
+        #   a maximum length of 512 characters.
         #
         #   @return [Hash{Symbol=>String}, nil]
-        optional :metadata, OpenAI::HashOf[String], nil?: true
+        optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

-        # @!attribute [r] model
+        # @!attribute model
         #   ID of the model to use. You can use the
-        #     [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-        #     see all of your available models, or see our
-        #     [Model overview](https://platform.openai.com/docs/models) for descriptions of
-        #     them.
+        #   [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+        #   see all of your available models, or see our
+        #   [Model overview](https://platform.openai.com/docs/models) for descriptions of
+        #   them.
         #
-        #   @return [String, Symbol, OpenAI::Models::Beta::AssistantUpdateParams::Model::AssistantSupportedModels, nil]
-        optional :model, union: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model }
-
-        # @!parse
-        #   # @return [String, Symbol, OpenAI::Models::Beta::AssistantUpdateParams::Model::AssistantSupportedModels]
-        #   attr_writer :model
+        #   @return [String, Symbol, OpenAI::Models::Beta::AssistantUpdateParams::Model, nil]
+        optional :model, union: -> { OpenAI::Beta::AssistantUpdateParams::Model }

         # @!attribute name
         #   The name of the assistant. The maximum length is 256 characters.
@@ -53,245 +49,308 @@ class AssistantUpdateParams < OpenAI::BaseModel
         optional :name, String, nil?: true

         # @!attribute reasoning_effort
-        #   **o-series models only**
-        #
-        #     Constrains effort on reasoning for
-        #     [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        #     supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-        #     result in faster responses and fewer tokens used on reasoning in a response.
+        #   Constrains effort on reasoning for
+        #   [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+        #   supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+        #   effort can result in faster responses and fewer tokens used on reasoning in a
+        #   response.
         #
         #   @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
-        optional :reasoning_effort, enum: -> { OpenAI::Models::ReasoningEffort }, nil?: true
+        optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

         # @!attribute response_format
         #   Specifies the format that the model must output. Compatible with
-        #     [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
-        #     [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
-        #     and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+        #   [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+        #   [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+        #   and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
         #
-        #     Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-        #     Outputs which ensures the model will match your supplied JSON schema. Learn more
-        #     in the
-        #     [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+        #   Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+        #   Outputs which ensures the model will match your supplied JSON schema. Learn more
+        #   in the
+        #   [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
         #
-        #     Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-        #     message the model generates is valid JSON.
+        #   Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+        #   message the model generates is valid JSON.
         #
-        #     **Important:** when using JSON mode, you **must** also instruct the model to
-        #     produce JSON yourself via a system or user message. Without this, the model may
-        #     generate an unending stream of whitespace until the generation reaches the token
-        #     limit, resulting in a long-running and seemingly "stuck" request. Also note that
-        #     the message content may be partially cut off if `finish_reason="length"`, which
-        #     indicates the generation exceeded `max_tokens` or the conversation exceeded the
-        #     max context length.
+        #   **Important:** when using JSON mode, you **must** also instruct the model to
+        #   produce JSON yourself via a system or user message. Without this, the model may
+        #   generate an unending stream of whitespace until the generation reaches the token
+        #   limit, resulting in a long-running and seemingly "stuck" request. Also note that
+        #   the message content may be partially cut off if `finish_reason="length"`, which
+        #   indicates the generation exceeded `max_tokens` or the conversation exceeded the
+        #   max context length.
         #
         #   @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil]
-        optional :response_format, union: -> { OpenAI::Models::Beta::AssistantResponseFormatOption }, nil?: true
+        optional :response_format, union: -> { OpenAI::Beta::AssistantResponseFormatOption }, nil?: true

         # @!attribute temperature
         #   What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
-        #     make the output more random, while lower values like 0.2 will make it more
-        #     focused and deterministic.
+        #   make the output more random, while lower values like 0.2 will make it more
+        #   focused and deterministic.
         #
         #   @return [Float, nil]
         optional :temperature, Float, nil?: true

         # @!attribute tool_resources
         #   A set of resources that are used by the assistant's tools. The resources are
-        #     specific to the type of tool. For example, the `code_interpreter` tool requires
-        #     a list of file IDs, while the `file_search` tool requires a list of vector store
-        #     IDs.
+        #   specific to the type of tool. For example, the `code_interpreter` tool requires
+        #   a list of file IDs, while the `file_search` tool requires a list of vector store
+        #   IDs.
         #
         #   @return [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources, nil]
-        optional :tool_resources, -> { OpenAI::Models::Beta::AssistantUpdateParams::ToolResources }, nil?: true
+        optional :tool_resources, -> { OpenAI::Beta::AssistantUpdateParams::ToolResources }, nil?: true

-        # @!attribute [r] tools
+        # @!attribute tools
         #   A list of tools enabled on the assistant. There can be a maximum of 128 tools per
-        #     assistant. Tools can be of types `code_interpreter`, `file_search`, or
-        #     `function`.
+        #   assistant. Tools can be of types `code_interpreter`, `file_search`, or
+        #   `function`.
         #
         #   @return [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>, nil]
-        optional :tools, -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool] }
-
-        # @!parse
-        #   # @return [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>]
-        #   attr_writer :tools
+        optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool] }

         # @!attribute top_p
         #   An alternative to sampling with temperature, called nucleus sampling, where the
-        #     model considers the results of the tokens with top_p probability mass. So 0.1
-        #     means only the tokens comprising the top 10% probability mass are considered.
+        #   model considers the results of the tokens with top_p probability mass. So 0.1
+        #   means only the tokens comprising the top 10% probability mass are considered.
         #
-        #     We generally recommend altering this or temperature but not both.
+        #   We generally recommend altering this or temperature but not both.
         #
         #   @return [Float, nil]
         optional :top_p, Float, nil?: true

-        # @!parse
-        #   # @param description [String, nil]
-        #   # @param instructions [String, nil]
-        #   # @param metadata [Hash{Symbol=>String}, nil]
-        #   # @param model [String, Symbol, OpenAI::Models::Beta::AssistantUpdateParams::Model::AssistantSupportedModels]
-        #   # @param name [String, nil]
-        #   # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil]
-        #   # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil]
-        #   # @param temperature [Float, nil]
-        #   # @param tool_resources [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources, nil]
-        #   # @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>]
-        #   # @param top_p [Float, nil]
-        #   # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-        #   #
-        #   def initialize(
-        #     description: nil,
-        #     instructions: nil,
-        #     metadata: nil,
-        #     model: nil,
-        #     name: nil,
-        #     reasoning_effort: nil,
-        #     response_format: nil,
-        #     temperature: nil,
-        #     tool_resources: nil,
-        #     tools: nil,
-        #     top_p: nil,
-        #     request_options: {},
-        #     **
-        #   )
-        #     super
-        #   end
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
-
-        # @abstract
+        # @!method initialize(description: nil, instructions: nil, metadata: nil, model: nil, name: nil, reasoning_effort: nil, response_format: nil, temperature: nil, tool_resources: nil, tools: nil, top_p: nil, request_options: {})
+        #   Some parameter documentation has been truncated, see
+        #   {OpenAI::Models::Beta::AssistantUpdateParams} for more details.
+        #
+        #   @param description [String, nil] The description of the assistant. The maximum length is 512 characters.
+        #
+        #   @param instructions [String, nil] The system instructions that the assistant uses. The maximum length is 256,000 c
+        #
+        #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
+        #
+        #   @param model [String, Symbol, OpenAI::Models::Beta::AssistantUpdateParams::Model] ID of the model to use. You can use the [List models](https://platform.openai.co
+        #
+        #   @param name [String, nil] The name of the assistant. The maximum length is 256 characters.
+        #
+        #   @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
+        #
+        #   @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
+        #
+        #   @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
+        #
+        #   @param tool_resources [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe
+        #
+        #   @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>] A list of tools enabled on the assistant. There can be a maximum of 128 tools per
+        #
+        #   @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
+        #
+        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

         #   ID of the model to use. You can use the
-        #     [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-        #     see all of your available models, or see our
-        #     [Model overview](https://platform.openai.com/docs/models) for descriptions of
-        #     them.
-        class Model < OpenAI::Union
+        #   [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+        #   see all of your available models, or see our
+        #   [Model overview](https://platform.openai.com/docs/models) for descriptions of
+        #   them.
+        module Model
+          extend OpenAI::Internal::Type::Union
+
           variant String

-          # ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) for descriptions of them.
-          variant enum: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::AssistantSupportedModels }
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5 }

-          # @abstract
-          #
-          # ID of the model to use. You can use the
-          #   [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-          #   see all of your available models, or see our
-          #   [Model overview](https://platform.openai.com/docs/models) for descriptions of
-          #   them.
-          class AssistantSupportedModels < OpenAI::Enum
-            O3_MINI = :"o3-mini"
-            O3_MINI_2025_01_31 = :"o3-mini-2025-01-31"
-            O1 = :o1
-            O1_2024_12_17 = :"o1-2024-12-17"
-            GPT_4O = :"gpt-4o"
-            GPT_4O_2024_11_20 = :"gpt-4o-2024-11-20"
-            GPT_4O_2024_08_06 = :"gpt-4o-2024-08-06"
-            GPT_4O_2024_05_13 = :"gpt-4o-2024-05-13"
-            GPT_4O_MINI = :"gpt-4o-mini"
-            GPT_4O_MINI_2024_07_18 = :"gpt-4o-mini-2024-07-18"
-            GPT_4_5_PREVIEW = :"gpt-4.5-preview"
-            GPT_4_5_PREVIEW_2025_02_27 = :"gpt-4.5-preview-2025-02-27"
-            GPT_4_TURBO = :"gpt-4-turbo"
-            GPT_4_TURBO_2024_04_09 = :"gpt-4-turbo-2024-04-09"
-            GPT_4_0125_PREVIEW = :"gpt-4-0125-preview"
-            GPT_4_TURBO_PREVIEW = :"gpt-4-turbo-preview"
-            GPT_4_1106_PREVIEW = :"gpt-4-1106-preview"
-            GPT_4_VISION_PREVIEW = :"gpt-4-vision-preview"
-            GPT_4 = :"gpt-4"
-            GPT_4_0314 = :"gpt-4-0314"
-            GPT_4_0613 = :"gpt-4-0613"
-            GPT_4_32K = :"gpt-4-32k"
-            GPT_4_32K_0314 = :"gpt-4-32k-0314"
-            GPT_4_32K_0613 = :"gpt-4-32k-0613"
-            GPT_3_5_TURBO = :"gpt-3.5-turbo"
-            GPT_3_5_TURBO_16K = :"gpt-3.5-turbo-16k"
-            GPT_3_5_TURBO_0613 = :"gpt-3.5-turbo-0613"
-            GPT_3_5_TURBO_1106 = :"gpt-3.5-turbo-1106"
-            GPT_3_5_TURBO_0125 = :"gpt-3.5-turbo-0125"
-            GPT_3_5_TURBO_16K_0613 = :"gpt-3.5-turbo-16k-0613"
-
-            finalize!
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_MINI }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_NANO }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_2025_08_07 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_MINI_2025_08_07 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_NANO_2025_08_07 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1_MINI }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1_NANO }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1_2025_04_14 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1_MINI_2025_04_14 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1_NANO_2025_04_14 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::O3_MINI }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::O3_MINI_2025_01_31 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::O1 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::O1_2024_12_17 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O_2024_11_20 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O_2024_08_06 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O_2024_05_13 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O_MINI }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O_MINI_2024_07_18 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_5_PREVIEW }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_5_PREVIEW_2025_02_27 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_TURBO }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_TURBO_2024_04_09 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_0125_PREVIEW }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_TURBO_PREVIEW }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1106_PREVIEW }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_VISION_PREVIEW }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_0314 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_0613 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_32K }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_32K_0314 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_32K_0613 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO_16K }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO_0613 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO_1106 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO_0125 }
+
+          variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO_16K_0613 }
+
+          # @!method self.variants
+          #   @return [Array(String, Symbol)]
+
+          define_sorbet_constant!(:Variants) do
+            T.type_alias { T.any(String, OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol) }
           end
+
+          # @!group
+
+          GPT_5 = :"gpt-5"
+          GPT_5_MINI = :"gpt-5-mini"
+          GPT_5_NANO = :"gpt-5-nano"
+          GPT_5_2025_08_07 = :"gpt-5-2025-08-07"
+          GPT_5_MINI_2025_08_07 = :"gpt-5-mini-2025-08-07"
+          GPT_5_NANO_2025_08_07 = :"gpt-5-nano-2025-08-07"
+          GPT_4_1 = :"gpt-4.1"
+          GPT_4_1_MINI = :"gpt-4.1-mini"
+          GPT_4_1_NANO = :"gpt-4.1-nano"
+          GPT_4_1_2025_04_14 = :"gpt-4.1-2025-04-14"
+          GPT_4_1_MINI_2025_04_14 = :"gpt-4.1-mini-2025-04-14"
+          GPT_4_1_NANO_2025_04_14 = :"gpt-4.1-nano-2025-04-14"
+          O3_MINI = :"o3-mini"
+          O3_MINI_2025_01_31 = :"o3-mini-2025-01-31"
+          O1 = :o1
+          O1_2024_12_17 = :"o1-2024-12-17"
+          GPT_4O = :"gpt-4o"
+          GPT_4O_2024_11_20 = :"gpt-4o-2024-11-20"
+          GPT_4O_2024_08_06 = :"gpt-4o-2024-08-06"
+          GPT_4O_2024_05_13 = :"gpt-4o-2024-05-13"
+          GPT_4O_MINI = :"gpt-4o-mini"
+          GPT_4O_MINI_2024_07_18 = :"gpt-4o-mini-2024-07-18"
+          GPT_4_5_PREVIEW = :"gpt-4.5-preview"
+          GPT_4_5_PREVIEW_2025_02_27 = :"gpt-4.5-preview-2025-02-27"
+          GPT_4_TURBO = :"gpt-4-turbo"
+          GPT_4_TURBO_2024_04_09 = :"gpt-4-turbo-2024-04-09"
+          GPT_4_0125_PREVIEW = :"gpt-4-0125-preview"
+          GPT_4_TURBO_PREVIEW = :"gpt-4-turbo-preview"
+          GPT_4_1106_PREVIEW = :"gpt-4-1106-preview"
+          GPT_4_VISION_PREVIEW = :"gpt-4-vision-preview"
+          GPT_4 = :"gpt-4"
+          GPT_4_0314 = :"gpt-4-0314"
+          GPT_4_0613 = :"gpt-4-0613"
+          GPT_4_32K = :"gpt-4-32k"
+          GPT_4_32K_0314 = :"gpt-4-32k-0314"
+          GPT_4_32K_0613 = :"gpt-4-32k-0613"
+          GPT_3_5_TURBO = :"gpt-3.5-turbo"
+          GPT_3_5_TURBO_16K = :"gpt-3.5-turbo-16k"
+          GPT_3_5_TURBO_0613 = :"gpt-3.5-turbo-0613"
+          GPT_3_5_TURBO_1106 = :"gpt-3.5-turbo-1106"
+          GPT_3_5_TURBO_0125 = :"gpt-3.5-turbo-0125"
+          GPT_3_5_TURBO_16K_0613 = :"gpt-3.5-turbo-16k-0613"
+
+          # @!endgroup
         end

-        class ToolResources < OpenAI::BaseModel
-          # @!attribute [r] code_interpreter
+        class ToolResources < OpenAI::Internal::Type::BaseModel
+          # @!attribute code_interpreter
           #
           #   @return [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter, nil]
-          optional :code_interpreter,
-                   -> { OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter }
-
-          # @!parse
-          #   # @return [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter]
-          #   attr_writer :code_interpreter
+          optional :code_interpreter, -> { OpenAI::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter }

-          # @!attribute [r] file_search
+          # @!attribute file_search
           #
           #   @return [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch, nil]
-          optional :file_search, -> { OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch }
-
-          # @!parse
-          #   # @return [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch]
-          #   attr_writer :file_search
-
-          # @!parse
-          #   # A set of resources that are used by the assistant's tools. The resources are
-          #   #   specific to the type of tool. For example, the `code_interpreter` tool requires
-          #   #   a list of file IDs, while the `file_search` tool requires a list of vector store
-          #   #   IDs.
-          #   #
-          #   # @param code_interpreter [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter]
-          #   # @param file_search [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch]
-          #   #
-          #   def initialize(code_interpreter: nil, file_search: nil, **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
-
-          class CodeInterpreter < OpenAI::BaseModel
-            # @!attribute [r] file_ids
+          optional :file_search, -> { OpenAI::Beta::AssistantUpdateParams::ToolResources::FileSearch }
+
+          # @!method initialize(code_interpreter: nil, file_search: nil)
+          #   A set of resources that are used by the assistant's tools. The resources are
+          #   specific to the type of tool. For example, the `code_interpreter` tool requires
+          #   a list of file IDs, while the `file_search` tool requires a list of vector store
+          #   IDs.
+          #
+          #   @param code_interpreter [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter]
+          #   @param file_search [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch]

+          # @see OpenAI::Models::Beta::AssistantUpdateParams::ToolResources#code_interpreter
+          class CodeInterpreter < OpenAI::Internal::Type::BaseModel
+            # @!attribute file_ids
            #   Overrides the list of
-            #     [file](https://platform.openai.com/docs/api-reference/files) IDs made available
-            #     to the `code_interpreter` tool. There can be a maximum of 20 files associated
-            #     with the tool.
+            #   [file](https://platform.openai.com/docs/api-reference/files) IDs made available
+            #   to the `code_interpreter` tool. There can be a maximum of 20 files associated
+            #   with the tool.
             #
             #   @return [Array<String>, nil]
-            optional :file_ids, OpenAI::ArrayOf[String]
-
-            # @!parse
-            #   # @return [Array<String>]
-            #   attr_writer :file_ids
-
-            # @!parse
-            #   # @param file_ids [Array<String>]
-            #   #
-            #   def initialize(file_ids: nil, **) = super
+            optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]

-            # def initialize: (Hash | OpenAI::BaseModel) -> void
+            # @!method initialize(file_ids: nil)
+            #   Some parameter documentation has been truncated, see
+            #   {OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter}
+            #   for more details.
+            #
+            #   @param file_ids [Array<String>] Overrides the list of [file](https://platform.openai.com/docs/api-reference/file
           end

-          class FileSearch < OpenAI::BaseModel
-            # @!attribute [r] vector_store_ids
+          # @see OpenAI::Models::Beta::AssistantUpdateParams::ToolResources#file_search
+          class FileSearch < OpenAI::Internal::Type::BaseModel
+            # @!attribute vector_store_ids
             #   Overrides the
-            #     [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-            #     attached to this assistant. There can be a maximum of 1 vector store attached to
-            #     the assistant.
+            #   [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+            #   attached to this assistant. There can be a maximum of 1 vector store attached to
+            #   the assistant.
             #
             #   @return [Array<String>, nil]
-            optional :vector_store_ids, OpenAI::ArrayOf[String]
-
-            # @!parse
-            #   # @return [Array<String>]
-            #   attr_writer :vector_store_ids
+            optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String]

-            # @!parse
-            #   # @param vector_store_ids [Array<String>]
-            #   #
-            #   def initialize(vector_store_ids: nil, **) = super
-
-            # def initialize: (Hash | OpenAI::BaseModel) -> void
+            # @!method initialize(vector_store_ids: nil)
+            #   Some parameter documentation has been truncated, see
+            #   {OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch} for
+            #   more details.
+            #
+            #   @param vector_store_ids [Array<String>] Overrides the [vector store](https://platform.openai.com/docs/api-reference/vect
           end
         end
       end
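A rough usage sketch of the update parameters documented above. The keyword names mirror the `@!method initialize` signature; the client construction, the positional assistant ID, and the hash-to-model coercion are assumptions about the SDK's calling convention, and `asst_123`/`vs_123`/`file-abc` are placeholder IDs:

client = OpenAI::Client.new # assumed to read OPENAI_API_KEY from the environment

client.beta.assistants.update(
  "asst_123",
  model: OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1,
  reasoning_effort: :low, # reasoning models only
  response_format: {type: :json_object}, # JSON mode: also instruct the model to emit JSON
  tool_resources: {
    code_interpreter: {file_ids: ["file-abc"]}, # at most 20 files
    file_search: {vector_store_ids: ["vs_123"]} # at most 1 vector store
  },
  tools: [{type: :code_interpreter}] # up to 128 tools per assistant
)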
diff --git a/lib/openai/models/beta/code_interpreter_tool.rb b/lib/openai/models/beta/code_interpreter_tool.rb
index 0e226273..ee84099e 100644
--- a/lib/openai/models/beta/code_interpreter_tool.rb
+++ b/lib/openai/models/beta/code_interpreter_tool.rb
@@ -3,19 +3,15 @@
 module OpenAI
   module Models
     module Beta
-      class CodeInterpreterTool < OpenAI::BaseModel
+      class CodeInterpreterTool < OpenAI::Internal::Type::BaseModel
         # @!attribute type
         #   The type of tool being defined: `code_interpreter`
         #
         #   @return [Symbol, :code_interpreter]
         required :type, const: :code_interpreter

-        # @!parse
-        #   # @param type [Symbol, :code_interpreter]
-        #   #
-        #   def initialize(type: :code_interpreter, **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(type: :code_interpreter)
+        #   @param type [Symbol, :code_interpreter] The type of tool being defined: `code_interpreter`
       end
     end
   end
diff --git a/lib/openai/models/beta/file_search_tool.rb b/lib/openai/models/beta/file_search_tool.rb
index 333f1770..e12b3e5a 100644
--- a/lib/openai/models/beta/file_search_tool.rb
+++ b/lib/openai/models/beta/file_search_tool.rb
@@ -3,117 +3,104 @@
 module OpenAI
   module Models
     module Beta
-      class FileSearchTool < OpenAI::BaseModel
+      class FileSearchTool < OpenAI::Internal::Type::BaseModel
         # @!attribute type
         #   The type of tool being defined: `file_search`
         #
         #   @return [Symbol, :file_search]
         required :type, const: :file_search

-        # @!attribute [r] file_search
+        # @!attribute file_search
         #   Overrides for the file search tool.
         #
         #   @return [OpenAI::Models::Beta::FileSearchTool::FileSearch, nil]
-        optional :file_search, -> { OpenAI::Models::Beta::FileSearchTool::FileSearch }
+        optional :file_search, -> { OpenAI::Beta::FileSearchTool::FileSearch }

-        # @!parse
-        #   # @return [OpenAI::Models::Beta::FileSearchTool::FileSearch]
-        #   attr_writer :file_search
-
-        # @!parse
-        #   # @param file_search [OpenAI::Models::Beta::FileSearchTool::FileSearch]
-        #   # @param type [Symbol, :file_search]
-        #   #
-        #   def initialize(file_search: nil, type: :file_search, **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(file_search: nil, type: :file_search)
+        #   @param file_search [OpenAI::Models::Beta::FileSearchTool::FileSearch] Overrides for the file search tool.
+        #
+        #   @param type [Symbol, :file_search] The type of tool being defined: `file_search`

-        class FileSearch < OpenAI::BaseModel
-          # @!attribute [r] max_num_results
+        # @see OpenAI::Models::Beta::FileSearchTool#file_search
+        class FileSearch < OpenAI::Internal::Type::BaseModel
+          # @!attribute max_num_results
           #   The maximum number of results the file search tool should output. The default is
-          #     20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between
-          #     1 and 50 inclusive.
+          #   20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between
+          #   1 and 50 inclusive.
           #
-          #     Note that the file search tool may output fewer than `max_num_results` results.
-          #     See the
-          #     [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
-          #     for more information.
+          #   Note that the file search tool may output fewer than `max_num_results` results.
+          #   See the
+          #   [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+          #   for more information.
           #
           #   @return [Integer, nil]
           optional :max_num_results, Integer

-          # @!parse
-          #   # @return [Integer]
-          #   attr_writer :max_num_results
-
-          # @!attribute [r] ranking_options
+          # @!attribute ranking_options
          #   The ranking options for the file search. If not specified, the file search tool
-          #     will use the `auto` ranker and a score_threshold of 0.
+          #   will use the `auto` ranker and a score_threshold of 0.
           #
-          #     See the
-          #     [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
-          #     for more information.
+          #   See the
+          #   [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+          #   for more information.
           #
           #   @return [OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions, nil]
-          optional :ranking_options, -> { OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions }
-
-          # @!parse
-          #   # @return [OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions]
-          #   attr_writer :ranking_options
+          optional :ranking_options, -> { OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions }

-          # @!parse
-          #   # Overrides for the file search tool.
-          #   #
-          #   # @param max_num_results [Integer]
-          #   # @param ranking_options [OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions]
-          #   #
-          #   def initialize(max_num_results: nil, ranking_options: nil, **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(max_num_results: nil, ranking_options: nil)
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::FileSearchTool::FileSearch} for more details.
+          #
+          #   Overrides for the file search tool.
+          #
+          #   @param max_num_results [Integer] The maximum number of results the file search tool should output. The default is
+          #
+          #   @param ranking_options [OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions] The ranking options for the file search. If not specified, the file search tool

-          class RankingOptions < OpenAI::BaseModel
+          # @see OpenAI::Models::Beta::FileSearchTool::FileSearch#ranking_options
+          class RankingOptions < OpenAI::Internal::Type::BaseModel
            # @!attribute score_threshold
            #   The score threshold for the file search. All values must be a floating point
-            #     number between 0 and 1.
+            #   number between 0 and 1.
             #
             #   @return [Float]
             required :score_threshold, Float

-            # @!attribute [r] ranker
+            # @!attribute ranker
             #   The ranker to use for the file search. If not specified will use the `auto`
-            #     ranker.
+            #   ranker.
             #
             #   @return [Symbol, OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker, nil]
-            optional :ranker, enum: -> { OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker }
+            optional :ranker, enum: -> { OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker }

-            # @!parse
-            #   # @return [Symbol, OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker]
-            #   attr_writer :ranker
-
-            # @!parse
-            #   # The ranking options for the file search. If not specified, the file search tool
-            #   #   will use the `auto` ranker and a score_threshold of 0.
-            #   #
-            #   # See the
-            #   #   [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
-            #   #   for more information.
-            #   #
-            #   # @param score_threshold [Float]
-            #   # @param ranker [Symbol, OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker]
-            #   #
-            #   def initialize(score_threshold:, ranker: nil, **) = super
-
-            # def initialize: (Hash | OpenAI::BaseModel) -> void
-
-            # @abstract
+            # @!method initialize(score_threshold:, ranker: nil)
+            #   Some parameter documentation has been truncated, see
+            #   {OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions} for more
+            #   details.
+            #
+            #   The ranking options for the file search. If not specified, the file search tool
+            #   will use the `auto` ranker and a score_threshold of 0.
+            #
+            #   See the
+            #   [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+            #   for more information.
            #
+            #   @param score_threshold [Float] The score threshold for the file search. All values must be a floating point num
+            #
+            #   @param ranker [Symbol, OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker] The ranker to use for the file search. If not specified will use the `auto` rank
+
            #   The ranker to use for the file search. If not specified will use the `auto`
-            class Ranker < OpenAI::Enum
+            #   ranker.
+            #
+            # @see OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions#ranker
+            module Ranker
+              extend OpenAI::Internal::Type::Enum
+
               AUTO = :auto
               DEFAULT_2024_08_21 = :default_2024_08_21

-              finalize!
+              # @!method self.values
+              #   @return [Array<Symbol>]
             end
           end
         end
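A short sketch of overriding the ranking behaviour described above, staying within the documented bounds (1 to 50 results, score threshold between 0 and 1); the constructor keywords follow the `@!method initialize` documentation:

file_search_tool = OpenAI::Models::Beta::FileSearchTool.new(
  file_search: OpenAI::Models::Beta::FileSearchTool::FileSearch.new(
    max_num_results: 10, # between 1 and 50 inclusive
    ranking_options: OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions.new(
      score_threshold: 0.6, # drop weakly matching results
      ranker: :auto
    )
  )
)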
diff --git a/lib/openai/models/beta/function_tool.rb b/lib/openai/models/beta/function_tool.rb
index 5b9cec7a..361c2c44 100644
--- a/lib/openai/models/beta/function_tool.rb
+++ b/lib/openai/models/beta/function_tool.rb
@@ -3,11 +3,11 @@
 module OpenAI
   module Models
     module Beta
-      class FunctionTool < OpenAI::BaseModel
+      class FunctionTool < OpenAI::Internal::Type::BaseModel
         # @!attribute function
         #
         #   @return [OpenAI::Models::FunctionDefinition]
-        required :function, -> { OpenAI::Models::FunctionDefinition }
+        required :function, -> { OpenAI::FunctionDefinition }

         # @!attribute type
         #   The type of tool being defined: `function`
@@ -15,13 +15,10 @@ class FunctionTool < OpenAI::BaseModel
         #   @return [Symbol, :function]
         required :type, const: :function

-        # @!parse
-        #   # @param function [OpenAI::Models::FunctionDefinition]
-        #   # @param type [Symbol, :function]
-        #   #
-        #   def initialize(function:, type: :function, **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(function:, type: :function)
+        #   @param function [OpenAI::Models::FunctionDefinition]
+        #
+        #   @param type [Symbol, :function] The type of tool being defined: `function`
       end
     end
   end
diff --git a/lib/openai/models/beta/message_stream_event.rb b/lib/openai/models/beta/message_stream_event.rb
index 32e6ee21..1c147dd7 100644
--- a/lib/openai/models/beta/message_stream_event.rb
+++ b/lib/openai/models/beta/message_stream_event.rb
@@ -3,161 +3,165 @@
 module OpenAI
   module Models
     module Beta
-      # @abstract
-      #
       # Occurs when a
-      #   [message](https://platform.openai.com/docs/api-reference/messages/object) is
-      #   created.
-      class MessageStreamEvent < OpenAI::Union
+      # [message](https://platform.openai.com/docs/api-reference/messages/object) is
+      # created.
+      module MessageStreamEvent
+        extend OpenAI::Internal::Type::Union
+
         discriminator :event

         # Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is created.
-        variant :"thread.message.created", -> { OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCreated }
+        variant :"thread.message.created", -> { OpenAI::Beta::MessageStreamEvent::ThreadMessageCreated }

         # Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) moves to an `in_progress` state.
-        variant :"thread.message.in_progress",
-                -> { OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageInProgress }
+        variant :"thread.message.in_progress", -> { OpenAI::Beta::MessageStreamEvent::ThreadMessageInProgress }

         # Occurs when parts of a [Message](https://platform.openai.com/docs/api-reference/messages/object) are being streamed.
-        variant :"thread.message.delta", -> { OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageDelta }
+        variant :"thread.message.delta", -> { OpenAI::Beta::MessageStreamEvent::ThreadMessageDelta }

         # Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is completed.
-        variant :"thread.message.completed",
-                -> { OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCompleted }
+        variant :"thread.message.completed", -> { OpenAI::Beta::MessageStreamEvent::ThreadMessageCompleted }

         # Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) ends before it is completed.
-        variant :"thread.message.incomplete",
-                -> { OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageIncomplete }
+        variant :"thread.message.incomplete", -> { OpenAI::Beta::MessageStreamEvent::ThreadMessageIncomplete }

-        class ThreadMessageCreated < OpenAI::BaseModel
+        class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a message within a
-          #     [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Message]
-          required :data, -> { OpenAI::Models::Beta::Threads::Message }
+          required :data, -> { OpenAI::Beta::Threads::Message }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.message.created"]
           required :event, const: :"thread.message.created"

-          # @!parse
-          #   # Occurs when a
-          #   #   [message](https://platform.openai.com/docs/api-reference/messages/object) is
-          #   #   created.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Message]
-          #   # @param event [Symbol, :"thread.message.created"]
-          #   #
-          #   def initialize(data:, event: :"thread.message.created", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.message.created")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCreated} for more
+          #   details.
+          #
+          #   Occurs when a
+          #   [message](https://platform.openai.com/docs/api-reference/messages/object) is
+          #   created.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
+          #
+          #   @param event [Symbol, :"thread.message.created"]
         end

-        class ThreadMessageInProgress < OpenAI::BaseModel
+        class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a message within a
-          #     [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Message]
-          required :data, -> { OpenAI::Models::Beta::Threads::Message }
+          required :data, -> { OpenAI::Beta::Threads::Message }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.message.in_progress"]
           required :event, const: :"thread.message.in_progress"

-          # @!parse
-          #   # Occurs when a
-          #   #   [message](https://platform.openai.com/docs/api-reference/messages/object) moves
-          #   #   to an `in_progress` state.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Message]
-          #   # @param event [Symbol, :"thread.message.in_progress"]
-          #   #
-          #   def initialize(data:, event: :"thread.message.in_progress", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.message.in_progress")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageInProgress} for more
+          #   details.
+          #
+          #   Occurs when a
+          #   [message](https://platform.openai.com/docs/api-reference/messages/object) moves
+          #   to an `in_progress` state.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
+          #
+          #   @param event [Symbol, :"thread.message.in_progress"]
         end

-        class ThreadMessageDelta < OpenAI::BaseModel
+        class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a message delta i.e. any changed fields on a message during
-          #     streaming.
+          #   streaming.
           #
           #   @return [OpenAI::Models::Beta::Threads::MessageDeltaEvent]
-          required :data, -> { OpenAI::Models::Beta::Threads::MessageDeltaEvent }
+          required :data, -> { OpenAI::Beta::Threads::MessageDeltaEvent }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.message.delta"]
           required :event, const: :"thread.message.delta"

-          # @!parse
-          #   # Occurs when parts of a
-          #   #   [Message](https://platform.openai.com/docs/api-reference/messages/object) are
-          #   #   being streamed.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::MessageDeltaEvent]
-          #   # @param event [Symbol, :"thread.message.delta"]
-          #   #
-          #   def initialize(data:, event: :"thread.message.delta", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.message.delta")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageDelta} for more details.
+          #
+          #   Occurs when parts of a
+          #   [Message](https://platform.openai.com/docs/api-reference/messages/object) are
+          #   being streamed.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::MessageDeltaEvent] Represents a message delta i.e. any changed fields on a message during streaming
+          #
+          #   @param event [Symbol, :"thread.message.delta"]
         end

-        class ThreadMessageCompleted < OpenAI::BaseModel
+        class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a message within a
-          #     [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Message]
-          required :data, -> { OpenAI::Models::Beta::Threads::Message }
+          required :data, -> { OpenAI::Beta::Threads::Message }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.message.completed"]
           required :event, const: :"thread.message.completed"

-          # @!parse
-          #   # Occurs when a
-          #   #   [message](https://platform.openai.com/docs/api-reference/messages/object) is
-          #   #   completed.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Message]
-          #   # @param event [Symbol, :"thread.message.completed"]
-          #   #
-          #   def initialize(data:, event: :"thread.message.completed", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.message.completed")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCompleted} for more
+          #   details.
+          #
+          #   Occurs when a
+          #   [message](https://platform.openai.com/docs/api-reference/messages/object) is
+          #   completed.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
+          #
+          #   @param event [Symbol, :"thread.message.completed"]
         end

-        class ThreadMessageIncomplete < OpenAI::BaseModel
+        class ThreadMessageIncomplete < OpenAI::Internal::Type::BaseModel
           # @!attribute data
           #   Represents a message within a
-          #     [thread](https://platform.openai.com/docs/api-reference/threads).
+          #   [thread](https://platform.openai.com/docs/api-reference/threads).
           #
           #   @return [OpenAI::Models::Beta::Threads::Message]
-          required :data, -> { OpenAI::Models::Beta::Threads::Message }
+          required :data, -> { OpenAI::Beta::Threads::Message }

           # @!attribute event
           #
           #   @return [Symbol, :"thread.message.incomplete"]
           required :event, const: :"thread.message.incomplete"

-          # @!parse
-          #   # Occurs when a
-          #   #   [message](https://platform.openai.com/docs/api-reference/messages/object) ends
-          #   #   before it is completed.
-          #   #
-          #   # @param data [OpenAI::Models::Beta::Threads::Message]
-          #   # @param event [Symbol, :"thread.message.incomplete"]
-          #   #
-          #   def initialize(data:, event: :"thread.message.incomplete", **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(data:, event: :"thread.message.incomplete")
+          #   Some parameter documentation has been truncated, see
+          #   {OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageIncomplete} for more
+          #   details.
+          #
+          #   Occurs when a
+          #   [message](https://platform.openai.com/docs/api-reference/messages/object) ends
+          #   before it is completed.
+          #
+          #   @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
+          #
+          #   @param event [Symbol, :"thread.message.incomplete"]
         end
+
+        # @!method self.variants
+        #   @return [Array(OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCreated, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageInProgress, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageDelta, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCompleted, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageIncomplete)]
       end
     end
   end
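A common consumption pattern for these events is to buffer deltas until the completed event arrives. A sketch under stated assumptions: `events` yields the variant instances above, and the `data.delta`/`data.id` readers follow the underlying API objects (their exact payload shape is not shown in this diff):

buffer = +""
events.each do |ev|
  case ev
  when OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageDelta
    buffer << ev.data.delta.to_s # delta payload shape assumed
  when OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCompleted
    puts "message #{ev.data.id}: #{buffer}"
    buffer.clear
  end
end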
- variant :"thread.run.step.created", -> { OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCreated } + variant :"thread.run.step.created", -> { OpenAI::Beta::RunStepStreamEvent::ThreadRunStepCreated } # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) moves to an `in_progress` state. - variant :"thread.run.step.in_progress", - -> { OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepInProgress } + variant :"thread.run.step.in_progress", -> { OpenAI::Beta::RunStepStreamEvent::ThreadRunStepInProgress } # Occurs when parts of a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) are being streamed. - variant :"thread.run.step.delta", -> { OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepDelta } + variant :"thread.run.step.delta", -> { OpenAI::Beta::RunStepStreamEvent::ThreadRunStepDelta } # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is completed. - variant :"thread.run.step.completed", - -> { OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCompleted } + variant :"thread.run.step.completed", -> { OpenAI::Beta::RunStepStreamEvent::ThreadRunStepCompleted } # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) fails. - variant :"thread.run.step.failed", -> { OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepFailed } + variant :"thread.run.step.failed", -> { OpenAI::Beta::RunStepStreamEvent::ThreadRunStepFailed } # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is cancelled. - variant :"thread.run.step.cancelled", - -> { OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCancelled } + variant :"thread.run.step.cancelled", -> { OpenAI::Beta::RunStepStreamEvent::ThreadRunStepCancelled } # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) expires. - variant :"thread.run.step.expired", -> { OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepExpired } + variant :"thread.run.step.expired", -> { OpenAI::Beta::RunStepStreamEvent::ThreadRunStepExpired } - class ThreadRunStepCreated < OpenAI::BaseModel + class ThreadRunStepCreated < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a step in execution of a run. # # @return [OpenAI::Models::Beta::Threads::Runs::RunStep] - required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStep } + required :data, -> { OpenAI::Beta::Threads::Runs::RunStep } # @!attribute event # # @return [Symbol, :"thread.run.step.created"] required :event, const: :"thread.run.step.created" - # @!parse - # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # is created. - # # - # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] - # # @param event [Symbol, :"thread.run.step.created"] - # # - # def initialize(data:, event: :"thread.run.step.created", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.step.created") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCreated} for more + # details. + # + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is created. + # + # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run. 
+ # + # @param event [Symbol, :"thread.run.step.created"] end - class ThreadRunStepInProgress < OpenAI::BaseModel + class ThreadRunStepInProgress < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a step in execution of a run. # # @return [OpenAI::Models::Beta::Threads::Runs::RunStep] - required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStep } + required :data, -> { OpenAI::Beta::Threads::Runs::RunStep } # @!attribute event # # @return [Symbol, :"thread.run.step.in_progress"] required :event, const: :"thread.run.step.in_progress" - # @!parse - # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # moves to an `in_progress` state. - # # - # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] - # # @param event [Symbol, :"thread.run.step.in_progress"] - # # - # def initialize(data:, event: :"thread.run.step.in_progress", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.step.in_progress") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepInProgress} for more + # details. + # + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # moves to an `in_progress` state. + # + # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run. + # + # @param event [Symbol, :"thread.run.step.in_progress"] end - class ThreadRunStepDelta < OpenAI::BaseModel + class ThreadRunStepDelta < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a run step delta i.e. any changed fields on a run step during - # streaming. + # streaming. # # @return [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent] - required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent } + required :data, -> { OpenAI::Beta::Threads::Runs::RunStepDeltaEvent } # @!attribute event # # @return [Symbol, :"thread.run.step.delta"] required :event, const: :"thread.run.step.delta" - # @!parse - # # Occurs when parts of a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # are being streamed. - # # - # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent] - # # @param event [Symbol, :"thread.run.step.delta"] - # # - # def initialize(data:, event: :"thread.run.step.delta", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.step.delta") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepDelta} for more details. + # + # Occurs when parts of a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # are being streamed. + # + # @param data [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent] Represents a run step delta i.e. any changed fields on a run step during streami + # + # @param event [Symbol, :"thread.run.step.delta"] end - class ThreadRunStepCompleted < OpenAI::BaseModel + class ThreadRunStepCompleted < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a step in execution of a run. 
# # @return [OpenAI::Models::Beta::Threads::Runs::RunStep] - required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStep } + required :data, -> { OpenAI::Beta::Threads::Runs::RunStep } # @!attribute event # # @return [Symbol, :"thread.run.step.completed"] required :event, const: :"thread.run.step.completed" - # @!parse - # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # is completed. - # # - # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] - # # @param event [Symbol, :"thread.run.step.completed"] - # # - # def initialize(data:, event: :"thread.run.step.completed", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.step.completed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCompleted} for more + # details. + # + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is completed. + # + # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run. + # + # @param event [Symbol, :"thread.run.step.completed"] end - class ThreadRunStepFailed < OpenAI::BaseModel + class ThreadRunStepFailed < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a step in execution of a run. # # @return [OpenAI::Models::Beta::Threads::Runs::RunStep] - required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStep } + required :data, -> { OpenAI::Beta::Threads::Runs::RunStep } # @!attribute event # # @return [Symbol, :"thread.run.step.failed"] required :event, const: :"thread.run.step.failed" - # @!parse - # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # fails. - # # - # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] - # # @param event [Symbol, :"thread.run.step.failed"] - # # - # def initialize(data:, event: :"thread.run.step.failed", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.step.failed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepFailed} for more + # details. + # + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # fails. + # + # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run. + # + # @param event [Symbol, :"thread.run.step.failed"] end - class ThreadRunStepCancelled < OpenAI::BaseModel + class ThreadRunStepCancelled < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a step in execution of a run. # # @return [OpenAI::Models::Beta::Threads::Runs::RunStep] - required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStep } + required :data, -> { OpenAI::Beta::Threads::Runs::RunStep } # @!attribute event # # @return [Symbol, :"thread.run.step.cancelled"] required :event, const: :"thread.run.step.cancelled" - # @!parse - # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # is cancelled. 
- # # - # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] - # # @param event [Symbol, :"thread.run.step.cancelled"] - # # - # def initialize(data:, event: :"thread.run.step.cancelled", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.step.cancelled") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCancelled} for more + # details. + # + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is cancelled. + # + # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run. + # + # @param event [Symbol, :"thread.run.step.cancelled"] end - class ThreadRunStepExpired < OpenAI::BaseModel + class ThreadRunStepExpired < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a step in execution of a run. # # @return [OpenAI::Models::Beta::Threads::Runs::RunStep] - required :data, -> { OpenAI::Models::Beta::Threads::Runs::RunStep } + required :data, -> { OpenAI::Beta::Threads::Runs::RunStep } # @!attribute event # # @return [Symbol, :"thread.run.step.expired"] required :event, const: :"thread.run.step.expired" - # @!parse - # # Occurs when a - # # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) - # # expires. - # # - # # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] - # # @param event [Symbol, :"thread.run.step.expired"] - # # - # def initialize(data:, event: :"thread.run.step.expired", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.step.expired") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepExpired} for more + # details. + # + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # expires. + # + # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run. + # + # @param event [Symbol, :"thread.run.step.expired"] end + + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCreated, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepInProgress, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepDelta, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCompleted, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepFailed, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCancelled, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepExpired)] end end end diff --git a/lib/openai/models/beta/run_stream_event.rb b/lib/openai/models/beta/run_stream_event.rb index 1e792b0e..2bfe1450 100644 --- a/lib/openai/models/beta/run_stream_event.rb +++ b/lib/openai/models/beta/run_stream_event.rb @@ -3,293 +3,296 @@ module OpenAI module Models module Beta - # @abstract - # # Occurs when a new - # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. - class RunStreamEvent < OpenAI::Union + # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. + module RunStreamEvent + extend OpenAI::Internal::Type::Union + discriminator :event # Occurs when a new [run](https://platform.openai.com/docs/api-reference/runs/object) is created. 
- variant :"thread.run.created", -> { OpenAI::Models::Beta::RunStreamEvent::ThreadRunCreated } + variant :"thread.run.created", -> { OpenAI::Beta::RunStreamEvent::ThreadRunCreated } # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `queued` status. - variant :"thread.run.queued", -> { OpenAI::Models::Beta::RunStreamEvent::ThreadRunQueued } + variant :"thread.run.queued", -> { OpenAI::Beta::RunStreamEvent::ThreadRunQueued } # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to an `in_progress` status. - variant :"thread.run.in_progress", -> { OpenAI::Models::Beta::RunStreamEvent::ThreadRunInProgress } + variant :"thread.run.in_progress", -> { OpenAI::Beta::RunStreamEvent::ThreadRunInProgress } # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `requires_action` status. - variant :"thread.run.requires_action", - -> { OpenAI::Models::Beta::RunStreamEvent::ThreadRunRequiresAction } + variant :"thread.run.requires_action", -> { OpenAI::Beta::RunStreamEvent::ThreadRunRequiresAction } # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is completed. - variant :"thread.run.completed", -> { OpenAI::Models::Beta::RunStreamEvent::ThreadRunCompleted } + variant :"thread.run.completed", -> { OpenAI::Beta::RunStreamEvent::ThreadRunCompleted } # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) ends with status `incomplete`. - variant :"thread.run.incomplete", -> { OpenAI::Models::Beta::RunStreamEvent::ThreadRunIncomplete } + variant :"thread.run.incomplete", -> { OpenAI::Beta::RunStreamEvent::ThreadRunIncomplete } # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) fails. - variant :"thread.run.failed", -> { OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed } + variant :"thread.run.failed", -> { OpenAI::Beta::RunStreamEvent::ThreadRunFailed } # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `cancelling` status. - variant :"thread.run.cancelling", -> { OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelling } + variant :"thread.run.cancelling", -> { OpenAI::Beta::RunStreamEvent::ThreadRunCancelling } # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is cancelled. - variant :"thread.run.cancelled", -> { OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelled } + variant :"thread.run.cancelled", -> { OpenAI::Beta::RunStreamEvent::ThreadRunCancelled } # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) expires. - variant :"thread.run.expired", -> { OpenAI::Models::Beta::RunStreamEvent::ThreadRunExpired } + variant :"thread.run.expired", -> { OpenAI::Beta::RunStreamEvent::ThreadRunExpired } - class ThreadRunCreated < OpenAI::BaseModel + class ThreadRunCreated < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] - required :data, -> { OpenAI::Models::Beta::Threads::Run } + required :data, -> { OpenAI::Beta::Threads::Run } # @!attribute event # # @return [Symbol, :"thread.run.created"] required :event, const: :"thread.run.created" - # @!parse - # # Occurs when a new - # # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. 
- # # - # # @param data [OpenAI::Models::Beta::Threads::Run] - # # @param event [Symbol, :"thread.run.created"] - # # - # def initialize(data:, event: :"thread.run.created", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.created") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStreamEvent::ThreadRunCreated} for more details. + # + # Occurs when a new + # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. + # + # @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r + # + # @param event [Symbol, :"thread.run.created"] end - class ThreadRunQueued < OpenAI::BaseModel + class ThreadRunQueued < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] - required :data, -> { OpenAI::Models::Beta::Threads::Run } + required :data, -> { OpenAI::Beta::Threads::Run } # @!attribute event # # @return [Symbol, :"thread.run.queued"] required :event, const: :"thread.run.queued" - # @!parse - # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # moves to a `queued` status. - # # - # # @param data [OpenAI::Models::Beta::Threads::Run] - # # @param event [Symbol, :"thread.run.queued"] - # # - # def initialize(data:, event: :"thread.run.queued", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.queued") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStreamEvent::ThreadRunQueued} for more details. + # + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `queued` status. + # + # @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r + # + # @param event [Symbol, :"thread.run.queued"] end - class ThreadRunInProgress < OpenAI::BaseModel + class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] - required :data, -> { OpenAI::Models::Beta::Threads::Run } + required :data, -> { OpenAI::Beta::Threads::Run } # @!attribute event # # @return [Symbol, :"thread.run.in_progress"] required :event, const: :"thread.run.in_progress" - # @!parse - # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # moves to an `in_progress` status. - # # - # # @param data [OpenAI::Models::Beta::Threads::Run] - # # @param event [Symbol, :"thread.run.in_progress"] - # # - # def initialize(data:, event: :"thread.run.in_progress", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.in_progress") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStreamEvent::ThreadRunInProgress} for more details. + # + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to an `in_progress` status. 
+ # + # @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r + # + # @param event [Symbol, :"thread.run.in_progress"] end - class ThreadRunRequiresAction < OpenAI::BaseModel + class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] - required :data, -> { OpenAI::Models::Beta::Threads::Run } + required :data, -> { OpenAI::Beta::Threads::Run } # @!attribute event # # @return [Symbol, :"thread.run.requires_action"] required :event, const: :"thread.run.requires_action" - # @!parse - # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # moves to a `requires_action` status. - # # - # # @param data [OpenAI::Models::Beta::Threads::Run] - # # @param event [Symbol, :"thread.run.requires_action"] - # # - # def initialize(data:, event: :"thread.run.requires_action", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.requires_action") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStreamEvent::ThreadRunRequiresAction} for more + # details. + # + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `requires_action` status. + # + # @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r + # + # @param event [Symbol, :"thread.run.requires_action"] end - class ThreadRunCompleted < OpenAI::BaseModel + class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] - required :data, -> { OpenAI::Models::Beta::Threads::Run } + required :data, -> { OpenAI::Beta::Threads::Run } # @!attribute event # # @return [Symbol, :"thread.run.completed"] required :event, const: :"thread.run.completed" - # @!parse - # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # is completed. - # # - # # @param data [OpenAI::Models::Beta::Threads::Run] - # # @param event [Symbol, :"thread.run.completed"] - # # - # def initialize(data:, event: :"thread.run.completed", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.completed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStreamEvent::ThreadRunCompleted} for more details. + # + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # is completed. + # + # @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r + # + # @param event [Symbol, :"thread.run.completed"] end - class ThreadRunIncomplete < OpenAI::BaseModel + class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). 
# # @return [OpenAI::Models::Beta::Threads::Run] - required :data, -> { OpenAI::Models::Beta::Threads::Run } + required :data, -> { OpenAI::Beta::Threads::Run } # @!attribute event # # @return [Symbol, :"thread.run.incomplete"] required :event, const: :"thread.run.incomplete" - # @!parse - # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # ends with status `incomplete`. - # # - # # @param data [OpenAI::Models::Beta::Threads::Run] - # # @param event [Symbol, :"thread.run.incomplete"] - # # - # def initialize(data:, event: :"thread.run.incomplete", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.incomplete") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStreamEvent::ThreadRunIncomplete} for more details. + # + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # ends with status `incomplete`. + # + # @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r + # + # @param event [Symbol, :"thread.run.incomplete"] end - class ThreadRunFailed < OpenAI::BaseModel + class ThreadRunFailed < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] - required :data, -> { OpenAI::Models::Beta::Threads::Run } + required :data, -> { OpenAI::Beta::Threads::Run } # @!attribute event # # @return [Symbol, :"thread.run.failed"] required :event, const: :"thread.run.failed" - # @!parse - # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # fails. - # # - # # @param data [OpenAI::Models::Beta::Threads::Run] - # # @param event [Symbol, :"thread.run.failed"] - # # - # def initialize(data:, event: :"thread.run.failed", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.failed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed} for more details. + # + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # fails. + # + # @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r + # + # @param event [Symbol, :"thread.run.failed"] end - class ThreadRunCancelling < OpenAI::BaseModel + class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] - required :data, -> { OpenAI::Models::Beta::Threads::Run } + required :data, -> { OpenAI::Beta::Threads::Run } # @!attribute event # # @return [Symbol, :"thread.run.cancelling"] required :event, const: :"thread.run.cancelling" - # @!parse - # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # moves to a `cancelling` status. 
- # # - # # @param data [OpenAI::Models::Beta::Threads::Run] - # # @param event [Symbol, :"thread.run.cancelling"] - # # - # def initialize(data:, event: :"thread.run.cancelling", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.cancelling") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelling} for more details. + # + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `cancelling` status. + # + # @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r + # + # @param event [Symbol, :"thread.run.cancelling"] end - class ThreadRunCancelled < OpenAI::BaseModel + class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] - required :data, -> { OpenAI::Models::Beta::Threads::Run } + required :data, -> { OpenAI::Beta::Threads::Run } # @!attribute event # # @return [Symbol, :"thread.run.cancelled"] required :event, const: :"thread.run.cancelled" - # @!parse - # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # is cancelled. - # # - # # @param data [OpenAI::Models::Beta::Threads::Run] - # # @param event [Symbol, :"thread.run.cancelled"] - # # - # def initialize(data:, event: :"thread.run.cancelled", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.cancelled") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelled} for more details. + # + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # is cancelled. + # + # @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r + # + # @param event [Symbol, :"thread.run.cancelled"] end - class ThreadRunExpired < OpenAI::BaseModel + class ThreadRunExpired < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents an execution run on a - # [thread](https://platform.openai.com/docs/api-reference/threads). + # [thread](https://platform.openai.com/docs/api-reference/threads). # # @return [OpenAI::Models::Beta::Threads::Run] - required :data, -> { OpenAI::Models::Beta::Threads::Run } + required :data, -> { OpenAI::Beta::Threads::Run } # @!attribute event # # @return [Symbol, :"thread.run.expired"] required :event, const: :"thread.run.expired" - # @!parse - # # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) - # # expires. - # # - # # @param data [OpenAI::Models::Beta::Threads::Run] - # # @param event [Symbol, :"thread.run.expired"] - # # - # def initialize(data:, event: :"thread.run.expired", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, event: :"thread.run.expired") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::RunStreamEvent::ThreadRunExpired} for more details. + # + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # expires. 
+ # + # @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r + # + # @param event [Symbol, :"thread.run.expired"] end + + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::RunStreamEvent::ThreadRunCreated, OpenAI::Models::Beta::RunStreamEvent::ThreadRunQueued, OpenAI::Models::Beta::RunStreamEvent::ThreadRunInProgress, OpenAI::Models::Beta::RunStreamEvent::ThreadRunRequiresAction, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCompleted, OpenAI::Models::Beta::RunStreamEvent::ThreadRunIncomplete, OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelling, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelled, OpenAI::Models::Beta::RunStreamEvent::ThreadRunExpired)] end end end diff --git a/lib/openai/models/beta/thread.rb b/lib/openai/models/beta/thread.rb index 3de11c4c..21ee9dd0 100644 --- a/lib/openai/models/beta/thread.rb +++ b/lib/openai/models/beta/thread.rb @@ -3,7 +3,8 @@ module OpenAI module Models module Beta - class Thread < OpenAI::BaseModel + # @see OpenAI::Resources::Beta::Threads#create + class Thread < OpenAI::Internal::Type::BaseModel # @!attribute id # The identifier, which can be referenced in API endpoints. # @@ -18,14 +19,14 @@ class Thread < OpenAI::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - required :metadata, OpenAI::HashOf[String], nil?: true + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute object # The object type, which is always `thread`. @@ -35,100 +36,84 @@ class Thread < OpenAI::BaseModel # @!attribute tool_resources # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. # # @return [OpenAI::Models::Beta::Thread::ToolResources, nil] - required :tool_resources, -> { OpenAI::Models::Beta::Thread::ToolResources }, nil?: true + required :tool_resources, -> { OpenAI::Beta::Thread::ToolResources }, nil?: true - # @!parse - # # Represents a thread that contains - # # [messages](https://platform.openai.com/docs/api-reference/messages). 
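[Editor's note] The metadata limits documented on this model are easy to trip over; a concrete illustration with placeholder values:

metadata = {
  user_id: "u_123",        # keys: at most 64 characters
  source: "support-widget" # values: at most 512 characters
}
# No more than 16 such key-value pairs may be attached to a single object.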
- # # - # # @param id [String] - # # @param created_at [Integer] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param tool_resources [OpenAI::Models::Beta::Thread::ToolResources, nil] - # # @param object [Symbol, :thread] - # # - # def initialize(id:, created_at:, metadata:, tool_resources:, object: :thread, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, created_at:, metadata:, tool_resources:, object: :thread) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Thread} for more details. + # + # Represents a thread that contains + # [messages](https://platform.openai.com/docs/api-reference/messages). + # + # @param id [String] The identifier, which can be referenced in API endpoints. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the thread was created. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param tool_resources [OpenAI::Models::Beta::Thread::ToolResources, nil] A set of resources that are made available to the assistant's tools in this thre + # + # @param object [Symbol, :thread] The object type, which is always `thread`. - class ToolResources < OpenAI::BaseModel - # @!attribute [r] code_interpreter + # @see OpenAI::Models::Beta::Thread#tool_resources + class ToolResources < OpenAI::Internal::Type::BaseModel + # @!attribute code_interpreter # # @return [OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter, nil] - optional :code_interpreter, -> { OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter } + optional :code_interpreter, -> { OpenAI::Beta::Thread::ToolResources::CodeInterpreter } - # @!parse - # # @return [OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter] - # attr_writer :code_interpreter - - # @!attribute [r] file_search + # @!attribute file_search # # @return [OpenAI::Models::Beta::Thread::ToolResources::FileSearch, nil] - optional :file_search, -> { OpenAI::Models::Beta::Thread::ToolResources::FileSearch } - - # @!parse - # # @return [OpenAI::Models::Beta::Thread::ToolResources::FileSearch] - # attr_writer :file_search + optional :file_search, -> { OpenAI::Beta::Thread::ToolResources::FileSearch } - # @!parse - # # A set of resources that are made available to the assistant's tools in this - # # thread. The resources are specific to the type of tool. For example, the - # # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # # tool requires a list of vector store IDs. - # # - # # @param code_interpreter [OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter] - # # @param file_search [OpenAI::Models::Beta::Thread::ToolResources::FileSearch] - # # - # def initialize(code_interpreter: nil, file_search: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(code_interpreter: nil, file_search: nil) + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. 
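[Editor's note] The per-tool shape documented here maps onto a plain hash when building requests, assuming (as elsewhere in this SDK) that hashes are coerced into the typed models. A sketch with placeholder IDs:

tool_resources = {
  code_interpreter: {file_ids: %w[file-abc123]},  # up to 20 files for the tool
  file_search: {vector_store_ids: %w[vs_abc123]}  # at most 1 vector store per thread
}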
+ # + # @param code_interpreter [OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter] + # @param file_search [OpenAI::Models::Beta::Thread::ToolResources::FileSearch] - class CodeInterpreter < OpenAI::BaseModel - # @!attribute [r] file_ids + # @see OpenAI::Models::Beta::Thread::ToolResources#code_interpreter + class CodeInterpreter < OpenAI::Internal::Type::BaseModel + # @!attribute file_ids # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. # # @return [Array, nil] - optional :file_ids, OpenAI::ArrayOf[String] - - # @!parse - # # @return [Array] - # attr_writer :file_ids - - # @!parse - # # @param file_ids [Array] - # # - # def initialize(file_ids: nil, **) = super + optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(file_ids: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter} for more details. + # + # @param file_ids [Array] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made end - class FileSearch < OpenAI::BaseModel - # @!attribute [r] vector_store_ids + # @see OpenAI::Models::Beta::Thread::ToolResources#file_search + class FileSearch < OpenAI::Internal::Type::BaseModel + # @!attribute vector_store_ids # The - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this thread. There can be a maximum of 1 vector store attached to - # the thread. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. # # @return [Array, nil] - optional :vector_store_ids, OpenAI::ArrayOf[String] - - # @!parse - # # @return [Array] - # attr_writer :vector_store_ids + optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String] - # @!parse - # # @param vector_store_ids [Array] - # # - # def initialize(vector_store_ids: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(vector_store_ids: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Thread::ToolResources::FileSearch} for more details. 
+ # + # @param vector_store_ids [Array] The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/ end end end diff --git a/lib/openai/models/beta/thread_create_and_run_params.rb b/lib/openai/models/beta/thread_create_and_run_params.rb index 3cf817fa..ed371b08 100644 --- a/lib/openai/models/beta/thread_create_and_run_params.rb +++ b/lib/openai/models/beta/thread_create_and_run_params.rb @@ -3,580 +3,546 @@ module OpenAI module Models module Beta - class ThreadCreateAndRunParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Threads#create_and_run + # + # @see OpenAI::Resources::Beta::Threads#stream_raw + class ThreadCreateAndRunParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute assistant_id # The ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to - # execute this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. # # @return [String] required :assistant_id, String # @!attribute instructions # Override the default system message of the assistant. This is useful for - # modifying the behavior on a per-run basis. + # modifying the behavior on a per-run basis. # # @return [String, nil] optional :instructions, String, nil?: true # @!attribute max_completion_tokens # The maximum number of completion tokens that may be used over the course of the - # run. The run will make a best effort to use only the number of completion tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # completion tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. # # @return [Integer, nil] optional :max_completion_tokens, Integer, nil?: true # @!attribute max_prompt_tokens # The maximum number of prompt tokens that may be used over the course of the run. - # The run will make a best effort to use only the number of prompt tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # prompt tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. # # @return [Integer, nil] optional :max_prompt_tokens, Integer, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. 
+ # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute model # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - # be used to execute this run. If a value is provided here, it will override the - # model associated with the assistant. If not, the model associated with the - # assistant will be used. + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. # # @return [String, Symbol, OpenAI::Models::ChatModel, nil] - optional :model, union: -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Model }, nil?: true + optional :model, union: -> { OpenAI::Beta::ThreadCreateAndRunParams::Model }, nil?: true - # @!attribute [r] parallel_tool_calls + # @!attribute parallel_tool_calls # Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. # # @return [Boolean, nil] - optional :parallel_tool_calls, OpenAI::BooleanModel - - # @!parse - # # @return [Boolean] - # attr_writer :parallel_tool_calls + optional :parallel_tool_calls, OpenAI::Internal::Type::Boolean # @!attribute response_format # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. - # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. 
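[Editor's note] As the `@return` type notes, this union accepts the literal `:auto` or one of three format objects. A sketch of the three shapes as hash literals, again assuming the SDK's usual hash-to-model coercion; the schema contents are hypothetical:

text_format = {type: :text}
json_mode   = {type: :json_object} # see the "Important" caveat that follows
structured  = {
  type: :json_schema,
  json_schema: {name: "answer", schema: {type: "object"}} # hypothetical schema
}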
+ # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. # # @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] - optional :response_format, union: -> { OpenAI::Models::Beta::AssistantResponseFormatOption }, nil?: true + optional :response_format, union: -> { OpenAI::Beta::AssistantResponseFormatOption }, nil?: true # @!attribute temperature # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. # # @return [Float, nil] optional :temperature, Float, nil?: true - # @!attribute [r] thread + # @!attribute thread # Options to create a new thread. If no thread is provided when running a request, - # an empty thread will be created. + # an empty thread will be created. # # @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread, nil] - optional :thread, -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread } - - # @!parse - # # @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread] - # attr_writer :thread + optional :thread, -> { OpenAI::Beta::ThreadCreateAndRunParams::Thread } # @!attribute tool_choice # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tools and instead generates a message. `auto` is the default value - # and means the model can pick between generating a message or calling one or more - # tools. `required` means the model must call one or more tools before responding - # to the user. Specifying a particular tool like `{"type": "file_search"}` or - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. # # @return [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] - optional :tool_choice, union: -> { OpenAI::Models::Beta::AssistantToolChoiceOption }, nil?: true + optional :tool_choice, union: -> { OpenAI::Beta::AssistantToolChoiceOption }, nil?: true # @!attribute tool_resources # A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # specific to the type of tool. 
For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. # # @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources, nil] - optional :tool_resources, -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources }, nil?: true + optional :tool_resources, -> { OpenAI::Beta::ThreadCreateAndRunParams::ToolResources }, nil?: true # @!attribute tools # Override the tools the assistant can use for this run. This is useful for - # modifying the behavior on a per-run basis. + # modifying the behavior on a per-run basis. # # @return [Array, nil] optional :tools, - -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::ThreadCreateAndRunParams::Tool] }, + -> { + OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool] + }, nil?: true # @!attribute top_p # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or temperature but not both. + # We generally recommend altering this or temperature but not both. # # @return [Float, nil] optional :top_p, Float, nil?: true # @!attribute truncation_strategy # Controls for how a thread will be truncated prior to the run. Use this to - # control the intial context window of the run. + # control the initial context window of the run. # # @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil] optional :truncation_strategy, - -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy }, + -> { OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy }, nil?: true - # @!parse - # # @param assistant_id [String] - # # @param instructions [String, nil] - # # @param max_completion_tokens [Integer, nil] - # # @param max_prompt_tokens [Integer, nil] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param model [String, Symbol, OpenAI::Models::ChatModel, nil] - # # @param parallel_tool_calls [Boolean] - # # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] - # # @param temperature [Float, nil] - # # @param thread [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread] - # # @param tool_choice [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] - # # @param tool_resources [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources, nil] - # # @param tools [Array, nil] - # # @param top_p [Float, nil] - # # @param truncation_strategy [OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize( - # assistant_id:, - # instructions: nil, - # max_completion_tokens: nil, - # max_prompt_tokens: nil, - # metadata: nil, - # model: nil, - # parallel_tool_calls: nil, - # response_format: nil, - # temperature: nil, - # thread: nil, - # tool_choice: nil, - # tool_resources: nil, - # tools: nil, - # top_p: nil, - # truncation_strategy: nil, - # request_options: {}, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void 
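[Editor's note] Taken together, these parameters line up with keyword arguments on the resource method referenced above (`OpenAI::Resources::Beta::Threads#create_and_run`). A minimal end-to-end sketch; the assistant ID and prompt are placeholders, and the client is assumed to be configured with an API key:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

run = client.beta.threads.create_and_run(
  assistant_id: "asst_abc123", # placeholder ID
  model: "gpt-4o",             # overrides the assistant's configured model
  thread: {
    messages: [{role: :user, content: "Summarize the project notes."}]
  },
  max_completion_tokens: 512   # past this budget the run ends with status `incomplete`
)
puts run.status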
- - # @abstract + # @!method initialize(assistant_id:, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, response_format: nil, temperature: nil, thread: nil, tool_choice: nil, tool_resources: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::ThreadCreateAndRunParams} for more details. + # + # @param assistant_id [String] The ID of the [assistant](https://platform.openai.com/docs/api-reference/assista + # + # @param instructions [String, nil] Override the default system message of the assistant. This is useful for modifyi + # + # @param max_completion_tokens [Integer, nil] The maximum number of completion tokens that may be used over the course of the + # + # @param max_prompt_tokens [Integer, nil] The maximum number of prompt tokens that may be used over the course of the run. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param model [String, Symbol, OpenAI::Models::ChatModel, nil] The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # + # @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g # + # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https: + # + # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m + # + # @param thread [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread] Options to create a new thread. If no thread is provided when running a + # + # @param tool_choice [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] Controls which (if any) tool is called by the model. + # + # @param tool_resources [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe + # + # @param tools [Array, nil] Override the tools the assistant can use for this run. This is useful for modify + # + # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the + # + # @param truncation_strategy [OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil] Controls for how a thread will be truncated prior to the run. Use this to contro + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - # be used to execute this run. If a value is provided here, it will override the - # model associated with the assistant. If not, the model associated with the - # assistant will be used. - class Model < OpenAI::Union + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. + module Model + extend OpenAI::Internal::Type::Union + variant String # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. 
         #   The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
-        #     be used to execute this run. If a value is provided here, it will override the
-        #     model associated with the assistant. If not, the model associated with the
-        #     assistant will be used.
-        class Model < OpenAI::Union
+        #   be used to execute this run. If a value is provided here, it will override the
+        #   model associated with the assistant. If not, the model associated with the
+        #   assistant will be used.
+        module Model
+          extend OpenAI::Internal::Type::Union
+
           variant String

           # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.
-          variant enum: -> { OpenAI::Models::ChatModel }
+          variant enum: -> { OpenAI::ChatModel }
+
+          # @!method self.variants
+          #   @return [Array(String, Symbol, OpenAI::Models::ChatModel)]
         end
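The Model union above accepts either of two shapes; a hedged sketch, with the IDs assumed for illustration:

    # String variant: any model ID, including fine-tuned model IDs
    client.beta.threads.create_and_run(assistant_id: "asst_123", model: "gpt-4o-2024-08-06")

    # Enum variant: a Symbol drawn from the ChatModel enum's values
    client.beta.threads.create_and_run(assistant_id: "asst_123", model: :"gpt-4o")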

-        class Thread < OpenAI::BaseModel
-          # @!attribute [r] messages
+        class Thread < OpenAI::Internal::Type::BaseModel
+          # @!attribute messages
           #   A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
-          #     start the thread with.
+          #   start the thread with.
           #
           #   @return [Array, nil]
           optional :messages,
-                   -> { OpenAI::ArrayOf[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message] }
-
-          # @!parse
-          #   # @return [Array]
-          #   attr_writer :messages
+                   -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message] }

           # @!attribute metadata
           #   Set of 16 key-value pairs that can be attached to an object. This can be useful
-          #     for storing additional information about the object in a structured format, and
-          #     querying for objects via API or the dashboard.
+          #   for storing additional information about the object in a structured format, and
+          #   querying for objects via API or the dashboard.
           #
-          #     Keys are strings with a maximum length of 64 characters. Values are strings with
-          #     a maximum length of 512 characters.
+          #   Keys are strings with a maximum length of 64 characters. Values are strings with
+          #   a maximum length of 512 characters.
           #
           #   @return [Hash{Symbol=>String}, nil]
-          optional :metadata, OpenAI::HashOf[String], nil?: true
+          optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

           # @!attribute tool_resources
           #   A set of resources that are made available to the assistant's tools in this
-          #     thread. The resources are specific to the type of tool. For example, the
-          #     `code_interpreter` tool requires a list of file IDs, while the `file_search`
-          #     tool requires a list of vector store IDs.
+          #   thread. The resources are specific to the type of tool. For example, the
+          #   `code_interpreter` tool requires a list of file IDs, while the `file_search`
+          #   tool requires a list of vector store IDs.
           #
           #   @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources, nil]
           optional :tool_resources,
-                   -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources },
+                   -> {
+                     OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources
+                   },
                    nil?: true

-          # @!parse
-          #   # Options to create a new thread. If no thread is provided when running a request,
-          #   # an empty thread will be created.
-          #   #
-          #   # @param messages [Array]
-          #   # @param metadata [Hash{Symbol=>String}, nil]
-          #   # @param tool_resources [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources, nil]
-          #   #
-          #   def initialize(messages: nil, metadata: nil, tool_resources: nil, **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(messages: nil, metadata: nil, tool_resources: nil)
+          #   Some parameter documentations has been truncated, see
+          #   {OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread} for more details.
+          #
+          #   Options to create a new thread. If no thread is provided when running a request,
+          #   an empty thread will be created.
+          #
+          #   @param messages [Array] A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
+          #
+          #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
+          #
+          #   @param tool_resources [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources, nil] A set of resources that are made available to the assistant's tools in this thre
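A hypothetical inline `thread:` payload matching the attributes documented above (the values are illustrative):

    client.beta.threads.create_and_run(
      assistant_id: "asst_123",
      thread: {
        messages: [{role: :user, content: "Summarize the attached report."}],
        # At most 16 pairs; keys up to 64 characters, values up to 512.
        metadata: {ticket: "T-4821"}
      }
    )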

-          class Message < OpenAI::BaseModel
+          class Message < OpenAI::Internal::Type::BaseModel
             # @!attribute content
             #   The text contents of the message.
             #
             #   @return [String, Array]
-            required :content, union: -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Content }
+            required :content, union: -> { OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Content }

             # @!attribute role
             #   The role of the entity that is creating the message. Allowed values include:
             #
-            #     - `user`: Indicates the message is sent by an actual user and should be used in
-            #       most cases to represent user-generated messages.
-            #     - `assistant`: Indicates the message is generated by the assistant. Use this
-            #       value to insert messages from the assistant into the conversation.
+            #   - `user`: Indicates the message is sent by an actual user and should be used in
+            #     most cases to represent user-generated messages.
+            #   - `assistant`: Indicates the message is generated by the assistant. Use this
+            #     value to insert messages from the assistant into the conversation.
             #
             #   @return [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Role]
-            required :role, enum: -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Role }
+            required :role, enum: -> { OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Role }

             # @!attribute attachments
             #   A list of files attached to the message, and the tools they should be added to.
             #
             #   @return [Array, nil]
             optional :attachments,
-                     -> { OpenAI::ArrayOf[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment] },
+                     -> {
+                       OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment]
+                     },
                      nil?: true

             # @!attribute metadata
             #   Set of 16 key-value pairs that can be attached to an object. This can be useful
-            #     for storing additional information about the object in a structured format, and
-            #     querying for objects via API or the dashboard.
+            #   for storing additional information about the object in a structured format, and
+            #   querying for objects via API or the dashboard.
             #
-            #     Keys are strings with a maximum length of 64 characters. Values are strings with
-            #     a maximum length of 512 characters.
+            #   Keys are strings with a maximum length of 64 characters. Values are strings with
+            #   a maximum length of 512 characters.
             #
             #   @return [Hash{Symbol=>String}, nil]
-            optional :metadata, OpenAI::HashOf[String], nil?: true
-
-            # @!parse
-            #   # @param content [String, Array]
-            #   # @param role [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Role]
-            #   # @param attachments [Array, nil]
-            #   # @param metadata [Hash{Symbol=>String}, nil]
-            #   #
-            #   def initialize(content:, role:, attachments: nil, metadata: nil, **) = super
+            optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

-            # def initialize: (Hash | OpenAI::BaseModel) -> void
-
-            # @abstract
+            # @!method initialize(content:, role:, attachments: nil, metadata: nil)
+            #   Some parameter documentations has been truncated, see
+            #   {OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message} for more
+            #   details.
+            #
+            #   @param content [String, Array] The text contents of the message.
+            #
+            #   @param role [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Role] The role of the entity that is creating the message. Allowed values include:
+            #
+            #   @param attachments [Array, nil] A list of files attached to the message, and the tools they should be added to.
+            #
+            #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
+
             #   The text contents of the message.
-            class Content < OpenAI::Union
-              MessageContentPartParamArray = OpenAI::ArrayOf[union: -> { OpenAI::Models::Beta::Threads::MessageContentPartParam }]
+            #
+            # @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message#content
+            module Content
+              extend OpenAI::Internal::Type::Union

               # The text contents of the message.
               variant String

               # An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](https://platform.openai.com/docs/models).
-              variant OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Content::MessageContentPartParamArray
+              variant -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Content::MessageContentPartParamArray }
+
+              # @!method self.variants
+              #   @return [Array(String, Array)]
+
+              # @type [OpenAI::Internal::Type::Converter]
+              MessageContentPartParamArray =
+                OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Beta::Threads::MessageContentPartParam }]
             end

-            # @abstract
-            #
             #   The role of the entity that is creating the message. Allowed values include:
             #
-            #     - `user`: Indicates the message is sent by an actual user and should be used in
-            #       most cases to represent user-generated messages.
-            #     - `assistant`: Indicates the message is generated by the assistant. Use this
-            #       value to insert messages from the assistant into the conversation.
-            class Role < OpenAI::Enum
+            #   - `user`: Indicates the message is sent by an actual user and should be used in
+            #     most cases to represent user-generated messages.
+            #   - `assistant`: Indicates the message is generated by the assistant. Use this
+            #     value to insert messages from the assistant into the conversation.
+            #
+            # @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message#role
+            module Role
+              extend OpenAI::Internal::Type::Enum
+
              USER = :user
              ASSISTANT = :assistant

-              finalize!
+              # @!method self.values
+              #   @return [Array]
            end
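The Content union documented above takes either a bare string or an array of typed parts; a sketch with assumed part shapes:

    text_message = {role: :user, content: "Describe this image."}

    # Image parts are only supported on vision-compatible models.
    multipart_message = {
      role: :user,
      content: [
        {type: :text, text: "Describe this image."},
        {type: :image_file, image_file: {file_id: "file_abc"}}
      ]
    }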

-            class Attachment < OpenAI::BaseModel
-              # @!attribute [r] file_id
+            class Attachment < OpenAI::Internal::Type::BaseModel
+              # @!attribute file_id
               #   The ID of the file to attach to the message.
               #
               #   @return [String, nil]
               optional :file_id, String

-              # @!parse
-              #   # @return [String]
-              #   attr_writer :file_id
-
-              # @!attribute [r] tools
+              # @!attribute tools
               #   The tools to add this file to.
               #
               #   @return [Array, nil]
               optional :tools,
-                       -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool] }
-
-              # @!parse
-              #   # @return [Array]
-              #   attr_writer :tools
+                       -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool] }

-              # @!parse
-              #   # @param file_id [String]
-              #   # @param tools [Array]
-              #   #
-              #   def initialize(file_id: nil, tools: nil, **) = super
+              # @!method initialize(file_id: nil, tools: nil)
+              #   @param file_id [String] The ID of the file to attach to the message.
+              #
+              #   @param tools [Array] The tools to add this file to.

-              # def initialize: (Hash | OpenAI::BaseModel) -> void
+              module Tool
+                extend OpenAI::Internal::Type::Union

-              # @abstract
-              #
-              class Tool < OpenAI::Union
                 discriminator :type

-                variant :code_interpreter, -> { OpenAI::Models::Beta::CodeInterpreterTool }
+                variant :code_interpreter, -> { OpenAI::Beta::CodeInterpreterTool }

                 variant :file_search,
-                        -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch }
+                        -> { OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch }

-                class FileSearch < OpenAI::BaseModel
+                class FileSearch < OpenAI::Internal::Type::BaseModel
                   # @!attribute type
                   #   The type of tool being defined: `file_search`
                   #
                   #   @return [Symbol, :file_search]
                   required :type, const: :file_search

-                  # @!parse
-                  #   # @param type [Symbol, :file_search]
-                  #   #
-                  #   def initialize(type: :file_search, **) = super
-
-                  # def initialize: (Hash | OpenAI::BaseModel) -> void
+                  # @!method initialize(type: :file_search)
+                  #   @param type [Symbol, :file_search] The type of tool being defined: `file_search`
                 end
+
+                # @!method self.variants
+                #   @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch)]
               end
             end
           end
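An illustrative Attachment, pairing a file ID with the tools the file should be added to; the discriminated Tool union dispatches on `type`, and the IDs below are placeholders:

    message_with_attachment = {
      role: :user,
      content: "Run the numbers in this spreadsheet.",
      attachments: [
        {file_id: "file_abc", tools: [{type: :code_interpreter}, {type: :file_search}]}
      ]
    }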

-          class ToolResources < OpenAI::BaseModel
-            # @!attribute [r] code_interpreter
+          # @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread#tool_resources
+          class ToolResources < OpenAI::Internal::Type::BaseModel
+            # @!attribute code_interpreter
             #
             #   @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter, nil]
             optional :code_interpreter,
-                     -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter }
-
-            # @!parse
-            #   # @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter]
-            #   attr_writer :code_interpreter
+                     -> { OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter }

-            # @!attribute [r] file_search
+            # @!attribute file_search
             #
             #   @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch, nil]
-            optional :file_search,
-                     -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch }
-
-            # @!parse
-            #   # @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch]
-            #   attr_writer :file_search
-
-            # @!parse
-            #   # A set of resources that are made available to the assistant's tools in this
-            #   # thread. The resources are specific to the type of tool. For example, the
-            #   # `code_interpreter` tool requires a list of file IDs, while the `file_search`
-            #   # tool requires a list of vector store IDs.
-            #   #
-            #   # @param code_interpreter [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter]
-            #   # @param file_search [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch]
-            #   #
-            #   def initialize(code_interpreter: nil, file_search: nil, **) = super
-
-            # def initialize: (Hash | OpenAI::BaseModel) -> void
-
-            class CodeInterpreter < OpenAI::BaseModel
-              # @!attribute [r] file_ids
+            optional :file_search, -> { OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch }
+
+            # @!method initialize(code_interpreter: nil, file_search: nil)
+            #   A set of resources that are made available to the assistant's tools in this
+            #   thread. The resources are specific to the type of tool. For example, the
+            #   `code_interpreter` tool requires a list of file IDs, while the `file_search`
+            #   tool requires a list of vector store IDs.
+            #
+            #   @param code_interpreter [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter]
+            #   @param file_search [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch]
+
+            # @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources#code_interpreter
+            class CodeInterpreter < OpenAI::Internal::Type::BaseModel
+              # @!attribute file_ids
               #   A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-              #     available to the `code_interpreter` tool. There can be a maximum of 20 files
-              #     associated with the tool.
+              #   available to the `code_interpreter` tool. There can be a maximum of 20 files
+              #   associated with the tool.
               #
               #   @return [Array, nil]
-              optional :file_ids, OpenAI::ArrayOf[String]
-
-              # @!parse
-              #   # @return [Array]
-              #   attr_writer :file_ids
+              optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]

-              # @!parse
-              #   # @param file_ids [Array]
-              #   #
-              #   def initialize(file_ids: nil, **) = super
-
-              # def initialize: (Hash | OpenAI::BaseModel) -> void
+              # @!method initialize(file_ids: nil)
+              #   Some parameter documentations has been truncated, see
+              #   {OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter}
+              #   for more details.
+              #
+              #   @param file_ids [Array] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
             end

-            class FileSearch < OpenAI::BaseModel
-              # @!attribute [r] vector_store_ids
+            # @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources#file_search
+            class FileSearch < OpenAI::Internal::Type::BaseModel
+              # @!attribute vector_store_ids
               #   The
-              #     [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-              #     attached to this thread. There can be a maximum of 1 vector store attached to
-              #     the thread.
+              #   [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+              #   attached to this thread. There can be a maximum of 1 vector store attached to
+              #   the thread.
               #
               #   @return [Array, nil]
-              optional :vector_store_ids, OpenAI::ArrayOf[String]
-
-              # @!parse
-              #   # @return [Array]
-              #   attr_writer :vector_store_ids
+              optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String]

-              # @!attribute [r] vector_stores
+              # @!attribute vector_stores
               #   A helper to create a
-              #     [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-              #     with file_ids and attach it to this thread. There can be a maximum of 1 vector
-              #     store attached to the thread.
+              #   [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+              #   with file_ids and attach it to this thread. There can be a maximum of 1 vector
+              #   store attached to the thread.
               #
               #   @return [Array, nil]
               optional :vector_stores,
-                       -> { OpenAI::ArrayOf[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] }
-
-              # @!parse
-              #   # @return [Array]
-              #   attr_writer :vector_stores
+                       -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] }

-              # @!parse
-              #   # @param vector_store_ids [Array]
-              #   # @param vector_stores [Array]
-              #   #
-              #   def initialize(vector_store_ids: nil, vector_stores: nil, **) = super
-
-              # def initialize: (Hash | OpenAI::BaseModel) -> void
+              # @!method initialize(vector_store_ids: nil, vector_stores: nil)
+              #   Some parameter documentations has been truncated, see
+              #   {OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch}
+              #   for more details.
+              #
+              #   @param vector_store_ids [Array] The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/
+              #
+              #   @param vector_stores [Array] A helper to create a [vector store](https://platform.openai.com/docs/api-referen

-              class VectorStore < OpenAI::BaseModel
-                # @!attribute [r] chunking_strategy
+              class VectorStore < OpenAI::Internal::Type::BaseModel
+                # @!attribute chunking_strategy
                 #   The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-                #     strategy.
+                #   strategy.
                 #
                 #   @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static, nil]
                 optional :chunking_strategy,
-                         union: -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy }
-
-                # @!parse
-                #   # @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]
-                #   attr_writer :chunking_strategy
+                         union: -> { OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy }

-                # @!attribute [r] file_ids
+                # @!attribute file_ids
                 #   A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
-                #     add to the vector store. There can be a maximum of 10000 files in a vector
-                #     store.
+                #   add to the vector store. There can be a maximum of 10000 files in a vector
+                #   store.
                 #
                 #   @return [Array, nil]
-                optional :file_ids, OpenAI::ArrayOf[String]
-
-                # @!parse
-                #   # @return [Array]
-                #   attr_writer :file_ids
+                optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]

                 # @!attribute metadata
                 #   Set of 16 key-value pairs that can be attached to an object. This can be useful
-                #     for storing additional information about the object in a structured format, and
-                #     querying for objects via API or the dashboard.
+                #   for storing additional information about the object in a structured format, and
+                #   querying for objects via API or the dashboard.
                 #
-                #     Keys are strings with a maximum length of 64 characters. Values are strings with
-                #     a maximum length of 512 characters.
+                #   Keys are strings with a maximum length of 64 characters. Values are strings with
+                #   a maximum length of 512 characters.
                 #
                 #   @return [Hash{Symbol=>String}, nil]
-                optional :metadata, OpenAI::HashOf[String], nil?: true
-
-                # @!parse
-                #   # @param chunking_strategy [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]
-                #   # @param file_ids [Array]
-                #   # @param metadata [Hash{Symbol=>String}, nil]
-                #   #
-                #   def initialize(chunking_strategy: nil, file_ids: nil, metadata: nil, **) = super
+                optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

-                # def initialize: (Hash | OpenAI::BaseModel) -> void
-
-                # @abstract
+                # @!method initialize(chunking_strategy: nil, file_ids: nil, metadata: nil)
+                #   Some parameter documentations has been truncated, see
+                #   {OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore}
+                #   for more details.
+                #
+                #   @param chunking_strategy [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static] The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+                #
+                #   @param file_ids [Array] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to ad
+                #
+                #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
+
                 #   The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-                #     strategy.
-                class ChunkingStrategy < OpenAI::Union
+                #   strategy.
+                #
+                # @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore#chunking_strategy
+                module ChunkingStrategy
+                  extend OpenAI::Internal::Type::Union
+
                   discriminator :type

                   # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`.
                   variant :auto,
-                          -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto }
+                          -> { OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto }

                   variant :static,
-                          -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static }
+                          -> { OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static }

-                  class Auto < OpenAI::BaseModel
+                  class Auto < OpenAI::Internal::Type::BaseModel
                     # @!attribute type
                     #   Always `auto`.
                     #
                     #   @return [Symbol, :auto]
                     required :type, const: :auto

-                    # @!parse
-                    #   # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
-                    #   # `800` and `chunk_overlap_tokens` of `400`.
-                    #   #
-                    #   # @param type [Symbol, :auto]
-                    #   #
-                    #   def initialize(type: :auto, **) = super
-
-                    # def initialize: (Hash | OpenAI::BaseModel) -> void
+                    # @!method initialize(type: :auto)
+                    #   The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
+                    #   `800` and `chunk_overlap_tokens` of `400`.
+                    #
+                    #   @param type [Symbol, :auto] Always `auto`.
                   end

-                  class Static < OpenAI::BaseModel
+                  class Static < OpenAI::Internal::Type::BaseModel
                     # @!attribute static
                     #
                     #   @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static]
                     required :static,
-                             -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static }
+                             -> { OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static }

                     # @!attribute type
                     #   Always `static`.
@@ -584,171 +550,148 @@ class Static < OpenAI::BaseModel
                     #   @return [Symbol, :static]
                     required :type, const: :static

-                    # @!parse
-                    #   # @param static [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static]
-                    #   # @param type [Symbol, :static]
-                    #   #
-                    #   def initialize(static:, type: :static, **) = super
-
-                    # def initialize: (Hash | OpenAI::BaseModel) -> void
+                    # @!method initialize(static:, type: :static)
+                    #   @param static [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static]
+                    #
+                    #   @param type [Symbol, :static] Always `static`.

-                    class Static < OpenAI::BaseModel
+                    # @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static#static
+                    class Static < OpenAI::Internal::Type::BaseModel
                       # @!attribute chunk_overlap_tokens
                       #   The number of tokens that overlap between chunks. The default value is `400`.
                       #
-                      #     Note that the overlap must not exceed half of `max_chunk_size_tokens`.
+                      #   Note that the overlap must not exceed half of `max_chunk_size_tokens`.
                       #
                       #   @return [Integer]
                       required :chunk_overlap_tokens, Integer

                       # @!attribute max_chunk_size_tokens
                       #   The maximum number of tokens in each chunk. The default value is `800`. The
-                      #     minimum value is `100` and the maximum value is `4096`.
+                      #   minimum value is `100` and the maximum value is `4096`.
                       #
                       #   @return [Integer]
                       required :max_chunk_size_tokens, Integer

-                      # @!parse
-                      #   # @param chunk_overlap_tokens [Integer]
-                      #   # @param max_chunk_size_tokens [Integer]
-                      #   #
-                      #   def initialize(chunk_overlap_tokens:, max_chunk_size_tokens:, **) = super
-
-                      # def initialize: (Hash | OpenAI::BaseModel) -> void
+                      # @!method initialize(chunk_overlap_tokens:, max_chunk_size_tokens:)
+                      #   Some parameter documentations has been truncated, see
+                      #   {OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static}
+                      #   for more details.
+                      #
+                      #   @param chunk_overlap_tokens [Integer] The number of tokens that overlap between chunks. The default value is `400`.
+                      #
+                      #   @param max_chunk_size_tokens [Integer] The maximum number of tokens in each chunk. The default value is `800`. The mini
                     end
                   end
+
+                  # @!method self.variants
+                  #   @return [Array(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static)]
                 end
               end
             end
          end
        end
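A hedged sketch of the two chunking-strategy variants accepted by the VectorStore helper above; the file IDs are placeholders, and `auto` is also the default when the field is omitted:

    auto_store = {file_ids: ["file_abc"], chunking_strategy: {type: :auto}}

    static_store = {
      file_ids: ["file_abc"],
      chunking_strategy: {
        type: :static,
        static: {
          max_chunk_size_tokens: 800, # allowed range is 100..4096
          chunk_overlap_tokens: 400   # must not exceed half of max_chunk_size_tokens
        }
      }
    }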

-        class ToolResources < OpenAI::BaseModel
-          # @!attribute [r] code_interpreter
+        class ToolResources < OpenAI::Internal::Type::BaseModel
+          # @!attribute code_interpreter
           #
           #   @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter, nil]
-          optional :code_interpreter,
-                   -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter }
+          optional :code_interpreter, -> { OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter }

-          # @!parse
-          #   # @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter]
-          #   attr_writer :code_interpreter
-
-          # @!attribute [r] file_search
+          # @!attribute file_search
           #
           #   @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch, nil]
-          optional :file_search, -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch }
-
-          # @!parse
-          #   # @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch]
-          #   attr_writer :file_search
-
-          # @!parse
-          #   # A set of resources that are used by the assistant's tools. The resources are
-          #   # specific to the type of tool. For example, the `code_interpreter` tool requires
-          #   # a list of file IDs, while the `file_search` tool requires a list of vector store
-          #   # IDs.
-          #   #
-          #   # @param code_interpreter [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter]
-          #   # @param file_search [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch]
-          #   #
-          #   def initialize(code_interpreter: nil, file_search: nil, **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
-
-          class CodeInterpreter < OpenAI::BaseModel
-            # @!attribute [r] file_ids
+          optional :file_search, -> { OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch }
+
+          # @!method initialize(code_interpreter: nil, file_search: nil)
+          #   A set of resources that are used by the assistant's tools. The resources are
+          #   specific to the type of tool. For example, the `code_interpreter` tool requires
+          #   a list of file IDs, while the `file_search` tool requires a list of vector store
+          #   IDs.
+          #
+          #   @param code_interpreter [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter]
+          #   @param file_search [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch]
+
+          # @see OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources#code_interpreter
+          class CodeInterpreter < OpenAI::Internal::Type::BaseModel
+            # @!attribute file_ids
             #   A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-            #     available to the `code_interpreter` tool. There can be a maximum of 20 files
-            #     associated with the tool.
+            #   available to the `code_interpreter` tool. There can be a maximum of 20 files
+            #   associated with the tool.
            #
            #   @return [Array, nil]
-            optional :file_ids, OpenAI::ArrayOf[String]
-
-            # @!parse
-            #   # @return [Array]
-            #   attr_writer :file_ids
-
-            # @!parse
-            #   # @param file_ids [Array]
-            #   #
-            #   def initialize(file_ids: nil, **) = super
+            optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]

-            # def initialize: (Hash | OpenAI::BaseModel) -> void
+            # @!method initialize(file_ids: nil)
+            #   Some parameter documentations has been truncated, see
+            #   {OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter}
+            #   for more details.
+            #
+            #   @param file_ids [Array] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
          end

-          class FileSearch < OpenAI::BaseModel
-            # @!attribute [r] vector_store_ids
+          # @see OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources#file_search
+          class FileSearch < OpenAI::Internal::Type::BaseModel
+            # @!attribute vector_store_ids
             #   The ID of the
-            #     [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-            #     attached to this assistant. There can be a maximum of 1 vector store attached to
-            #     the assistant.
+            #   [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+            #   attached to this assistant. There can be a maximum of 1 vector store attached to
+            #   the assistant.
            #
            #   @return [Array, nil]
-            optional :vector_store_ids, OpenAI::ArrayOf[String]
-
-            # @!parse
-            #   # @return [Array]
-            #   attr_writer :vector_store_ids
+            optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String]

-            # @!parse
-            #   # @param vector_store_ids [Array]
-            #   #
-            #   def initialize(vector_store_ids: nil, **) = super
-
-            # def initialize: (Hash | OpenAI::BaseModel) -> void
+            # @!method initialize(vector_store_ids: nil)
+            #   Some parameter documentations has been truncated, see
+            #   {OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch} for
+            #   more details.
+            #
+            #   @param vector_store_ids [Array] The ID of the [vector store](https://platform.openai.com/docs/api-reference/vect
          end
        end

-        # @abstract
-        #
-        class Tool < OpenAI::Union
-          variant -> { OpenAI::Models::Beta::CodeInterpreterTool }
-
-          variant -> { OpenAI::Models::Beta::FileSearchTool }
-
-          variant -> { OpenAI::Models::Beta::FunctionTool }
-        end
-
-        class TruncationStrategy < OpenAI::BaseModel
+        class TruncationStrategy < OpenAI::Internal::Type::BaseModel
          # @!attribute type
          #   The truncation strategy to use for the thread. The default is `auto`. If set to
-          #     `last_messages`, the thread will be truncated to the n most recent messages in
-          #     the thread. When set to `auto`, messages in the middle of the thread will be
-          #     dropped to fit the context length of the model, `max_prompt_tokens`.
+          #   `last_messages`, the thread will be truncated to the n most recent messages in
+          #   the thread. When set to `auto`, messages in the middle of the thread will be
+          #   dropped to fit the context length of the model, `max_prompt_tokens`.
          #
          #   @return [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type]
-          required :type, enum: -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type }
+          required :type, enum: -> { OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type }

          # @!attribute last_messages
          #   The number of most recent messages from the thread when constructing the context
-          #     for the run.
+          #   for the run.
          #
          #   @return [Integer, nil]
          optional :last_messages, Integer, nil?: true

-          # @!parse
-          #   # Controls for how a thread will be truncated prior to the run. Use this to
-          #   # control the intial context window of the run.
-          #   #
-          #   # @param type [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type]
-          #   # @param last_messages [Integer, nil]
-          #   #
-          #   def initialize(type:, last_messages: nil, **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
-
-          # @abstract
+          # @!method initialize(type:, last_messages: nil)
+          #   Some parameter documentations has been truncated, see
+          #   {OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy} for more
+          #   details.
+          #
+          #   Controls for how a thread will be truncated prior to the run. Use this to
+          #   control the initial context window of the run.
           #
+          #   @param type [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to
+          #
+          #   @param last_messages [Integer, nil] The number of most recent messages from the thread when constructing the context
+
          #   The truncation strategy to use for the thread. The default is `auto`. If set to
-          #     `last_messages`, the thread will be truncated to the n most recent messages in
-          #     the thread. When set to `auto`, messages in the middle of the thread will be
-          #     dropped to fit the context length of the model, `max_prompt_tokens`.
-          class Type < OpenAI::Enum
+          #   `last_messages`, the thread will be truncated to the n most recent messages in
+          #   the thread. When set to `auto`, messages in the middle of the thread will be
+          #   dropped to fit the context length of the model, `max_prompt_tokens`.
+          #
+          # @see OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy#type
+          module Type
+            extend OpenAI::Internal::Type::Enum
+
            AUTO = :auto
            LAST_MESSAGES = :last_messages

-            finalize!
+            # @!method self.values
+            #   @return [Array]
          end
        end
      end
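To close out this file, an assumed call exercising TruncationStrategy: keep only the most recent messages when assembling the run's context window (the assistant ID is a placeholder):

    client.beta.threads.create_and_run(
      assistant_id: "asst_123",
      truncation_strategy: {type: :last_messages, last_messages: 4} # or {type: :auto}
    )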
diff --git a/lib/openai/models/beta/thread_create_params.rb b/lib/openai/models/beta/thread_create_params.rb
index 46dfaee7..02d8accc 100644
--- a/lib/openai/models/beta/thread_create_params.rb
+++ b/lib/openai/models/beta/thread_create_params.rb
@@ -3,358 +3,327 @@
 module OpenAI
   module Models
     module Beta
-      class ThreadCreateParams < OpenAI::BaseModel
-        # @!parse
-        #   extend OpenAI::RequestParameters::Converter
-        include OpenAI::RequestParameters
+      # @see OpenAI::Resources::Beta::Threads#create
+      class ThreadCreateParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters

-        # @!attribute [r] messages
+        # @!attribute messages
         #   A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
-        #     start the thread with.
+        #   start the thread with.
         #
         #   @return [Array, nil]
-        optional :messages, -> { OpenAI::ArrayOf[OpenAI::Models::Beta::ThreadCreateParams::Message] }
-
-        # @!parse
-        #   # @return [Array]
-        #   attr_writer :messages
+        optional :messages, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::ThreadCreateParams::Message] }

         # @!attribute metadata
         #   Set of 16 key-value pairs that can be attached to an object. This can be useful
-        #     for storing additional information about the object in a structured format, and
-        #     querying for objects via API or the dashboard.
+        #   for storing additional information about the object in a structured format, and
+        #   querying for objects via API or the dashboard.
         #
-        #     Keys are strings with a maximum length of 64 characters. Values are strings with
-        #     a maximum length of 512 characters.
+        #   Keys are strings with a maximum length of 64 characters. Values are strings with
+        #   a maximum length of 512 characters.
         #
         #   @return [Hash{Symbol=>String}, nil]
-        optional :metadata, OpenAI::HashOf[String], nil?: true
+        optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

         # @!attribute tool_resources
         #   A set of resources that are made available to the assistant's tools in this
-        #     thread. The resources are specific to the type of tool. For example, the
-        #     `code_interpreter` tool requires a list of file IDs, while the `file_search`
-        #     tool requires a list of vector store IDs.
+        #   thread. The resources are specific to the type of tool. For example, the
+        #   `code_interpreter` tool requires a list of file IDs, while the `file_search`
+        #   tool requires a list of vector store IDs.
         #
         #   @return [OpenAI::Models::Beta::ThreadCreateParams::ToolResources, nil]
-        optional :tool_resources, -> { OpenAI::Models::Beta::ThreadCreateParams::ToolResources }, nil?: true
-
-        # @!parse
-        #   # @param messages [Array]
-        #   # @param metadata [Hash{Symbol=>String}, nil]
-        #   # @param tool_resources [OpenAI::Models::Beta::ThreadCreateParams::ToolResources, nil]
-        #   # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-        #   #
-        #   def initialize(messages: nil, metadata: nil, tool_resources: nil, request_options: {}, **) = super
+        optional :tool_resources, -> { OpenAI::Beta::ThreadCreateParams::ToolResources }, nil?: true

-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(messages: nil, metadata: nil, tool_resources: nil, request_options: {})
+        #   Some parameter documentations has been truncated, see
+        #   {OpenAI::Models::Beta::ThreadCreateParams} for more details.
+        #
+        #   @param messages [Array] A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
+        #
+        #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
+        #
+        #   @param tool_resources [OpenAI::Models::Beta::ThreadCreateParams::ToolResources, nil] A set of resources that are made available to the assistant's tools in this thre
+        #
+        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
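For comparison, a hypothetical standalone `threads.create` call using the same trio of parameters this class defines (the vector store ID is a placeholder):

    thread = client.beta.threads.create(
      messages: [{role: :user, content: "Hello!"}],
      tool_resources: {file_search: {vector_store_ids: ["vs_123"]}}
    )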

-        class Message < OpenAI::BaseModel
+        class Message < OpenAI::Internal::Type::BaseModel
           # @!attribute content
           #   The text contents of the message.
           #
           #   @return [String, Array]
-          required :content, union: -> { OpenAI::Models::Beta::ThreadCreateParams::Message::Content }
+          required :content, union: -> { OpenAI::Beta::ThreadCreateParams::Message::Content }

           # @!attribute role
           #   The role of the entity that is creating the message. Allowed values include:
           #
-          #     - `user`: Indicates the message is sent by an actual user and should be used in
-          #       most cases to represent user-generated messages.
-          #     - `assistant`: Indicates the message is generated by the assistant. Use this
-          #       value to insert messages from the assistant into the conversation.
+          #   - `user`: Indicates the message is sent by an actual user and should be used in
+          #     most cases to represent user-generated messages.
+          #   - `assistant`: Indicates the message is generated by the assistant. Use this
+          #     value to insert messages from the assistant into the conversation.
           #
           #   @return [Symbol, OpenAI::Models::Beta::ThreadCreateParams::Message::Role]
-          required :role, enum: -> { OpenAI::Models::Beta::ThreadCreateParams::Message::Role }
+          required :role, enum: -> { OpenAI::Beta::ThreadCreateParams::Message::Role }

           # @!attribute attachments
           #   A list of files attached to the message, and the tools they should be added to.
           #
           #   @return [Array, nil]
           optional :attachments,
-                   -> { OpenAI::ArrayOf[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment] },
+                   -> {
+                     OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::ThreadCreateParams::Message::Attachment]
+                   },
                    nil?: true

           # @!attribute metadata
           #   Set of 16 key-value pairs that can be attached to an object. This can be useful
-          #     for storing additional information about the object in a structured format, and
-          #     querying for objects via API or the dashboard.
+          #   for storing additional information about the object in a structured format, and
+          #   querying for objects via API or the dashboard.
           #
-          #     Keys are strings with a maximum length of 64 characters. Values are strings with
-          #     a maximum length of 512 characters.
+          #   Keys are strings with a maximum length of 64 characters. Values are strings with
+          #   a maximum length of 512 characters.
           #
           #   @return [Hash{Symbol=>String}, nil]
-          optional :metadata, OpenAI::HashOf[String], nil?: true
-
-          # @!parse
-          #   # @param content [String, Array]
-          #   # @param role [Symbol, OpenAI::Models::Beta::ThreadCreateParams::Message::Role]
-          #   # @param attachments [Array, nil]
-          #   # @param metadata [Hash{Symbol=>String}, nil]
-          #   #
-          #   def initialize(content:, role:, attachments: nil, metadata: nil, **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

-          # @abstract
+          # @!method initialize(content:, role:, attachments: nil, metadata: nil)
+          #   Some parameter documentations has been truncated, see
+          #   {OpenAI::Models::Beta::ThreadCreateParams::Message} for more details.
           #
+          #   @param content [String, Array] The text contents of the message.
+          #
+          #   @param role [Symbol, OpenAI::Models::Beta::ThreadCreateParams::Message::Role] The role of the entity that is creating the message. Allowed values include:
+          #
+          #   @param attachments [Array, nil] A list of files attached to the message, and the tools they should be added to.
+          #
+          #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
+
           #   The text contents of the message.
-          class Content < OpenAI::Union
-            MessageContentPartParamArray = OpenAI::ArrayOf[union: -> { OpenAI::Models::Beta::Threads::MessageContentPartParam }]
+          #
+          # @see OpenAI::Models::Beta::ThreadCreateParams::Message#content
+          module Content
+            extend OpenAI::Internal::Type::Union

             # The text contents of the message.
             variant String

             # An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](https://platform.openai.com/docs/models).
-            variant OpenAI::Models::Beta::ThreadCreateParams::Message::Content::MessageContentPartParamArray
+            variant -> { OpenAI::Models::Beta::ThreadCreateParams::Message::Content::MessageContentPartParamArray }
+
+            # @!method self.variants
+            #   @return [Array(String, Array)]
+
+            # @type [OpenAI::Internal::Type::Converter]
+            MessageContentPartParamArray =
+              OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Beta::Threads::MessageContentPartParam }]
           end

-          # @abstract
-          #
           #   The role of the entity that is creating the message. Allowed values include:
           #
-          #     - `user`: Indicates the message is sent by an actual user and should be used in
-          #       most cases to represent user-generated messages.
-          #     - `assistant`: Indicates the message is generated by the assistant. Use this
-          #       value to insert messages from the assistant into the conversation.
-          class Role < OpenAI::Enum
+          #   - `user`: Indicates the message is sent by an actual user and should be used in
+          #     most cases to represent user-generated messages.
+          #   - `assistant`: Indicates the message is generated by the assistant. Use this
+          #     value to insert messages from the assistant into the conversation.
+          #
+          # @see OpenAI::Models::Beta::ThreadCreateParams::Message#role
+          module Role
+            extend OpenAI::Internal::Type::Enum
+
            USER = :user
            ASSISTANT = :assistant

-            finalize!
+            # @!method self.values
+            #   @return [Array]
          end

-          class Attachment < OpenAI::BaseModel
-            # @!attribute [r] file_id
+          class Attachment < OpenAI::Internal::Type::BaseModel
+            # @!attribute file_id
             #   The ID of the file to attach to the message.
             #
             #   @return [String, nil]
             optional :file_id, String

-            # @!parse
-            #   # @return [String]
-            #   attr_writer :file_id
-
-            # @!attribute [r] tools
+            # @!attribute tools
             #   The tools to add this file to.
             #
             #   @return [Array, nil]
             optional :tools,
-                     -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool] }
-
-            # @!parse
-            #   # @return [Array]
-            #   attr_writer :tools
+                     -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::ThreadCreateParams::Message::Attachment::Tool] }

-            # @!parse
-            #   # @param file_id [String]
-            #   # @param tools [Array]
-            #   #
-            #   def initialize(file_id: nil, tools: nil, **) = super
+            # @!method initialize(file_id: nil, tools: nil)
+            #   @param file_id [String] The ID of the file to attach to the message.
+            #
+            #   @param tools [Array] The tools to add this file to.

-            # def initialize: (Hash | OpenAI::BaseModel) -> void
+            module Tool
+              extend OpenAI::Internal::Type::Union

-            # @abstract
-            #
-            class Tool < OpenAI::Union
               discriminator :type

-              variant :code_interpreter, -> { OpenAI::Models::Beta::CodeInterpreterTool }
+              variant :code_interpreter, -> { OpenAI::Beta::CodeInterpreterTool }

-              variant :file_search,
-                      -> { OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch }
+              variant :file_search, -> { OpenAI::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch }

-              class FileSearch < OpenAI::BaseModel
+              class FileSearch < OpenAI::Internal::Type::BaseModel
                 # @!attribute type
                 #   The type of tool being defined: `file_search`
                 #
                 #   @return [Symbol, :file_search]
                 required :type, const: :file_search

-                # @!parse
-                #   # @param type [Symbol, :file_search]
-                #   #
-                #   def initialize(type: :file_search, **) = super
-
-                # def initialize: (Hash | OpenAI::BaseModel) -> void
+                # @!method initialize(type: :file_search)
+                #   @param type [Symbol, :file_search] The type of tool being defined: `file_search`
               end
+
+              # @!method self.variants
+              #   @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch)]
             end
           end
         end

-        class ToolResources < OpenAI::BaseModel
-          # @!attribute [r] code_interpreter
+        class ToolResources < OpenAI::Internal::Type::BaseModel
+          # @!attribute code_interpreter
           #
           #   @return [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter, nil]
-          optional :code_interpreter,
-                   -> { OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter }
+          optional :code_interpreter, -> { OpenAI::Beta::ThreadCreateParams::ToolResources::CodeInterpreter }

-          # @!parse
-          #   # @return [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter]
-          #   attr_writer :code_interpreter
-
-          # @!attribute [r] file_search
+          # @!attribute file_search
           #
           #   @return [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch, nil]
-          optional :file_search, -> { OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch }
-
-          # @!parse
-          #   # @return [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch]
-          #   attr_writer :file_search
-
-          # @!parse
-          #   # A set of resources that are made available to the assistant's tools in this
-          #   # thread. The resources are specific to the type of tool. For example, the
-          #   # `code_interpreter` tool requires a list of file IDs, while the `file_search`
-          #   # tool requires a list of vector store IDs.
-          #   #
-          #   # @param code_interpreter [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter]
-          #   # @param file_search [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch]
-          #   #
-          #   def initialize(code_interpreter: nil, file_search: nil, **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
-
-          class CodeInterpreter < OpenAI::BaseModel
-            # @!attribute [r] file_ids
+          optional :file_search, -> { OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch }
+
+          # @!method initialize(code_interpreter: nil, file_search: nil)
+          #   A set of resources that are made available to the assistant's tools in this
+          #   thread. The resources are specific to the type of tool. For example, the
+          #   `code_interpreter` tool requires a list of file IDs, while the `file_search`
+          #   tool requires a list of vector store IDs.
+          #
+          #   @param code_interpreter [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter]
+          #   @param file_search [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch]
+
+          # @see OpenAI::Models::Beta::ThreadCreateParams::ToolResources#code_interpreter
+          class CodeInterpreter < OpenAI::Internal::Type::BaseModel
+            # @!attribute file_ids
             #   A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-            #     available to the `code_interpreter` tool. There can be a maximum of 20 files
-            #     associated with the tool.
+            #   available to the `code_interpreter` tool. There can be a maximum of 20 files
+            #   associated with the tool.
            #
            #   @return [Array, nil]
-            optional :file_ids, OpenAI::ArrayOf[String]
+            optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]

-            # @!parse
-            #   # @return [Array]
-            #   attr_writer :file_ids
-
-            # @!parse
-            #   # @param file_ids [Array]
-            #   #
-            #   def initialize(file_ids: nil, **) = super
-
-            # def initialize: (Hash | OpenAI::BaseModel) -> void
+            # @!method initialize(file_ids: nil)
+            #   Some parameter documentations has been truncated, see
+            #   {OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter} for
+            #   more details.
+            #
+            #   @param file_ids [Array] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
          end

-          class FileSearch < OpenAI::BaseModel
-            # @!attribute [r] vector_store_ids
+          # @see OpenAI::Models::Beta::ThreadCreateParams::ToolResources#file_search
+          class FileSearch < OpenAI::Internal::Type::BaseModel
+            # @!attribute vector_store_ids
             #   The
-            #     [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-            #     attached to this thread. There can be a maximum of 1 vector store attached to
-            #     the thread.
+            #   [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+            #   attached to this thread. There can be a maximum of 1 vector store attached to
+            #   the thread.
            #
            #   @return [Array, nil]
-            optional :vector_store_ids, OpenAI::ArrayOf[String]
-
-            # @!parse
-            #   # @return [Array]
-            #   attr_writer :vector_store_ids
+            optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String]

-            # @!attribute [r] vector_stores
+            # @!attribute vector_stores
             #   A helper to create a
-            #     [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-            #     with file_ids and attach it to this thread. There can be a maximum of 1 vector
-            #     store attached to the thread.
+            #   [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+            #   with file_ids and attach it to this thread. There can be a maximum of 1 vector
+            #   store attached to the thread.
            #
            #   @return [Array, nil]
            optional :vector_stores,
-                     -> { OpenAI::ArrayOf[OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore] }
+                     -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore] }

-            # @!parse
-            #   # @return [Array]
-            #   attr_writer :vector_stores
-
-            # @!parse
-            #   # @param vector_store_ids [Array]
-            #   # @param vector_stores [Array]
-            #   #
-            #   def initialize(vector_store_ids: nil, vector_stores: nil, **) = super
-
-            # def initialize: (Hash | OpenAI::BaseModel) -> void
+            # @!method initialize(vector_store_ids: nil, vector_stores: nil)
+            #   Some parameter documentations has been truncated, see
+            #   {OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch} for more
+            #   details.
+            #
+            #   @param vector_store_ids [Array] The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/
+            #
+            #   @param vector_stores [Array] A helper to create a [vector store](https://platform.openai.com/docs/api-referen

-            class VectorStore < OpenAI::BaseModel
-              # @!attribute [r] chunking_strategy
+            class VectorStore < OpenAI::Internal::Type::BaseModel
+              # @!attribute chunking_strategy
               #   The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-              #     strategy.
+              #   strategy.
               #
               #   @return [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static, nil]
               optional :chunking_strategy,
-                       union: -> { OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy }
-
-              # @!parse
-              #   # @return [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]
-              #   attr_writer :chunking_strategy
+                       union: -> { OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy }

-              # @!attribute [r] file_ids
+              # @!attribute file_ids
               #   A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
-              #     add to the vector store. There can be a maximum of 10000 files in a vector
-              #     store.
+              #   add to the vector store. There can be a maximum of 10000 files in a vector
+              #   store.
               #
               #   @return [Array, nil]
-              optional :file_ids, OpenAI::ArrayOf[String]
-
-              # @!parse
-              #   # @return [Array]
-              #   attr_writer :file_ids
+              optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]

               # @!attribute metadata
               #   Set of 16 key-value pairs that can be attached to an object. This can be useful
-              #     for storing additional information about the object in a structured format, and
-              #     querying for objects via API or the dashboard.
+              #   for storing additional information about the object in a structured format, and
+              #   querying for objects via API or the dashboard.
               #
-              #     Keys are strings with a maximum length of 64 characters. Values are strings with
-              #     a maximum length of 512 characters.
+              #   Keys are strings with a maximum length of 64 characters. Values are strings with
+              #   a maximum length of 512 characters.
               #
               #   @return [Hash{Symbol=>String}, nil]
-              optional :metadata, OpenAI::HashOf[String], nil?: true
-
-              # @!parse
-              #   # @param chunking_strategy [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]
-              #   # @param file_ids [Array]
-              #   # @param metadata [Hash{Symbol=>String}, nil]
-              #   #
-              #   def initialize(chunking_strategy: nil, file_ids: nil, metadata: nil, **) = super
+              optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

-              # def initialize: (Hash | OpenAI::BaseModel) -> void
-
-              # @abstract
+              # @!method initialize(chunking_strategy: nil, file_ids: nil, metadata: nil)
+              #   Some parameter documentations has been truncated, see
+              #   {OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore}
+              #   for more details.
+              #
+              #   @param chunking_strategy [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static] The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+              #
+              #   @param file_ids [Array] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to ad
+              #
+              #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
+
               #   The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-              #     strategy.
-              class ChunkingStrategy < OpenAI::Union
+              #   strategy.
+              #
+              # @see OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore#chunking_strategy
+              module ChunkingStrategy
+                extend OpenAI::Internal::Type::Union
+
                 discriminator :type

                 # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`.
                 variant :auto,
-                        -> { OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto }
+                        -> { OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto }

                 variant :static,
-                        -> { OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static }
+                        -> { OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static }

-                class Auto < OpenAI::BaseModel
+                class Auto < OpenAI::Internal::Type::BaseModel
                   # @!attribute type
                   #   Always `auto`.
                   #
                   #   @return [Symbol, :auto]
                   required :type, const: :auto

-                  # @!parse
-                  #   # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
-                  #   # `800` and `chunk_overlap_tokens` of `400`.
-                  #   #
-                  #   # @param type [Symbol, :auto]
-                  #   #
-                  #   def initialize(type: :auto, **) = super
-
-                  # def initialize: (Hash | OpenAI::BaseModel) -> void
+                  # @!method initialize(type: :auto)
+                  #   The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
+                  #   `800` and `chunk_overlap_tokens` of `400`.
+                  #
+                  #   @param type [Symbol, :auto] Always `auto`.
end - class Static < OpenAI::BaseModel + class Static < OpenAI::Internal::Type::BaseModel # @!attribute static # # @return [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static] required :static, - -> { OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static } + -> { OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static } # @!attribute type # Always `static`. @@ -362,39 +331,41 @@ class Static < OpenAI::BaseModel # @return [Symbol, :static] required :type, const: :static - # @!parse - # # @param static [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static] - # # @param type [Symbol, :static] - # # - # def initialize(static:, type: :static, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(static:, type: :static) + # @param static [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static] + # + # @param type [Symbol, :static] Always `static`. - class Static < OpenAI::BaseModel + # @see OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static#static + class Static < OpenAI::Internal::Type::BaseModel # @!attribute chunk_overlap_tokens # The number of tokens that overlap between chunks. The default value is `400`. # - # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. # # @return [Integer] required :chunk_overlap_tokens, Integer # @!attribute max_chunk_size_tokens # The maximum number of tokens in each chunk. The default value is `800`. The - # minimum value is `100` and the maximum value is `4096`. + # minimum value is `100` and the maximum value is `4096`. # # @return [Integer] required :max_chunk_size_tokens, Integer - # @!parse - # # @param chunk_overlap_tokens [Integer] - # # @param max_chunk_size_tokens [Integer] - # # - # def initialize(chunk_overlap_tokens:, max_chunk_size_tokens:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(chunk_overlap_tokens:, max_chunk_size_tokens:) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static} + # for more details. + # + # @param chunk_overlap_tokens [Integer] The number of tokens that overlap between chunks. The default value is `400`. + # + # @param max_chunk_size_tokens [Integer] The maximum number of tokens in each chunk. The default value is `800`.
The mini end end + + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static)] end end end diff --git a/lib/openai/models/beta/thread_delete_params.rb b/lib/openai/models/beta/thread_delete_params.rb index bff19a8a..308e47db 100644 --- a/lib/openai/models/beta/thread_delete_params.rb +++ b/lib/openai/models/beta/thread_delete_params.rb @@ -3,17 +3,13 @@ module OpenAI module Models module Beta - class ThreadDeleteParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Threads#delete + class ThreadDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/beta/thread_deleted.rb b/lib/openai/models/beta/thread_deleted.rb index f27336b9..862e25fc 100644 --- a/lib/openai/models/beta/thread_deleted.rb +++ b/lib/openai/models/beta/thread_deleted.rb @@ -3,7 +3,8 @@ module OpenAI module Models module Beta - class ThreadDeleted < OpenAI::BaseModel + # @see OpenAI::Resources::Beta::Threads#delete + class ThreadDeleted < OpenAI::Internal::Type::BaseModel # @!attribute id # # @return [String] @@ -12,21 +13,17 @@ class ThreadDeleted < OpenAI::BaseModel # @!attribute deleted # # @return [Boolean] - required :deleted, OpenAI::BooleanModel + required :deleted, OpenAI::Internal::Type::Boolean # @!attribute object # # @return [Symbol, :"thread.deleted"] required :object, const: :"thread.deleted" - # @!parse - # # @param id [String] - # # @param deleted [Boolean] - # # @param object [Symbol, :"thread.deleted"] - # # - # def initialize(id:, deleted:, object: :"thread.deleted", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, deleted:, object: :"thread.deleted") + # @param id [String] + # @param deleted [Boolean] + # @param object [Symbol, :"thread.deleted"] end end end diff --git a/lib/openai/models/beta/thread_retrieve_params.rb b/lib/openai/models/beta/thread_retrieve_params.rb index cd0bc3c8..c27f0bf6 100644 --- a/lib/openai/models/beta/thread_retrieve_params.rb +++ b/lib/openai/models/beta/thread_retrieve_params.rb @@ -3,17 +3,13 @@ module OpenAI module Models module Beta - class ThreadRetrieveParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Threads#retrieve + class ThreadRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git 
a/lib/openai/models/beta/thread_stream_event.rb b/lib/openai/models/beta/thread_stream_event.rb index f24a473b..2af595d4 100644 --- a/lib/openai/models/beta/thread_stream_event.rb +++ b/lib/openai/models/beta/thread_stream_event.rb @@ -3,41 +3,38 @@ module OpenAI module Models module Beta - class ThreadStreamEvent < OpenAI::BaseModel + class ThreadStreamEvent < OpenAI::Internal::Type::BaseModel # @!attribute data # Represents a thread that contains - # [messages](https://platform.openai.com/docs/api-reference/messages). + # [messages](https://platform.openai.com/docs/api-reference/messages). # # @return [OpenAI::Models::Beta::Thread] - required :data, -> { OpenAI::Models::Beta::Thread } + required :data, -> { OpenAI::Beta::Thread } # @!attribute event # # @return [Symbol, :"thread.created"] required :event, const: :"thread.created" - # @!attribute [r] enabled + # @!attribute enabled # Whether to enable input audio transcription. # # @return [Boolean, nil] - optional :enabled, OpenAI::BooleanModel + optional :enabled, OpenAI::Internal::Type::Boolean - # @!parse - # # @return [Boolean] - # attr_writer :enabled - - # @!parse - # # Occurs when a new - # # [thread](https://platform.openai.com/docs/api-reference/threads/object) is - # # created. - # # - # # @param data [OpenAI::Models::Beta::Thread] - # # @param enabled [Boolean] - # # @param event [Symbol, :"thread.created"] - # # - # def initialize(data:, enabled: nil, event: :"thread.created", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, enabled: nil, event: :"thread.created") + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::ThreadStreamEvent} for more details. + # + # Occurs when a new + # [thread](https://platform.openai.com/docs/api-reference/threads/object) is + # created. + # + # @param data [OpenAI::Models::Beta::Thread] Represents a thread that contains [messages](https://platform.openai.com/docs/ap + # + # @param enabled [Boolean] Whether to enable input audio transcription. + # + # @param event [Symbol, :"thread.created"] end end end diff --git a/lib/openai/models/beta/thread_update_params.rb b/lib/openai/models/beta/thread_update_params.rb index 2d9eeab2..f2ddde6e 100644 --- a/lib/openai/models/beta/thread_update_params.rb +++ b/lib/openai/models/beta/thread_update_params.rb @@ -3,114 +3,96 @@ module OpenAI module Models module Beta - class ThreadUpdateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Threads#update + class ThreadUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters.
# # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute tool_resources # A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. # # @return [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources, nil] - optional :tool_resources, -> { OpenAI::Models::Beta::ThreadUpdateParams::ToolResources }, nil?: true + optional :tool_resources, -> { OpenAI::Beta::ThreadUpdateParams::ToolResources }, nil?: true - # @!parse - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param tool_resources [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources, nil] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(metadata: nil, tool_resources: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(metadata: nil, tool_resources: nil, request_options: {}) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::ThreadUpdateParams} for more details. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param tool_resources [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources, nil] A set of resources that are made available to the assistant's tools in this thre + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - class ToolResources < OpenAI::BaseModel - # @!attribute [r] code_interpreter + class ToolResources < OpenAI::Internal::Type::BaseModel + # @!attribute code_interpreter # # @return [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter, nil] - optional :code_interpreter, - -> { OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter } + optional :code_interpreter, -> { OpenAI::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter } - # @!parse - # # @return [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter] - # attr_writer :code_interpreter - - # @!attribute [r] file_search + # @!attribute file_search # # @return [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch, nil] - optional :file_search, -> { OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch } - - # @!parse - # # @return [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch] - # attr_writer :file_search - - # @!parse - # # A set of resources that are made available to the assistant's tools in this - # # thread. The resources are specific to the type of tool. For example, the - # # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # # tool requires a list of vector store IDs.
- # # - # # @param code_interpreter [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter] - # # @param file_search [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch] - # # - # def initialize(code_interpreter: nil, file_search: nil, **) = super + optional :file_search, -> { OpenAI::Beta::ThreadUpdateParams::ToolResources::FileSearch } - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(code_interpreter: nil, file_search: nil) + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + # + # @param code_interpreter [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter] + # @param file_search [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch] - class CodeInterpreter < OpenAI::BaseModel - # @!attribute [r] file_ids + # @see OpenAI::Models::Beta::ThreadUpdateParams::ToolResources#code_interpreter + class CodeInterpreter < OpenAI::Internal::Type::BaseModel + # @!attribute file_ids # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made - # available to the `code_interpreter` tool. There can be a maximum of 20 files - # associated with the tool. + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. # # @return [Array, nil] - optional :file_ids, OpenAI::ArrayOf[String] - - # @!parse - # # @return [Array] - # attr_writer :file_ids + optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] - # @!parse - # # @param file_ids [Array] - # # - # def initialize(file_ids: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(file_ids: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter} for + # more details. + # + # @param file_ids [Array] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made end - class FileSearch < OpenAI::BaseModel - # @!attribute [r] vector_store_ids + # @see OpenAI::Models::Beta::ThreadUpdateParams::ToolResources#file_search + class FileSearch < OpenAI::Internal::Type::BaseModel + # @!attribute vector_store_ids # The - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # attached to this thread. There can be a maximum of 1 vector store attached to - # the thread. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. # # @return [Array, nil] - optional :vector_store_ids, OpenAI::ArrayOf[String] + optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String] - # @!parse - # # @return [Array] - # attr_writer :vector_store_ids - - # @!parse - # # @param vector_store_ids [Array] - # # - # def initialize(vector_store_ids: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(vector_store_ids: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch} for more + # details.
+ # + # @param vector_store_ids [Array] The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/ end end end diff --git a/lib/openai/models/beta/threads/annotation.rb b/lib/openai/models/beta/threads/annotation.rb index 3e21c302..bcb67b49 100644 --- a/lib/openai/models/beta/threads/annotation.rb +++ b/lib/openai/models/beta/threads/annotation.rb @@ -4,19 +4,22 @@ module OpenAI module Models module Beta module Threads - # @abstract - # # A citation within the message that points to a specific quote from a specific - # File associated with the assistant or the message. Generated when the assistant - # uses the "file_search" tool to search files. - class Annotation < OpenAI::Union + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. + module Annotation + extend OpenAI::Internal::Type::Union + discriminator :type # A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. - variant :file_citation, -> { OpenAI::Models::Beta::Threads::FileCitationAnnotation } + variant :file_citation, -> { OpenAI::Beta::Threads::FileCitationAnnotation } # A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. - variant :file_path, -> { OpenAI::Models::Beta::Threads::FilePathAnnotation } + variant :file_path, -> { OpenAI::Beta::Threads::FilePathAnnotation } + + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::Threads::FileCitationAnnotation, OpenAI::Models::Beta::Threads::FilePathAnnotation)] end end end diff --git a/lib/openai/models/beta/threads/annotation_delta.rb b/lib/openai/models/beta/threads/annotation_delta.rb index dfaf1d9e..9eb54f5e 100644 --- a/lib/openai/models/beta/threads/annotation_delta.rb +++ b/lib/openai/models/beta/threads/annotation_delta.rb @@ -4,19 +4,22 @@ module OpenAI module Models module Beta module Threads - # @abstract - # # A citation within the message that points to a specific quote from a specific - # File associated with the assistant or the message. Generated when the assistant - # uses the "file_search" tool to search files. - class AnnotationDelta < OpenAI::Union + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. + module AnnotationDelta + extend OpenAI::Internal::Type::Union + discriminator :type # A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. - variant :file_citation, -> { OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation } + variant :file_citation, -> { OpenAI::Beta::Threads::FileCitationDeltaAnnotation } # A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. 
- variant :file_path, -> { OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation } + variant :file_path, -> { OpenAI::Beta::Threads::FilePathDeltaAnnotation } + + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation, OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation)] end end end diff --git a/lib/openai/models/beta/threads/file_citation_annotation.rb b/lib/openai/models/beta/threads/file_citation_annotation.rb index 0c3116f3..1d0a2a74 100644 --- a/lib/openai/models/beta/threads/file_citation_annotation.rb +++ b/lib/openai/models/beta/threads/file_citation_annotation.rb @@ -4,7 +4,7 @@ module OpenAI module Models module Beta module Threads - class FileCitationAnnotation < OpenAI::BaseModel + class FileCitationAnnotation < OpenAI::Internal::Type::BaseModel # @!attribute end_index # # @return [Integer] @@ -13,7 +13,7 @@ class FileCitationAnnotation < OpenAI::BaseModel # @!attribute file_citation # # @return [OpenAI::Models::Beta::Threads::FileCitationAnnotation::FileCitation] - required :file_citation, -> { OpenAI::Models::Beta::Threads::FileCitationAnnotation::FileCitation } + required :file_citation, -> { OpenAI::Beta::Threads::FileCitationAnnotation::FileCitation } # @!attribute start_index # @@ -32,34 +32,31 @@ class FileCitationAnnotation < OpenAI::BaseModel # @return [Symbol, :file_citation] required :type, const: :file_citation - # @!parse - # # A citation within the message that points to a specific quote from a specific - # # File associated with the assistant or the message. Generated when the assistant - # # uses the "file_search" tool to search files. - # # - # # @param end_index [Integer] - # # @param file_citation [OpenAI::Models::Beta::Threads::FileCitationAnnotation::FileCitation] - # # @param start_index [Integer] - # # @param text [String] - # # @param type [Symbol, :file_citation] - # # - # def initialize(end_index:, file_citation:, start_index:, text:, type: :file_citation, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(end_index:, file_citation:, start_index:, text:, type: :file_citation) + # A citation within the message that points to a specific quote from a specific + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. + # + # @param end_index [Integer] + # + # @param file_citation [OpenAI::Models::Beta::Threads::FileCitationAnnotation::FileCitation] + # + # @param start_index [Integer] + # + # @param text [String] The text in the message content that needs to be replaced. + # + # @param type [Symbol, :file_citation] Always `file_citation`. - class FileCitation < OpenAI::BaseModel + # @see OpenAI::Models::Beta::Threads::FileCitationAnnotation#file_citation + class FileCitation < OpenAI::Internal::Type::BaseModel # @!attribute file_id # The ID of the specific File the citation is from. # # @return [String] required :file_id, String - # @!parse - # # @param file_id [String] - # # - # def initialize(file_id:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(file_id:) + # @param file_id [String] The ID of the specific File the citation is from. 
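# @example A minimal construction sketch, assuming `BaseModel.new` coerces
#   nested hashes via the `required ... -> { ... }` declarations above; the
#   file ID and offsets are illustrative, not real values.
#
#   annotation = OpenAI::Beta::Threads::FileCitationAnnotation.new(
#     end_index: 20,
#     file_citation: {file_id: "file-abc123"}, # hypothetical ID
#     start_index: 0,
#     text: "[1]"
#   )
#   annotation.type # => :file_citation, defaulted by the const field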
end end end diff --git a/lib/openai/models/beta/threads/file_citation_delta_annotation.rb b/lib/openai/models/beta/threads/file_citation_delta_annotation.rb index 512bc7ad..db18b6e4 100644 --- a/lib/openai/models/beta/threads/file_citation_delta_annotation.rb +++ b/lib/openai/models/beta/threads/file_citation_delta_annotation.rb @@ -4,7 +4,7 @@ module OpenAI module Models module Beta module Threads - class FileCitationDeltaAnnotation < OpenAI::BaseModel + class FileCitationDeltaAnnotation < OpenAI::Internal::Type::BaseModel # @!attribute index # The index of the annotation in the text content part. # @@ -17,87 +17,62 @@ class FileCitationDeltaAnnotation < OpenAI::BaseModel # @return [Symbol, :file_citation] required :type, const: :file_citation - # @!attribute [r] end_index + # @!attribute end_index # # @return [Integer, nil] optional :end_index, Integer - # @!parse - # # @return [Integer] - # attr_writer :end_index - - # @!attribute [r] file_citation + # @!attribute file_citation # # @return [OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation, nil] - optional :file_citation, -> { OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation } - - # @!parse - # # @return [OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation] - # attr_writer :file_citation + optional :file_citation, -> { OpenAI::Beta::Threads::FileCitationDeltaAnnotation::FileCitation } - # @!attribute [r] start_index + # @!attribute start_index # # @return [Integer, nil] optional :start_index, Integer - # @!parse - # # @return [Integer] - # attr_writer :start_index - - # @!attribute [r] text + # @!attribute text # The text in the message content that needs to be replaced. # # @return [String, nil] optional :text, String - # @!parse - # # @return [String] - # attr_writer :text - - # @!parse - # # A citation within the message that points to a specific quote from a specific - # # File associated with the assistant or the message. Generated when the assistant - # # uses the "file_search" tool to search files. - # # - # # @param index [Integer] - # # @param end_index [Integer] - # # @param file_citation [OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation] - # # @param start_index [Integer] - # # @param text [String] - # # @param type [Symbol, :file_citation] - # # - # def initialize(index:, end_index: nil, file_citation: nil, start_index: nil, text: nil, type: :file_citation, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(index:, end_index: nil, file_citation: nil, start_index: nil, text: nil, type: :file_citation) + # A citation within the message that points to a specific quote from a specific + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. + # + # @param index [Integer] The index of the annotation in the text content part. + # + # @param end_index [Integer] + # + # @param file_citation [OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation] + # + # @param start_index [Integer] + # + # @param text [String] The text in the message content that needs to be replaced. + # + # @param type [Symbol, :file_citation] Always `file_citation`. 
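# @example A sketch of the delta shape, assuming the optional declarations
#   above: only `index` is required, so a streamed chunk may carry as little as
#   its position (values illustrative).
#
#   delta = OpenAI::Beta::Threads::FileCitationDeltaAnnotation.new(index: 0)
#   delta.text # => nil until a later chunk supplies it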
- class FileCitation < OpenAI::BaseModel - # @!attribute [r] file_id + # @see OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation#file_citation + class FileCitation < OpenAI::Internal::Type::BaseModel + # @!attribute file_id # The ID of the specific File the citation is from. # # @return [String, nil] optional :file_id, String - # @!parse - # # @return [String] - # attr_writer :file_id - - # @!attribute [r] quote + # @!attribute quote # The specific quote in the file. # # @return [String, nil] optional :quote, String - # @!parse - # # @return [String] - # attr_writer :quote - - # @!parse - # # @param file_id [String] - # # @param quote [String] - # # - # def initialize(file_id: nil, quote: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(file_id: nil, quote: nil) + # @param file_id [String] The ID of the specific File the citation is from. + # + # @param quote [String] The specific quote in the file. end end end diff --git a/lib/openai/models/beta/threads/file_path_annotation.rb b/lib/openai/models/beta/threads/file_path_annotation.rb index a04561c4..6c4e70c1 100644 --- a/lib/openai/models/beta/threads/file_path_annotation.rb +++ b/lib/openai/models/beta/threads/file_path_annotation.rb @@ -4,7 +4,7 @@ module OpenAI module Models module Beta module Threads - class FilePathAnnotation < OpenAI::BaseModel + class FilePathAnnotation < OpenAI::Internal::Type::BaseModel # @!attribute end_index # # @return [Integer] @@ -13,7 +13,7 @@ class FilePathAnnotation < OpenAI::BaseModel # @!attribute file_path # # @return [OpenAI::Models::Beta::Threads::FilePathAnnotation::FilePath] - required :file_path, -> { OpenAI::Models::Beta::Threads::FilePathAnnotation::FilePath } + required :file_path, -> { OpenAI::Beta::Threads::FilePathAnnotation::FilePath } # @!attribute start_index # @@ -32,33 +32,30 @@ class FilePathAnnotation < OpenAI::BaseModel # @return [Symbol, :file_path] required :type, const: :file_path - # @!parse - # # A URL for the file that's generated when the assistant used the - # # `code_interpreter` tool to generate a file. - # # - # # @param end_index [Integer] - # # @param file_path [OpenAI::Models::Beta::Threads::FilePathAnnotation::FilePath] - # # @param start_index [Integer] - # # @param text [String] - # # @param type [Symbol, :file_path] - # # - # def initialize(end_index:, file_path:, start_index:, text:, type: :file_path, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(end_index:, file_path:, start_index:, text:, type: :file_path) + # A URL for the file that's generated when the assistant used the + # `code_interpreter` tool to generate a file. + # + # @param end_index [Integer] + # + # @param file_path [OpenAI::Models::Beta::Threads::FilePathAnnotation::FilePath] + # + # @param start_index [Integer] + # + # @param text [String] The text in the message content that needs to be replaced. + # + # @param type [Symbol, :file_path] Always `file_path`. - class FilePath < OpenAI::BaseModel + # @see OpenAI::Models::Beta::Threads::FilePathAnnotation#file_path + class FilePath < OpenAI::Internal::Type::BaseModel # @!attribute file_id # The ID of the file that was generated. # # @return [String] required :file_id, String - # @!parse - # # @param file_id [String] - # # - # def initialize(file_id:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(file_id:) + # @param file_id [String] The ID of the file that was generated. 
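# @example A consumption sketch; it assumes text content parts expose their
#   annotations as the Annotation union above suggests, and leaves the download
#   step abstract since no client call is shown in this diff.
#
#   message.content.each do |part|
#     next unless part.type == :text
#     part.text.annotations.each do |ann|
#       # when ann.type == :file_path, ann.file_path.file_id names the generated file
#     end
#   end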
end end end diff --git a/lib/openai/models/beta/threads/file_path_delta_annotation.rb b/lib/openai/models/beta/threads/file_path_delta_annotation.rb index 1ef92d3a..041f2a4a 100644 --- a/lib/openai/models/beta/threads/file_path_delta_annotation.rb +++ b/lib/openai/models/beta/threads/file_path_delta_annotation.rb @@ -4,7 +4,7 @@ module OpenAI module Models module Beta module Threads - class FilePathDeltaAnnotation < OpenAI::BaseModel + class FilePathDeltaAnnotation < OpenAI::Internal::Type::BaseModel # @!attribute index # The index of the annotation in the text content part. # @@ -17,75 +17,53 @@ class FilePathDeltaAnnotation < OpenAI::BaseModel # @return [Symbol, :file_path] required :type, const: :file_path - # @!attribute [r] end_index + # @!attribute end_index # # @return [Integer, nil] optional :end_index, Integer - # @!parse - # # @return [Integer] - # attr_writer :end_index - - # @!attribute [r] file_path + # @!attribute file_path # # @return [OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath, nil] - optional :file_path, -> { OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath } - - # @!parse - # # @return [OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath] - # attr_writer :file_path + optional :file_path, -> { OpenAI::Beta::Threads::FilePathDeltaAnnotation::FilePath } - # @!attribute [r] start_index + # @!attribute start_index # # @return [Integer, nil] optional :start_index, Integer - # @!parse - # # @return [Integer] - # attr_writer :start_index - - # @!attribute [r] text + # @!attribute text # The text in the message content that needs to be replaced. # # @return [String, nil] optional :text, String - # @!parse - # # @return [String] - # attr_writer :text - - # @!parse - # # A URL for the file that's generated when the assistant used the - # # `code_interpreter` tool to generate a file. - # # - # # @param index [Integer] - # # @param end_index [Integer] - # # @param file_path [OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath] - # # @param start_index [Integer] - # # @param text [String] - # # @param type [Symbol, :file_path] - # # - # def initialize(index:, end_index: nil, file_path: nil, start_index: nil, text: nil, type: :file_path, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(index:, end_index: nil, file_path: nil, start_index: nil, text: nil, type: :file_path) + # A URL for the file that's generated when the assistant used the + # `code_interpreter` tool to generate a file. + # + # @param index [Integer] The index of the annotation in the text content part. + # + # @param end_index [Integer] + # + # @param file_path [OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath] + # + # @param start_index [Integer] + # + # @param text [String] The text in the message content that needs to be replaced. + # + # @param type [Symbol, :file_path] Always `file_path`. - class FilePath < OpenAI::BaseModel - # @!attribute [r] file_id + # @see OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation#file_path + class FilePath < OpenAI::Internal::Type::BaseModel + # @!attribute file_id # The ID of the file that was generated. 
# # @return [String, nil] optional :file_id, String - # @!parse - # # @return [String] - # attr_writer :file_id - - # @!parse - # # @param file_id [String] - # # - # def initialize(file_id: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(file_id: nil) + # @param file_id [String] The ID of the file that was generated. end end end diff --git a/lib/openai/models/beta/threads/image_file.rb b/lib/openai/models/beta/threads/image_file.rb index 2156325f..53cf02ed 100644 --- a/lib/openai/models/beta/threads/image_file.rb +++ b/lib/openai/models/beta/threads/image_file.rb @@ -4,44 +4,43 @@ module OpenAI module Models module Beta module Threads - class ImageFile < OpenAI::BaseModel + class ImageFile < OpenAI::Internal::Type::BaseModel # @!attribute file_id # The [File](https://platform.openai.com/docs/api-reference/files) ID of the image - # in the message content. Set `purpose="vision"` when uploading the File if you - # need to later display the file content. + # in the message content. Set `purpose="vision"` when uploading the File if you + # need to later display the file content. # # @return [String] required :file_id, String - # @!attribute [r] detail + # @!attribute detail # Specifies the detail level of the image if specified by the user. `low` uses - # fewer tokens, you can opt in to high resolution using `high`. + # fewer tokens, you can opt in to high resolution using `high`. # # @return [Symbol, OpenAI::Models::Beta::Threads::ImageFile::Detail, nil] - optional :detail, enum: -> { OpenAI::Models::Beta::Threads::ImageFile::Detail } - - # @!parse - # # @return [Symbol, OpenAI::Models::Beta::Threads::ImageFile::Detail] - # attr_writer :detail - - # @!parse - # # @param file_id [String] - # # @param detail [Symbol, OpenAI::Models::Beta::Threads::ImageFile::Detail] - # # - # def initialize(file_id:, detail: nil, **) = super + optional :detail, enum: -> { OpenAI::Beta::Threads::ImageFile::Detail } - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(file_id:, detail: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::Threads::ImageFile} for more details. + # + # @param file_id [String] The [File](https://platform.openai.com/docs/api-reference/files) ID of the image # + # @param detail [Symbol, OpenAI::Models::Beta::Threads::ImageFile::Detail] Specifies the detail level of the image if specified by the user. `low` uses few + # Specifies the detail level of the image if specified by the user. `low` uses - # fewer tokens, you can opt in to high resolution using `high`. - class Detail < OpenAI::Enum + # fewer tokens, you can opt in to high resolution using `high`. + # + # @see OpenAI::Models::Beta::Threads::ImageFile#detail + module Detail + extend OpenAI::Internal::Type::Enum + AUTO = :auto LOW = :low HIGH = :high - finalize!
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/beta/threads/image_file_content_block.rb b/lib/openai/models/beta/threads/image_file_content_block.rb index 00121cfe..2ae8fe56 100644 --- a/lib/openai/models/beta/threads/image_file_content_block.rb +++ b/lib/openai/models/beta/threads/image_file_content_block.rb @@ -4,11 +4,11 @@ module OpenAI module Models module Beta module Threads - class ImageFileContentBlock < OpenAI::BaseModel + class ImageFileContentBlock < OpenAI::Internal::Type::BaseModel # @!attribute image_file # # @return [OpenAI::Models::Beta::Threads::ImageFile] - required :image_file, -> { OpenAI::Models::Beta::Threads::ImageFile } + required :image_file, -> { OpenAI::Beta::Threads::ImageFile } # @!attribute type # Always `image_file`. @@ -16,16 +16,13 @@ class ImageFileContentBlock < OpenAI::BaseModel # @return [Symbol, :image_file] required :type, const: :image_file - # @!parse - # # References an image [File](https://platform.openai.com/docs/api-reference/files) - # # in the content of a message. - # # - # # @param image_file [OpenAI::Models::Beta::Threads::ImageFile] - # # @param type [Symbol, :image_file] - # # - # def initialize(image_file:, type: :image_file, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(image_file:, type: :image_file) + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. + # + # @param image_file [OpenAI::Models::Beta::Threads::ImageFile] + # + # @param type [Symbol, :image_file] Always `image_file`. end end end diff --git a/lib/openai/models/beta/threads/image_file_delta.rb b/lib/openai/models/beta/threads/image_file_delta.rb index 81caa23f..b1d4c62e 100644 --- a/lib/openai/models/beta/threads/image_file_delta.rb +++ b/lib/openai/models/beta/threads/image_file_delta.rb @@ -4,48 +4,43 @@ module OpenAI module Models module Beta module Threads - class ImageFileDelta < OpenAI::BaseModel - # @!attribute [r] detail + class ImageFileDelta < OpenAI::Internal::Type::BaseModel + # @!attribute detail # Specifies the detail level of the image if specified by the user. `low` uses - # fewer tokens, you can opt in to high resolution using `high`. + # fewer tokens, you can opt in to high resolution using `high`. # # @return [Symbol, OpenAI::Models::Beta::Threads::ImageFileDelta::Detail, nil] - optional :detail, enum: -> { OpenAI::Models::Beta::Threads::ImageFileDelta::Detail } - - # @!parse - # # @return [Symbol, OpenAI::Models::Beta::Threads::ImageFileDelta::Detail] - # attr_writer :detail + optional :detail, enum: -> { OpenAI::Beta::Threads::ImageFileDelta::Detail } - # @!attribute [r] file_id + # @!attribute file_id # The [File](https://platform.openai.com/docs/api-reference/files) ID of the image - # in the message content. Set `purpose="vision"` when uploading the File if you - # need to later display the file content. + # in the message content. Set `purpose="vision"` when uploading the File if you + # need to later display the file content. 
# # @return [String, nil] optional :file_id, String - # @!parse - # # @return [String] - # attr_writer :file_id - - # @!parse - # # @param detail [Symbol, OpenAI::Models::Beta::Threads::ImageFileDelta::Detail] - # # @param file_id [String] - # # - # def initialize(detail: nil, file_id: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(detail: nil, file_id: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::Threads::ImageFileDelta} for more details. # + # @param detail [Symbol, OpenAI::Models::Beta::Threads::ImageFileDelta::Detail] Specifies the detail level of the image if specified by the user. `low` uses few + # + # @param file_id [String] The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + # Specifies the detail level of the image if specified by the user. `low` uses - # fewer tokens, you can opt in to high resolution using `high`. - class Detail < OpenAI::Enum + # fewer tokens, you can opt in to high resolution using `high`. + # + # @see OpenAI::Models::Beta::Threads::ImageFileDelta#detail + module Detail + extend OpenAI::Internal::Type::Enum + AUTO = :auto LOW = :low HIGH = :high - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/beta/threads/image_file_delta_block.rb b/lib/openai/models/beta/threads/image_file_delta_block.rb index c33babbb..8657f912 100644 --- a/lib/openai/models/beta/threads/image_file_delta_block.rb +++ b/lib/openai/models/beta/threads/image_file_delta_block.rb @@ -4,7 +4,7 @@ module OpenAI module Models module Beta module Threads - class ImageFileDeltaBlock < OpenAI::BaseModel + class ImageFileDeltaBlock < OpenAI::Internal::Type::BaseModel # @!attribute index # The index of the content part in the message. # @@ -17,26 +17,20 @@ class ImageFileDeltaBlock < OpenAI::BaseModel # @return [Symbol, :image_file] required :type, const: :image_file - # @!attribute [r] image_file + # @!attribute image_file # # @return [OpenAI::Models::Beta::Threads::ImageFileDelta, nil] - optional :image_file, -> { OpenAI::Models::Beta::Threads::ImageFileDelta } + optional :image_file, -> { OpenAI::Beta::Threads::ImageFileDelta } - # @!parse - # # @return [OpenAI::Models::Beta::Threads::ImageFileDelta] - # attr_writer :image_file - - # @!parse - # # References an image [File](https://platform.openai.com/docs/api-reference/files) - # # in the content of a message. - # # - # # @param index [Integer] - # # @param image_file [OpenAI::Models::Beta::Threads::ImageFileDelta] - # # @param type [Symbol, :image_file] - # # - # def initialize(index:, image_file: nil, type: :image_file, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(index:, image_file: nil, type: :image_file) + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. + # + # @param index [Integer] The index of the content part in the message. + # + # @param image_file [OpenAI::Models::Beta::Threads::ImageFileDelta] + # + # @param type [Symbol, :image_file] Always `image_file`.
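# @example A sketch of positional merging: `index` addresses the content part a
#   delta updates. The accumulation strategy below is this example's own, not an
#   SDK helper.
#
#   parts = []
#   delta_blocks.each do |block|
#     parts[block.index] = block.image_file if block.type == :image_file
#   end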
end end end diff --git a/lib/openai/models/beta/threads/image_url.rb b/lib/openai/models/beta/threads/image_url.rb index 8c08ac9d..a78260eb 100644 --- a/lib/openai/models/beta/threads/image_url.rb +++ b/lib/openai/models/beta/threads/image_url.rb @@ -4,43 +4,42 @@ module OpenAI module Models module Beta module Threads - class ImageURL < OpenAI::BaseModel + class ImageURL < OpenAI::Internal::Type::BaseModel # @!attribute url # The external URL of the image; must be one of the supported image types: jpeg, - # jpg, png, gif, webp. + # jpg, png, gif, webp. # # @return [String] required :url, String - # @!attribute [r] detail + # @!attribute detail # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in - # to high resolution using `high`. Default value is `auto` + # to high resolution using `high`. Default value is `auto` # # @return [Symbol, OpenAI::Models::Beta::Threads::ImageURL::Detail, nil] - optional :detail, enum: -> { OpenAI::Models::Beta::Threads::ImageURL::Detail } - - # @!parse - # # @return [Symbol, OpenAI::Models::Beta::Threads::ImageURL::Detail] - # attr_writer :detail - - # @!parse - # # @param url [String] - # # @param detail [Symbol, OpenAI::Models::Beta::Threads::ImageURL::Detail] - # # - # def initialize(url:, detail: nil, **) = super + optional :detail, enum: -> { OpenAI::Beta::Threads::ImageURL::Detail } - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(url:, detail: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::Threads::ImageURL} for more details. + # + # @param url [String] The external URL of the image; must be one of the supported image types: jpeg, # + # @param detail [Symbol, OpenAI::Models::Beta::Threads::ImageURL::Detail] Specifies the detail level of the image. `low` uses fewer tokens, you can opt in + # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in - # to high resolution using `high`. Default value is `auto` - class Detail < OpenAI::Enum + # to high resolution using `high`. Default value is `auto` + # + # @see OpenAI::Models::Beta::Threads::ImageURL#detail + module Detail + extend OpenAI::Internal::Type::Enum + AUTO = :auto LOW = :low HIGH = :high - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/beta/threads/image_url_content_block.rb b/lib/openai/models/beta/threads/image_url_content_block.rb index b800af73..d2f1a28d 100644 --- a/lib/openai/models/beta/threads/image_url_content_block.rb +++ b/lib/openai/models/beta/threads/image_url_content_block.rb @@ -4,11 +4,11 @@ module OpenAI module Models module Beta module Threads - class ImageURLContentBlock < OpenAI::BaseModel + class ImageURLContentBlock < OpenAI::Internal::Type::BaseModel # @!attribute image_url # # @return [OpenAI::Models::Beta::Threads::ImageURL] - required :image_url, -> { OpenAI::Models::Beta::Threads::ImageURL } + required :image_url, -> { OpenAI::Beta::Threads::ImageURL } # @!attribute type # The type of the content part. @@ -16,15 +16,12 @@ class ImageURLContentBlock < OpenAI::BaseModel # @return [Symbol, :image_url] required :type, const: :image_url - # @!parse - # # References an image URL in the content of a message.
- # # - # # @param image_url [OpenAI::Models::Beta::Threads::ImageURL] - # # @param type [Symbol, :image_url] - # # - # def initialize(image_url:, type: :image_url, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(image_url:, type: :image_url) + # References an image URL in the content of a message. + # + # @param image_url [OpenAI::Models::Beta::Threads::ImageURL] + # + # @param type [Symbol, :image_url] The type of the content part. end end end diff --git a/lib/openai/models/beta/threads/image_url_delta.rb b/lib/openai/models/beta/threads/image_url_delta.rb index ad6177d4..43fba03f 100644 --- a/lib/openai/models/beta/threads/image_url_delta.rb +++ b/lib/openai/models/beta/threads/image_url_delta.rb @@ -4,47 +4,42 @@ module OpenAI module Models module Beta module Threads - class ImageURLDelta < OpenAI::BaseModel - # @!attribute [r] detail + class ImageURLDelta < OpenAI::Internal::Type::BaseModel + # @!attribute detail # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in - # to high resolution using `high`. + # to high resolution using `high`. # # @return [Symbol, OpenAI::Models::Beta::Threads::ImageURLDelta::Detail, nil] - optional :detail, enum: -> { OpenAI::Models::Beta::Threads::ImageURLDelta::Detail } - - # @!parse - # # @return [Symbol, OpenAI::Models::Beta::Threads::ImageURLDelta::Detail] - # attr_writer :detail + optional :detail, enum: -> { OpenAI::Beta::Threads::ImageURLDelta::Detail } - # @!attribute [r] url + # @!attribute url # The URL of the image; must be one of the supported image types: jpeg, jpg, png, - # gif, webp. + # gif, webp. # # @return [String, nil] optional :url, String - # @!parse - # # @return [String] - # attr_writer :url - - # @!parse - # # @param detail [Symbol, OpenAI::Models::Beta::Threads::ImageURLDelta::Detail] - # # @param url [String] - # # - # def initialize(detail: nil, url: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(detail: nil, url: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::Threads::ImageURLDelta} for more details. # + # @param detail [Symbol, OpenAI::Models::Beta::Threads::ImageURLDelta::Detail] Specifies the detail level of the image. `low` uses fewer tokens, you can opt in + # + # @param url [String] The URL of the image; must be one of the supported image types: jpeg, jpg, png, + # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in - # to high resolution using `high`. - class Detail < OpenAI::Enum + # to high resolution using `high`. + # + # @see OpenAI::Models::Beta::Threads::ImageURLDelta#detail + module Detail + extend OpenAI::Internal::Type::Enum + AUTO = :auto LOW = :low HIGH = :high - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/beta/threads/image_url_delta_block.rb b/lib/openai/models/beta/threads/image_url_delta_block.rb index 59f49efb..72079ef1 100644 --- a/lib/openai/models/beta/threads/image_url_delta_block.rb +++ b/lib/openai/models/beta/threads/image_url_delta_block.rb @@ -4,7 +4,7 @@ module OpenAI module Models module Beta module Threads - class ImageURLDeltaBlock < OpenAI::BaseModel + class ImageURLDeltaBlock < OpenAI::Internal::Type::BaseModel # @!attribute index # The index of the content part in the message.
# @@ -17,25 +17,19 @@ class ImageURLDeltaBlock < OpenAI::BaseModel # @return [Symbol, :image_url] required :type, const: :image_url - # @!attribute [r] image_url + # @!attribute image_url # # @return [OpenAI::Models::Beta::Threads::ImageURLDelta, nil] - optional :image_url, -> { OpenAI::Models::Beta::Threads::ImageURLDelta } + optional :image_url, -> { OpenAI::Beta::Threads::ImageURLDelta } - # @!parse - # # @return [OpenAI::Models::Beta::Threads::ImageURLDelta] - # attr_writer :image_url - - # @!parse - # # References an image URL in the content of a message. - # # - # # @param index [Integer] - # # @param image_url [OpenAI::Models::Beta::Threads::ImageURLDelta] - # # @param type [Symbol, :image_url] - # # - # def initialize(index:, image_url: nil, type: :image_url, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(index:, image_url: nil, type: :image_url) + # References an image URL in the content of a message. + # + # @param index [Integer] The index of the content part in the message. + # + # @param image_url [OpenAI::Models::Beta::Threads::ImageURLDelta] + # + # @param type [Symbol, :image_url] Always `image_url`. end end end diff --git a/lib/openai/models/beta/threads/message.rb b/lib/openai/models/beta/threads/message.rb index b434fe46..b5bf5069 100644 --- a/lib/openai/models/beta/threads/message.rb +++ b/lib/openai/models/beta/threads/message.rb @@ -4,7 +4,8 @@ module OpenAI module Models module Beta module Threads - class Message < OpenAI::BaseModel + # @see OpenAI::Resources::Beta::Threads::Messages#create + class Message < OpenAI::Internal::Type::BaseModel # @!attribute id # The identifier, which can be referenced in API endpoints. # # @return [String] required :id, String @@ -13,8 +14,8 @@ class Message < OpenAI::BaseModel # @!attribute assistant_id # If applicable, the ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) that - # authored this message. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) that + # authored this message. # # @return [String, nil] required :assistant_id, String, nil?: true @@ -24,7 +25,7 @@ class Message < OpenAI::BaseModel # # @return [Array, nil] required :attachments, - -> { OpenAI::ArrayOf[OpenAI::Models::Beta::Threads::Message::Attachment] }, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::Message::Attachment] }, nil?: true # @!attribute completed_at @@ -37,7 +38,7 @@ class Message < OpenAI::BaseModel # The content of the message in an array of text and/or images. # # @return [Array] - required :content, -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::Threads::MessageContent] } + required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::MessageContent] } # @!attribute created_at # The Unix timestamp (in seconds) for when the message was created. @@ -55,18 +56,18 @@ class Message < OpenAI::BaseModel # On an incomplete message, details about why the message is incomplete. # # @return [OpenAI::Models::Beta::Threads::Message::IncompleteDetails, nil] - required :incomplete_details, -> { OpenAI::Models::Beta::Threads::Message::IncompleteDetails }, nil?: true + required :incomplete_details, -> { OpenAI::Beta::Threads::Message::IncompleteDetails }, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard.
+ # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - required :metadata, OpenAI::HashOf[String], nil?: true + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute object # The object type, which is always `thread.message`. @@ -78,174 +79,163 @@ class Message < OpenAI::BaseModel # The entity that produced the message. One of `user` or `assistant`. # # @return [Symbol, OpenAI::Models::Beta::Threads::Message::Role] - required :role, enum: -> { OpenAI::Models::Beta::Threads::Message::Role } + required :role, enum: -> { OpenAI::Beta::Threads::Message::Role } # @!attribute run_id # The ID of the [run](https://platform.openai.com/docs/api-reference/runs) - # associated with the creation of this message. Value is `null` when messages are - # created manually using the create message or create thread endpoints. + # associated with the creation of this message. Value is `null` when messages are + # created manually using the create message or create thread endpoints. # # @return [String, nil] required :run_id, String, nil?: true # @!attribute status # The status of the message, which can be either `in_progress`, `incomplete`, or - # `completed`. + # `completed`. # # @return [Symbol, OpenAI::Models::Beta::Threads::Message::Status] - required :status, enum: -> { OpenAI::Models::Beta::Threads::Message::Status } + required :status, enum: -> { OpenAI::Beta::Threads::Message::Status } # @!attribute thread_id # The [thread](https://platform.openai.com/docs/api-reference/threads) ID that - # this message belongs to. + # this message belongs to. # # @return [String] required :thread_id, String - # @!parse - # # Represents a message within a - # # [thread](https://platform.openai.com/docs/api-reference/threads). - # # - # # @param id [String] - # # @param assistant_id [String, nil] - # # @param attachments [Array, nil] - # # @param completed_at [Integer, nil] - # # @param content [Array] - # # @param created_at [Integer] - # # @param incomplete_at [Integer, nil] - # # @param incomplete_details [OpenAI::Models::Beta::Threads::Message::IncompleteDetails, nil] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param role [Symbol, OpenAI::Models::Beta::Threads::Message::Role] - # # @param run_id [String, nil] - # # @param status [Symbol, OpenAI::Models::Beta::Threads::Message::Status] - # # @param thread_id [String] - # # @param object [Symbol, :"thread.message"] - # # - # def initialize( - # id:, - # assistant_id:, - # attachments:, - # completed_at:, - # content:, - # created_at:, - # incomplete_at:, - # incomplete_details:, - # metadata:, - # role:, - # run_id:, - # status:, - # thread_id:, - # object: :"thread.message", - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Attachment < OpenAI::BaseModel - # @!attribute [r] file_id + # @!method initialize(id:, assistant_id:, attachments:, completed_at:, content:, created_at:, incomplete_at:, incomplete_details:, metadata:, role:, run_id:, status:, thread_id:, object: :"thread.message") + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::Threads::Message} for more details.
+ # + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). + # + # @param id [String] The identifier, which can be referenced in API endpoints. + # + # @param assistant_id [String, nil] If applicable, the ID of the [assistant](https://platform.openai.com/docs/api-re + # + # @param attachments [Array, nil] A list of files attached to the message, and the tools they were added to. + # + # @param completed_at [Integer, nil] The Unix timestamp (in seconds) for when the message was completed. + # + # @param content [Array] The content of the message in an array of text and/or images. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the message was created. + # + # @param incomplete_at [Integer, nil] The Unix timestamp (in seconds) for when the message was marked as incomplete. + # + # @param incomplete_details [OpenAI::Models::Beta::Threads::Message::IncompleteDetails, nil] On an incomplete message, details about why the message is incomplete. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param role [Symbol, OpenAI::Models::Beta::Threads::Message::Role] The entity that produced the message. One of `user` or `assistant`. + # + # @param run_id [String, nil] The ID of the [run](https://platform.openai.com/docs/api-reference/runs) associa + # + # @param status [Symbol, OpenAI::Models::Beta::Threads::Message::Status] The status of the message, which can be either `in_progress`, `incomplete`, or ` + # + # @param thread_id [String] The [thread](https://platform.openai.com/docs/api-reference/threads) ID that thi + # + # @param object [Symbol, :"thread.message"] The object type, which is always `thread.message`. + + class Attachment < OpenAI::Internal::Type::BaseModel + # @!attribute file_id # The ID of the file to attach to the message. # # @return [String, nil] optional :file_id, String - # @!parse - # # @return [String] - # attr_writer :file_id - - # @!attribute [r] tools + # @!attribute tools # The tools to add this file to. # # @return [Array, nil] - optional :tools, -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::Threads::Message::Attachment::Tool] } + optional :tools, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::Message::Attachment::Tool] } - # @!parse - # # @return [Array] - # attr_writer :tools - - # @!parse - # # @param file_id [String] - # # @param tools [Array] - # # - # def initialize(file_id: nil, tools: nil, **) = super + # @!method initialize(file_id: nil, tools: nil) + # @param file_id [String] The ID of the file to attach to the message. + # + # @param tools [Array] The tools to add this file to.
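# @example A construction sketch, assuming the Tool union declared below
#   resolves plain hashes by their `type` value (the file ID is illustrative).
#
#   attachment = OpenAI::Beta::Threads::Message::Attachment.new(
#     file_id: "file-abc123", # hypothetical ID
#     tools: [{type: :code_interpreter}, {type: :file_search}]
#   )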
- # def initialize: (Hash | OpenAI::BaseModel) -> void + module Tool + extend OpenAI::Internal::Type::Union - # @abstract - # - class Tool < OpenAI::Union - variant -> { OpenAI::Models::Beta::CodeInterpreterTool } + variant -> { OpenAI::Beta::CodeInterpreterTool } - variant -> { OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly } + variant -> { OpenAI::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly } - class AssistantToolsFileSearchTypeOnly < OpenAI::BaseModel + class AssistantToolsFileSearchTypeOnly < OpenAI::Internal::Type::BaseModel # @!attribute type # The type of tool being defined: `file_search` # # @return [Symbol, :file_search] required :type, const: :file_search - # @!parse - # # @param type [Symbol, :file_search] - # # - # def initialize(type: :file_search, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(type: :file_search) + # @param type [Symbol, :file_search] The type of tool being defined: `file_search` end + + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly)] end end - class IncompleteDetails < OpenAI::BaseModel + # @see OpenAI::Models::Beta::Threads::Message#incomplete_details + class IncompleteDetails < OpenAI::Internal::Type::BaseModel # @!attribute reason # The reason the message is incomplete. # # @return [Symbol, OpenAI::Models::Beta::Threads::Message::IncompleteDetails::Reason] - required :reason, enum: -> { OpenAI::Models::Beta::Threads::Message::IncompleteDetails::Reason } + required :reason, enum: -> { OpenAI::Beta::Threads::Message::IncompleteDetails::Reason } - # @!parse - # # On an incomplete message, details about why the message is incomplete. - # # - # # @param reason [Symbol, OpenAI::Models::Beta::Threads::Message::IncompleteDetails::Reason] - # # - # def initialize(reason:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(reason:) + # On an incomplete message, details about why the message is incomplete. # + # @param reason [Symbol, OpenAI::Models::Beta::Threads::Message::IncompleteDetails::Reason] The reason the message is incomplete. + # The reason the message is incomplete. - class Reason < OpenAI::Enum + # + # @see OpenAI::Models::Beta::Threads::Message::IncompleteDetails#reason + module Reason + extend OpenAI::Internal::Type::Enum + CONTENT_FILTER = :content_filter MAX_TOKENS = :max_tokens RUN_CANCELLED = :run_cancelled RUN_EXPIRED = :run_expired RUN_FAILED = :run_failed - finalize! + # @!method self.values + # @return [Array] end end - # @abstract - # # The entity that produced the message. One of `user` or `assistant`. - class Role < OpenAI::Enum + # + # @see OpenAI::Models::Beta::Threads::Message#role + module Role + extend OpenAI::Internal::Type::Enum + USER = :user ASSISTANT = :assistant - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # # The status of the message, which can be either `in_progress`, `incomplete`, or - # `completed`. - class Status < OpenAI::Enum + # `completed`. + # + # @see OpenAI::Models::Beta::Threads::Message#status + module Status + extend OpenAI::Internal::Type::Enum + IN_PROGRESS = :in_progress INCOMPLETE = :incomplete COMPLETED = :completed - finalize! 
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/beta/threads/message_content.rb b/lib/openai/models/beta/threads/message_content.rb index df27bb65..f0771098 100644 --- a/lib/openai/models/beta/threads/message_content.rb +++ b/lib/openai/models/beta/threads/message_content.rb @@ -4,24 +4,27 @@ module OpenAI module Models module Beta module Threads - # @abstract - # # References an image [File](https://platform.openai.com/docs/api-reference/files) - # in the content of a message. - class MessageContent < OpenAI::Union + # in the content of a message. + module MessageContent + extend OpenAI::Internal::Type::Union + discriminator :type # References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message. - variant :image_file, -> { OpenAI::Models::Beta::Threads::ImageFileContentBlock } + variant :image_file, -> { OpenAI::Beta::Threads::ImageFileContentBlock } # References an image URL in the content of a message. - variant :image_url, -> { OpenAI::Models::Beta::Threads::ImageURLContentBlock } + variant :image_url, -> { OpenAI::Beta::Threads::ImageURLContentBlock } # The text content that is part of a message. - variant :text, -> { OpenAI::Models::Beta::Threads::TextContentBlock } + variant :text, -> { OpenAI::Beta::Threads::TextContentBlock } # The refusal content generated by the assistant. - variant :refusal, -> { OpenAI::Models::Beta::Threads::RefusalContentBlock } + variant :refusal, -> { OpenAI::Beta::Threads::RefusalContentBlock } + + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlock, OpenAI::Models::Beta::Threads::RefusalContentBlock)] end end end diff --git a/lib/openai/models/beta/threads/message_content_delta.rb b/lib/openai/models/beta/threads/message_content_delta.rb index f2e05dfb..908eb4d7 100644 --- a/lib/openai/models/beta/threads/message_content_delta.rb +++ b/lib/openai/models/beta/threads/message_content_delta.rb @@ -4,24 +4,27 @@ module OpenAI module Models module Beta module Threads - # @abstract - # # References an image [File](https://platform.openai.com/docs/api-reference/files) - # in the content of a message. - class MessageContentDelta < OpenAI::Union + # in the content of a message. + module MessageContentDelta + extend OpenAI::Internal::Type::Union + discriminator :type # References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message. - variant :image_file, -> { OpenAI::Models::Beta::Threads::ImageFileDeltaBlock } + variant :image_file, -> { OpenAI::Beta::Threads::ImageFileDeltaBlock } # The text content that is part of a message. - variant :text, -> { OpenAI::Models::Beta::Threads::TextDeltaBlock } + variant :text, -> { OpenAI::Beta::Threads::TextDeltaBlock } # The refusal content that is part of a message. - variant :refusal, -> { OpenAI::Models::Beta::Threads::RefusalDeltaBlock } + variant :refusal, -> { OpenAI::Beta::Threads::RefusalDeltaBlock } # References an image URL in the content of a message. 
- variant :image_url, -> { OpenAI::Models::Beta::Threads::ImageURLDeltaBlock } + variant :image_url, -> { OpenAI::Beta::Threads::ImageURLDeltaBlock } + + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::Threads::ImageFileDeltaBlock, OpenAI::Models::Beta::Threads::TextDeltaBlock, OpenAI::Models::Beta::Threads::RefusalDeltaBlock, OpenAI::Models::Beta::Threads::ImageURLDeltaBlock)] end end end diff --git a/lib/openai/models/beta/threads/message_content_part_param.rb b/lib/openai/models/beta/threads/message_content_part_param.rb index 86766a56..254bd67f 100644 --- a/lib/openai/models/beta/threads/message_content_part_param.rb +++ b/lib/openai/models/beta/threads/message_content_part_param.rb @@ -4,21 +4,24 @@ module OpenAI module Models module Beta module Threads - # @abstract - # # References an image [File](https://platform.openai.com/docs/api-reference/files) - # in the content of a message. - class MessageContentPartParam < OpenAI::Union + # in the content of a message. + module MessageContentPartParam + extend OpenAI::Internal::Type::Union + discriminator :type # References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message. - variant :image_file, -> { OpenAI::Models::Beta::Threads::ImageFileContentBlock } + variant :image_file, -> { OpenAI::Beta::Threads::ImageFileContentBlock } # References an image URL in the content of a message. - variant :image_url, -> { OpenAI::Models::Beta::Threads::ImageURLContentBlock } + variant :image_url, -> { OpenAI::Beta::Threads::ImageURLContentBlock } # The text content that is part of a message. - variant :text, -> { OpenAI::Models::Beta::Threads::TextContentBlockParam } + variant :text, -> { OpenAI::Beta::Threads::TextContentBlockParam } + + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam)] end end end diff --git a/lib/openai/models/beta/threads/message_create_params.rb b/lib/openai/models/beta/threads/message_create_params.rb index 544cf794..5bcd793e 100644 --- a/lib/openai/models/beta/threads/message_create_params.rb +++ b/lib/openai/models/beta/threads/message_create_params.rb @@ -4,140 +4,138 @@ module OpenAI module Models module Beta module Threads - class MessageCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Threads::Messages#create + class MessageCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute content # The text contents of the message. # # @return [String, Array] - required :content, union: -> { OpenAI::Models::Beta::Threads::MessageCreateParams::Content } + required :content, union: -> { OpenAI::Beta::Threads::MessageCreateParams::Content } # @!attribute role # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. 
+ # - `assistant`: Indicates the message is generated by the assistant. Use this
+ # value to insert messages from the assistant into the conversation.
 #
 # @return [Symbol, OpenAI::Models::Beta::Threads::MessageCreateParams::Role]
- required :role, enum: -> { OpenAI::Models::Beta::Threads::MessageCreateParams::Role }
+ required :role, enum: -> { OpenAI::Beta::Threads::MessageCreateParams::Role }

 # @!attribute attachments
 # A list of files attached to the message, and the tools they should be added to.
 #
 # @return [Array, nil]
 optional :attachments,
- -> { OpenAI::ArrayOf[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment] },
+ -> {
+ OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::MessageCreateParams::Attachment]
+ },
 nil?: true

 # @!attribute metadata
 # Set of 16 key-value pairs that can be attached to an object. This can be useful
- # for storing additional information about the object in a structured format, and
- # querying for objects via API or the dashboard.
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
 #
- # Keys are strings with a maximum length of 64 characters. Values are strings with
- # a maximum length of 512 characters.
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
 #
 # @return [Hash{Symbol=>String}, nil]
- optional :metadata, OpenAI::HashOf[String], nil?: true
+ optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

- # @!parse
- # # @param content [String, Array]
- # # @param role [Symbol, OpenAI::Models::Beta::Threads::MessageCreateParams::Role]
- # # @param attachments [Array, nil]
- # # @param metadata [Hash{Symbol=>String}, nil]
- # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
- # #
- # def initialize(content:, role:, attachments: nil, metadata: nil, request_options: {}, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
-
- # @abstract
+ # @!method initialize(content:, role:, attachments: nil, metadata: nil, request_options: {})
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Beta::Threads::MessageCreateParams} for more details.
+ #
+ # @param content [String, Array] The text contents of the message.
 #
+ # @param role [Symbol, OpenAI::Models::Beta::Threads::MessageCreateParams::Role] The role of the entity that is creating the message. Allowed values include:
+ #
+ # @param attachments [Array, nil] A list of files attached to the message, and the tools they should be added to.
+ #
+ # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
+ #
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
+
 # The text contents of the message.
- class Content < OpenAI::Union
- MessageContentPartParamArray = OpenAI::ArrayOf[union: -> { OpenAI::Models::Beta::Threads::MessageContentPartParam }]
+ module Content
+ extend OpenAI::Internal::Type::Union

 # The text contents of the message.
 variant String

 # An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](https://platform.openai.com/docs/models).
- variant OpenAI::Models::Beta::Threads::MessageCreateParams::Content::MessageContentPartParamArray + variant -> { OpenAI::Models::Beta::Threads::MessageCreateParams::Content::MessageContentPartParamArray } + + # @!method self.variants + # @return [Array(String, Array)] + + # @type [OpenAI::Internal::Type::Converter] + MessageContentPartParamArray = + OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Beta::Threads::MessageContentPartParam }] end - # @abstract - # # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. - class Role < OpenAI::Enum + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. + module Role + extend OpenAI::Internal::Type::Enum + USER = :user ASSISTANT = :assistant - finalize! + # @!method self.values + # @return [Array] end - class Attachment < OpenAI::BaseModel - # @!attribute [r] file_id + class Attachment < OpenAI::Internal::Type::BaseModel + # @!attribute file_id # The ID of the file to attach to the message. # # @return [String, nil] optional :file_id, String - # @!parse - # # @return [String] - # attr_writer :file_id - - # @!attribute [r] tools + # @!attribute tools # The tools to add this file to. # # @return [Array, nil] optional :tools, - -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool] } - - # @!parse - # # @return [Array] - # attr_writer :tools + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::MessageCreateParams::Attachment::Tool] } - # @!parse - # # @param file_id [String] - # # @param tools [Array] - # # - # def initialize(file_id: nil, tools: nil, **) = super + # @!method initialize(file_id: nil, tools: nil) + # @param file_id [String] The ID of the file to attach to the message. + # + # @param tools [Array] The tools to add this file to. 
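Because the `Content` union above accepts either a bare `String` or an array of content-part hashes, the `Messages#create` resource tagged at the top of this params class can be driven in two shapes. A hedged sketch under the post-refactor surface; the client setup, thread ID, and file ID below are placeholders, not values from this diff:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Variant 1: plain-string content.
client.beta.threads.messages.create("thread_abc123", content: "Hello!", role: :user)

# Variant 2: content-part array, with an attachment routed to the
# file_search tool via the Attachment shape documented above.
client.beta.threads.messages.create(
  "thread_abc123",
  content: [{type: :text, text: "Summarize the attached file."}],
  role: :user,
  attachments: [{file_id: "file-abc123", tools: [{type: :file_search}]}]
)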
- # def initialize: (Hash | OpenAI::BaseModel) -> void + module Tool + extend OpenAI::Internal::Type::Union - # @abstract - # - class Tool < OpenAI::Union discriminator :type - variant :code_interpreter, -> { OpenAI::Models::Beta::CodeInterpreterTool } + variant :code_interpreter, -> { OpenAI::Beta::CodeInterpreterTool } - variant :file_search, - -> { OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch } + variant :file_search, -> { OpenAI::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch } - class FileSearch < OpenAI::BaseModel + class FileSearch < OpenAI::Internal::Type::BaseModel # @!attribute type # The type of tool being defined: `file_search` # # @return [Symbol, :file_search] required :type, const: :file_search - # @!parse - # # @param type [Symbol, :file_search] - # # - # def initialize(type: :file_search, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(type: :file_search) + # @param type [Symbol, :file_search] The type of tool being defined: `file_search` end + + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch)] end end end diff --git a/lib/openai/models/beta/threads/message_delete_params.rb b/lib/openai/models/beta/threads/message_delete_params.rb index 8055fbe0..2e3c77d2 100644 --- a/lib/openai/models/beta/threads/message_delete_params.rb +++ b/lib/openai/models/beta/threads/message_delete_params.rb @@ -4,23 +4,19 @@ module OpenAI module Models module Beta module Threads - class MessageDeleteParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Threads::Messages#delete + class MessageDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute thread_id # # @return [String] required :thread_id, String - # @!parse - # # @param thread_id [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(thread_id:, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(thread_id:, request_options: {}) + # @param thread_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/beta/threads/message_deleted.rb b/lib/openai/models/beta/threads/message_deleted.rb index a3329fce..42e56be5 100644 --- a/lib/openai/models/beta/threads/message_deleted.rb +++ b/lib/openai/models/beta/threads/message_deleted.rb @@ -4,7 +4,8 @@ module OpenAI module Models module Beta module Threads - class MessageDeleted < OpenAI::BaseModel + # @see OpenAI::Resources::Beta::Threads::Messages#delete + class MessageDeleted < OpenAI::Internal::Type::BaseModel # @!attribute id # # @return [String] @@ -13,21 +14,17 @@ class MessageDeleted < OpenAI::BaseModel # @!attribute deleted # # @return [Boolean] - required :deleted, OpenAI::BooleanModel + required :deleted, OpenAI::Internal::Type::Boolean # @!attribute object # # @return [Symbol, :"thread.message.deleted"] required :object, const: :"thread.message.deleted" - # @!parse - # # @param id [String] - # # @param deleted [Boolean] - # # @param object [Symbol, :"thread.message.deleted"] - # # - # def initialize(id:, deleted:, object: :"thread.message.deleted", **) = super - - # 
def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, deleted:, object: :"thread.message.deleted") + # @param id [String] + # @param deleted [Boolean] + # @param object [Symbol, :"thread.message.deleted"] end end end diff --git a/lib/openai/models/beta/threads/message_delta.rb b/lib/openai/models/beta/threads/message_delta.rb index f3bc9993..8d151261 100644 --- a/lib/openai/models/beta/threads/message_delta.rb +++ b/lib/openai/models/beta/threads/message_delta.rb @@ -4,45 +4,38 @@ module OpenAI module Models module Beta module Threads - class MessageDelta < OpenAI::BaseModel - # @!attribute [r] content + class MessageDelta < OpenAI::Internal::Type::BaseModel + # @!attribute content # The content of the message in array of text and/or images. # # @return [Array, nil] - optional :content, -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::Threads::MessageContentDelta] } + optional :content, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::MessageContentDelta] } - # @!parse - # # @return [Array] - # attr_writer :content - - # @!attribute [r] role + # @!attribute role # The entity that produced the message. One of `user` or `assistant`. # # @return [Symbol, OpenAI::Models::Beta::Threads::MessageDelta::Role, nil] - optional :role, enum: -> { OpenAI::Models::Beta::Threads::MessageDelta::Role } - - # @!parse - # # @return [Symbol, OpenAI::Models::Beta::Threads::MessageDelta::Role] - # attr_writer :role - - # @!parse - # # The delta containing the fields that have changed on the Message. - # # - # # @param content [Array] - # # @param role [Symbol, OpenAI::Models::Beta::Threads::MessageDelta::Role] - # # - # def initialize(content: nil, role: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + optional :role, enum: -> { OpenAI::Beta::Threads::MessageDelta::Role } - # @abstract + # @!method initialize(content: nil, role: nil) + # The delta containing the fields that have changed on the Message. # + # @param content [Array] The content of the message in array of text and/or images. + # + # @param role [Symbol, OpenAI::Models::Beta::Threads::MessageDelta::Role] The entity that produced the message. One of `user` or `assistant`. + # The entity that produced the message. One of `user` or `assistant`. - class Role < OpenAI::Enum + # + # @see OpenAI::Models::Beta::Threads::MessageDelta#role + module Role + extend OpenAI::Internal::Type::Enum + USER = :user ASSISTANT = :assistant - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/beta/threads/message_delta_event.rb b/lib/openai/models/beta/threads/message_delta_event.rb index a008085e..14190d67 100644 --- a/lib/openai/models/beta/threads/message_delta_event.rb +++ b/lib/openai/models/beta/threads/message_delta_event.rb @@ -4,7 +4,7 @@ module OpenAI module Models module Beta module Threads - class MessageDeltaEvent < OpenAI::BaseModel + class MessageDeltaEvent < OpenAI::Internal::Type::BaseModel # @!attribute id # The identifier of the message, which can be referenced in API endpoints. # @@ -15,7 +15,7 @@ class MessageDeltaEvent < OpenAI::BaseModel # The delta containing the fields that have changed on the Message. # # @return [OpenAI::Models::Beta::Threads::MessageDelta] - required :delta, -> { OpenAI::Models::Beta::Threads::MessageDelta } + required :delta, -> { OpenAI::Beta::Threads::MessageDelta } # @!attribute object # The object type, which is always `thread.message.delta`. 
@@ -23,17 +23,15 @@ class MessageDeltaEvent < OpenAI::BaseModel # @return [Symbol, :"thread.message.delta"] required :object, const: :"thread.message.delta" - # @!parse - # # Represents a message delta i.e. any changed fields on a message during - # # streaming. - # # - # # @param id [String] - # # @param delta [OpenAI::Models::Beta::Threads::MessageDelta] - # # @param object [Symbol, :"thread.message.delta"] - # # - # def initialize(id:, delta:, object: :"thread.message.delta", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, delta:, object: :"thread.message.delta") + # Represents a message delta i.e. any changed fields on a message during + # streaming. + # + # @param id [String] The identifier of the message, which can be referenced in API endpoints. + # + # @param delta [OpenAI::Models::Beta::Threads::MessageDelta] The delta containing the fields that have changed on the Message. + # + # @param object [Symbol, :"thread.message.delta"] The object type, which is always `thread.message.delta`. end end end diff --git a/lib/openai/models/beta/threads/message_list_params.rb b/lib/openai/models/beta/threads/message_list_params.rb index 1a302d8c..1358425b 100644 --- a/lib/openai/models/beta/threads/message_list_params.rb +++ b/lib/openai/models/beta/threads/message_list_params.rb @@ -4,90 +4,75 @@ module OpenAI module Models module Beta module Threads - class MessageListParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Threads::Messages#list + class MessageListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!attribute [r] after + # @!attribute after # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. # # @return [String, nil] optional :after, String - # @!parse - # # @return [String] - # attr_writer :after - - # @!attribute [r] before + # @!attribute before # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. # # @return [String, nil] optional :before, String - # @!parse - # # @return [String] - # attr_writer :before - - # @!attribute [r] limit + # @!attribute limit # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. # # @return [Integer, nil] optional :limit, Integer - # @!parse - # # @return [Integer] - # attr_writer :limit - - # @!attribute [r] order + # @!attribute order # Sort order by the `created_at` timestamp of the objects. 
`asc` for ascending
- order and `desc` for descending order.
+ # order and `desc` for descending order.
 #
 # @return [Symbol, OpenAI::Models::Beta::Threads::MessageListParams::Order, nil]
- optional :order, enum: -> { OpenAI::Models::Beta::Threads::MessageListParams::Order }
+ optional :order, enum: -> { OpenAI::Beta::Threads::MessageListParams::Order }

- # @!parse
- # # @return [Symbol, OpenAI::Models::Beta::Threads::MessageListParams::Order]
- # attr_writer :order
-
- # @!attribute [r] run_id
+ # @!attribute run_id
 # Filter messages by the run ID that generated them.
 #
 # @return [String, nil]
 optional :run_id, String

- # @!parse
- # # @return [String]
- # attr_writer :run_id
-
- # @!parse
- # # @param after [String]
- # # @param before [String]
- # # @param limit [Integer]
- # # @param order [Symbol, OpenAI::Models::Beta::Threads::MessageListParams::Order]
- # # @param run_id [String]
- # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
- # #
- # def initialize(after: nil, before: nil, limit: nil, order: nil, run_id: nil, request_options: {}, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
-
- # @abstract
+ # @!method initialize(after: nil, before: nil, limit: nil, order: nil, run_id: nil, request_options: {})
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Beta::Threads::MessageListParams} for more details.
 #
+ # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
+ #
+ # @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place
+ #
+ # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
+ #
+ # @param order [Symbol, OpenAI::Models::Beta::Threads::MessageListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
+ #
+ # @param run_id [String] Filter messages by the run ID that generated them.
+ #
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
+
 # Sort order by the `created_at` timestamp of the objects. `asc` for ascending
- # order and `desc` for descending order.
- class Order < OpenAI::Enum
+ # order and `desc` for descending order.
+ module Order
+ extend OpenAI::Internal::Type::Enum
+
 ASC = :asc
 DESC = :desc

- finalize!
+ # @!method self.values
+ # @return [Array]
 end
 end
 end
diff --git a/lib/openai/models/beta/threads/message_retrieve_params.rb b/lib/openai/models/beta/threads/message_retrieve_params.rb
index 0280a2b9..4b724f65 100644
--- a/lib/openai/models/beta/threads/message_retrieve_params.rb
+++ b/lib/openai/models/beta/threads/message_retrieve_params.rb
@@ -4,23 +4,19 @@ module OpenAI
 module Models
 module Beta
 module Threads
- class MessageRetrieveParams < OpenAI::BaseModel
- # @!parse
- # extend OpenAI::RequestParameters::Converter
- include OpenAI::RequestParameters
+ # @see OpenAI::Resources::Beta::Threads::Messages#retrieve
+ class MessageRetrieveParams < OpenAI::Internal::Type::BaseModel
+ extend OpenAI::Internal::Type::RequestParameters::Converter
+ include OpenAI::Internal::Type::RequestParameters

 # @!attribute thread_id
 #
 # @return [String]
 required :thread_id, String

- # @!parse
- # # @param thread_id [String]
- # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
- # #
- # def initialize(thread_id:, request_options: {}, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
+ # @!method initialize(thread_id:, request_options: {})
+ # @param thread_id [String]
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
 end
 end
 end
diff --git a/lib/openai/models/beta/threads/message_update_params.rb b/lib/openai/models/beta/threads/message_update_params.rb
index 5e7ff7f0..43bbab67 100644
--- a/lib/openai/models/beta/threads/message_update_params.rb
+++ b/lib/openai/models/beta/threads/message_update_params.rb
@@ -4,10 +4,10 @@ module OpenAI
 module Models
 module Beta
 module Threads
- class MessageUpdateParams < OpenAI::BaseModel
- # @!parse
- # extend OpenAI::RequestParameters::Converter
- include OpenAI::RequestParameters
+ # @see OpenAI::Resources::Beta::Threads::Messages#update
+ class MessageUpdateParams < OpenAI::Internal::Type::BaseModel
+ extend OpenAI::Internal::Type::RequestParameters::Converter
+ include OpenAI::Internal::Type::RequestParameters

 # @!attribute thread_id
 #
@@ -16,23 +16,24 @@ class MessageUpdateParams < OpenAI::BaseModel
 # @!attribute metadata
 # Set of 16 key-value pairs that can be attached to an object. This can be useful
- # for storing additional information about the object in a structured format, and
- # querying for objects via API or the dashboard.
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
 #
- # Keys are strings with a maximum length of 64 characters. Values are strings with
- # a maximum length of 512 characters.
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
 #
 # @return [Hash{Symbol=>String}, nil]
- optional :metadata, OpenAI::HashOf[String], nil?: true
+ optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

- # @!parse
- # # @param thread_id [String]
- # # @param metadata [Hash{Symbol=>String}, nil]
- # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
- # #
- # def initialize(thread_id:, metadata: nil, request_options: {}, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
+ # @!method initialize(thread_id:, metadata: nil, request_options: {})
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Beta::Threads::MessageUpdateParams} for more details.
+ # + # @param thread_id [String] + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/beta/threads/refusal_content_block.rb b/lib/openai/models/beta/threads/refusal_content_block.rb index d174b062..91eba521 100644 --- a/lib/openai/models/beta/threads/refusal_content_block.rb +++ b/lib/openai/models/beta/threads/refusal_content_block.rb @@ -4,7 +4,7 @@ module OpenAI module Models module Beta module Threads - class RefusalContentBlock < OpenAI::BaseModel + class RefusalContentBlock < OpenAI::Internal::Type::BaseModel # @!attribute refusal # # @return [String] @@ -16,15 +16,12 @@ class RefusalContentBlock < OpenAI::BaseModel # @return [Symbol, :refusal] required :type, const: :refusal - # @!parse - # # The refusal content generated by the assistant. - # # - # # @param refusal [String] - # # @param type [Symbol, :refusal] - # # - # def initialize(refusal:, type: :refusal, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(refusal:, type: :refusal) + # The refusal content generated by the assistant. + # + # @param refusal [String] + # + # @param type [Symbol, :refusal] Always `refusal`. end end end diff --git a/lib/openai/models/beta/threads/refusal_delta_block.rb b/lib/openai/models/beta/threads/refusal_delta_block.rb index e65375c1..cdb3d1ea 100644 --- a/lib/openai/models/beta/threads/refusal_delta_block.rb +++ b/lib/openai/models/beta/threads/refusal_delta_block.rb @@ -4,7 +4,7 @@ module OpenAI module Models module Beta module Threads - class RefusalDeltaBlock < OpenAI::BaseModel + class RefusalDeltaBlock < OpenAI::Internal::Type::BaseModel # @!attribute index # The index of the refusal part in the message. # @@ -17,25 +17,19 @@ class RefusalDeltaBlock < OpenAI::BaseModel # @return [Symbol, :refusal] required :type, const: :refusal - # @!attribute [r] refusal + # @!attribute refusal # # @return [String, nil] optional :refusal, String - # @!parse - # # @return [String] - # attr_writer :refusal - - # @!parse - # # The refusal content that is part of a message. - # # - # # @param index [Integer] - # # @param refusal [String] - # # @param type [Symbol, :refusal] - # # - # def initialize(index:, refusal: nil, type: :refusal, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(index:, refusal: nil, type: :refusal) + # The refusal content that is part of a message. + # + # @param index [Integer] The index of the refusal part in the message. + # + # @param refusal [String] + # + # @param type [Symbol, :refusal] Always `refusal`. end end end diff --git a/lib/openai/models/beta/threads/required_action_function_tool_call.rb b/lib/openai/models/beta/threads/required_action_function_tool_call.rb index 51e0aba3..7f1eee07 100644 --- a/lib/openai/models/beta/threads/required_action_function_tool_call.rb +++ b/lib/openai/models/beta/threads/required_action_function_tool_call.rb @@ -4,12 +4,12 @@ module OpenAI module Models module Beta module Threads - class RequiredActionFunctionToolCall < OpenAI::BaseModel + class RequiredActionFunctionToolCall < OpenAI::Internal::Type::BaseModel # @!attribute id # The ID of the tool call. This ID must be referenced when you submit the tool - # outputs in using the - # [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) - # endpoint. 
+ # outputs using the
+ # [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
+ # endpoint.
 #
 # @return [String]
 required :id, String
@@ -18,27 +18,30 @@ class RequiredActionFunctionToolCall < OpenAI::BaseModel
 # The function definition.
 #
 # @return [OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function]
- required :function, -> { OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function }
+ required :function, -> { OpenAI::Beta::Threads::RequiredActionFunctionToolCall::Function }

 # @!attribute type
 # The type of tool call the output is required for. For now, this is always
- # `function`.
+ # `function`.
 #
 # @return [Symbol, :function]
 required :type, const: :function

- # @!parse
- # # Tool call objects
- # #
- # # @param id [String]
- # # @param function [OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function]
- # # @param type [Symbol, :function]
- # #
- # def initialize(id:, function:, type: :function, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
+ # @!method initialize(id:, function:, type: :function)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall} for more
+ # details.
+ #
+ # Tool call objects
+ #
+ # @param id [String] The ID of the tool call. This ID must be referenced when you submit the tool out
+ #
+ # @param function [OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function] The function definition.
+ #
+ # @param type [Symbol, :function] The type of tool call the output is required for. For now, this is always `funct

- class Function < OpenAI::BaseModel
+ # @see OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall#function
+ class Function < OpenAI::Internal::Type::BaseModel
 # @!attribute arguments
 # The arguments that the model expects you to pass to the function.
 #
@@ -51,15 +54,12 @@ class Function < OpenAI::BaseModel
 # @return [String]
 required :name, String

- # @!parse
- # # The function definition.
- # #
- # # @param arguments [String]
- # # @param name [String]
- # #
- # def initialize(arguments:, name:, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
+ # @!method initialize(arguments:, name:)
+ # The function definition.
+ #
+ # @param arguments [String] The arguments that the model expects you to pass to the function.
+ #
+ # @param name [String] The name of the function.
 end
 end
 end
diff --git a/lib/openai/models/beta/threads/run.rb b/lib/openai/models/beta/threads/run.rb
index 02c20e73..510077e4 100644
--- a/lib/openai/models/beta/threads/run.rb
+++ b/lib/openai/models/beta/threads/run.rb
@@ -4,7 +4,10 @@ module OpenAI
 module Models
 module Beta
 module Threads
- class Run < OpenAI::BaseModel
+ # @see OpenAI::Resources::Beta::Threads::Runs#create
+ #
+ # @see OpenAI::Resources::Beta::Threads::Runs#create_stream_raw
+ class Run < OpenAI::Internal::Type::BaseModel
 # @!attribute id
 # The identifier, which can be referenced in API endpoints.
 #
@@ -13,8 +16,8 @@ class Run < OpenAI::BaseModel
 # @!attribute assistant_id
 # The ID of the
- # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
- # execution of this run.
+ # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
+ # execution of this run.
 #
 # @return [String]
 required :assistant_id, String
@@ -51,15 +54,15 @@ class Run < OpenAI::BaseModel
 # @!attribute incomplete_details
 # Details on why the run is incomplete.
Will be `null` if the run is not - # incomplete. + # incomplete. # # @return [OpenAI::Models::Beta::Threads::Run::IncompleteDetails, nil] - required :incomplete_details, -> { OpenAI::Models::Beta::Threads::Run::IncompleteDetails }, nil?: true + required :incomplete_details, -> { OpenAI::Beta::Threads::Run::IncompleteDetails }, nil?: true # @!attribute instructions # The instructions that the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for - # this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. # # @return [String] required :instructions, String @@ -68,37 +71,37 @@ class Run < OpenAI::BaseModel # The last error associated with this run. Will be `null` if there are no errors. # # @return [OpenAI::Models::Beta::Threads::Run::LastError, nil] - required :last_error, -> { OpenAI::Models::Beta::Threads::Run::LastError }, nil?: true + required :last_error, -> { OpenAI::Beta::Threads::Run::LastError }, nil?: true # @!attribute max_completion_tokens # The maximum number of completion tokens specified to have been used over the - # course of the run. + # course of the run. # # @return [Integer, nil] required :max_completion_tokens, Integer, nil?: true # @!attribute max_prompt_tokens # The maximum number of prompt tokens specified to have been used over the course - # of the run. + # of the run. # # @return [Integer, nil] required :max_prompt_tokens, Integer, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - required :metadata, OpenAI::HashOf[String], nil?: true + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute model # The model that the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for - # this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. # # @return [String] required :model, String @@ -111,43 +114,43 @@ class Run < OpenAI::BaseModel # @!attribute parallel_tool_calls # Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. # # @return [Boolean] - required :parallel_tool_calls, OpenAI::BooleanModel + required :parallel_tool_calls, OpenAI::Internal::Type::Boolean # @!attribute required_action # Details on the action required to continue the run. Will be `null` if no action - # is required. + # is required. 
# # @return [OpenAI::Models::Beta::Threads::Run::RequiredAction, nil] - required :required_action, -> { OpenAI::Models::Beta::Threads::Run::RequiredAction }, nil?: true + required :required_action, -> { OpenAI::Beta::Threads::Run::RequiredAction }, nil?: true # @!attribute response_format # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. - # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. # # @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] - required :response_format, union: -> { OpenAI::Models::Beta::AssistantResponseFormatOption }, nil?: true + required :response_format, union: -> { OpenAI::Beta::AssistantResponseFormatOption }, nil?: true # @!attribute started_at # The Unix timestamp (in seconds) for when the run was started. @@ -157,52 +160,52 @@ class Run < OpenAI::BaseModel # @!attribute status # The status of the run, which can be either `queued`, `in_progress`, - # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, - # `incomplete`, or `expired`. + # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + # `incomplete`, or `expired`. 
# # @return [Symbol, OpenAI::Models::Beta::Threads::RunStatus] - required :status, enum: -> { OpenAI::Models::Beta::Threads::RunStatus } + required :status, enum: -> { OpenAI::Beta::Threads::RunStatus } # @!attribute thread_id # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) - # that was executed on as a part of this run. + # that was executed on as a part of this run. # # @return [String] required :thread_id, String # @!attribute tool_choice # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tools and instead generates a message. `auto` is the default value - # and means the model can pick between generating a message or calling one or more - # tools. `required` means the model must call one or more tools before responding - # to the user. Specifying a particular tool like `{"type": "file_search"}` or - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. # # @return [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] - required :tool_choice, union: -> { OpenAI::Models::Beta::AssistantToolChoiceOption }, nil?: true + required :tool_choice, union: -> { OpenAI::Beta::AssistantToolChoiceOption }, nil?: true # @!attribute tools # The list of tools that the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for - # this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. # # @return [Array] - required :tools, -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool] } + required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool] } # @!attribute truncation_strategy # Controls for how a thread will be truncated prior to the run. Use this to - # control the intial context window of the run. + # control the initial context window of the run. # # @return [OpenAI::Models::Beta::Threads::Run::TruncationStrategy, nil] - required :truncation_strategy, -> { OpenAI::Models::Beta::Threads::Run::TruncationStrategy }, nil?: true + required :truncation_strategy, -> { OpenAI::Beta::Threads::Run::TruncationStrategy }, nil?: true # @!attribute usage # Usage statistics related to the run. This value will be `null` if the run is not - # in a terminal state (i.e. `in_progress`, `queued`, etc.). + # in a terminal state (i.e. `in_progress`, `queued`, etc.). # # @return [OpenAI::Models::Beta::Threads::Run::Usage, nil] - required :usage, -> { OpenAI::Models::Beta::Threads::Run::Usage }, nil?: true + required :usage, -> { OpenAI::Beta::Threads::Run::Usage }, nil?: true # @!attribute temperature # The sampling temperature used for this run. If not set, defaults to 1. @@ -216,113 +219,107 @@ class Run < OpenAI::BaseModel # @return [Float, nil] optional :top_p, Float, nil?: true - # @!parse - # # Represents an execution run on a - # # [thread](https://platform.openai.com/docs/api-reference/threads). 
- # #
- # # @param id [String]
- # # @param assistant_id [String]
- # # @param cancelled_at [Integer, nil]
- # # @param completed_at [Integer, nil]
- # # @param created_at [Integer]
- # # @param expires_at [Integer, nil]
- # # @param failed_at [Integer, nil]
- # # @param incomplete_details [OpenAI::Models::Beta::Threads::Run::IncompleteDetails, nil]
- # # @param instructions [String]
- # # @param last_error [OpenAI::Models::Beta::Threads::Run::LastError, nil]
- # # @param max_completion_tokens [Integer, nil]
- # # @param max_prompt_tokens [Integer, nil]
- # # @param metadata [Hash{Symbol=>String}, nil]
- # # @param model [String]
- # # @param parallel_tool_calls [Boolean]
- # # @param required_action [OpenAI::Models::Beta::Threads::Run::RequiredAction, nil]
- # # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil]
- # # @param started_at [Integer, nil]
- # # @param status [Symbol, OpenAI::Models::Beta::Threads::RunStatus]
- # # @param thread_id [String]
- # # @param tool_choice [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil]
- # # @param tools [Array]
- # # @param truncation_strategy [OpenAI::Models::Beta::Threads::Run::TruncationStrategy, nil]
- # # @param usage [OpenAI::Models::Beta::Threads::Run::Usage, nil]
- # # @param temperature [Float, nil]
- # # @param top_p [Float, nil]
- # # @param object [Symbol, :"thread.run"]
- # #
- # def initialize(
- # id:,
- # assistant_id:,
- # cancelled_at:,
- # completed_at:,
- # created_at:,
- # expires_at:,
- # failed_at:,
- # incomplete_details:,
- # instructions:,
- # last_error:,
- # max_completion_tokens:,
- # max_prompt_tokens:,
- # metadata:,
- # model:,
- # parallel_tool_calls:,
- # required_action:,
- # response_format:,
- # started_at:,
- # status:,
- # thread_id:,
- # tool_choice:,
- # tools:,
- # truncation_strategy:,
- # usage:,
- # temperature: nil,
- # top_p: nil,
- # object: :"thread.run",
- # **
- # )
- # super
- # end
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
-
- class IncompleteDetails < OpenAI::BaseModel
- # @!attribute [r] reason
+ # @!method initialize(id:, assistant_id:, cancelled_at:, completed_at:, created_at:, expires_at:, failed_at:, incomplete_details:, instructions:, last_error:, max_completion_tokens:, max_prompt_tokens:, metadata:, model:, parallel_tool_calls:, required_action:, response_format:, started_at:, status:, thread_id:, tool_choice:, tools:, truncation_strategy:, usage:, temperature: nil, top_p: nil, object: :"thread.run")
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Beta::Threads::Run} for more details.
+ #
+ # Represents an execution run on a
+ # [thread](https://platform.openai.com/docs/api-reference/threads).
+ #
+ # @param id [String] The identifier, which can be referenced in API endpoints.
+ #
+ # @param assistant_id [String] The ID of the [assistant](https://platform.openai.com/docs/api-reference/assista
+ #
+ # @param cancelled_at [Integer, nil] The Unix timestamp (in seconds) for when the run was cancelled.
+ #
+ # @param completed_at [Integer, nil] The Unix timestamp (in seconds) for when the run was completed.
+ #
+ # @param created_at [Integer] The Unix timestamp (in seconds) for when the run was created.
+ #
+ # @param expires_at [Integer, nil] The Unix timestamp (in seconds) for when the run will expire.
+ # + # @param failed_at [Integer, nil] The Unix timestamp (in seconds) for when the run failed. + # + # @param incomplete_details [OpenAI::Models::Beta::Threads::Run::IncompleteDetails, nil] Details on why the run is incomplete. Will be `null` if the run is not incomplet + # + # @param instructions [String] The instructions that the [assistant](https://platform.openai.com/docs/api-refer + # + # @param last_error [OpenAI::Models::Beta::Threads::Run::LastError, nil] The last error associated with this run. Will be `null` if there are no errors. + # + # @param max_completion_tokens [Integer, nil] The maximum number of completion tokens specified to have been used over the cou + # + # @param max_prompt_tokens [Integer, nil] The maximum number of prompt tokens specified to have been used over the course + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param model [String] The model that the [assistant](https://platform.openai.com/docs/api-reference/as + # + # @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g + # + # @param required_action [OpenAI::Models::Beta::Threads::Run::RequiredAction, nil] Details on the action required to continue the run. Will be `null` if no action + # + # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https: + # + # @param started_at [Integer, nil] The Unix timestamp (in seconds) for when the run was started. + # + # @param status [Symbol, OpenAI::Models::Beta::Threads::RunStatus] The status of the run, which can be either `queued`, `in_progress`, `requires_ac + # + # @param thread_id [String] The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) t + # + # @param tool_choice [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] Controls which (if any) tool is called by the model. + # + # @param tools [Array] The list of tools that the [assistant](https://platform.openai.com/docs/api-refe + # + # @param truncation_strategy [OpenAI::Models::Beta::Threads::Run::TruncationStrategy, nil] Controls for how a thread will be truncated prior to the run. Use this to contro + # + # @param usage [OpenAI::Models::Beta::Threads::Run::Usage, nil] Usage statistics related to the run. This value will be `null` if the run is not + # + # @param temperature [Float, nil] The sampling temperature used for this run. If not set, defaults to 1. + # + # @param top_p [Float, nil] The nucleus sampling value used for this run. If not set, defaults to 1. + # + # @param object [Symbol, :"thread.run"] The object type, which is always `thread.run`. + + # @see OpenAI::Models::Beta::Threads::Run#incomplete_details + class IncompleteDetails < OpenAI::Internal::Type::BaseModel + # @!attribute reason # The reason why the run is incomplete. This will point to which specific token - # limit was reached over the course of the run. + # limit was reached over the course of the run. 
#
 # @return [Symbol, OpenAI::Models::Beta::Threads::Run::IncompleteDetails::Reason, nil]
- optional :reason, enum: -> { OpenAI::Models::Beta::Threads::Run::IncompleteDetails::Reason }
-
- # @!parse
- # # @return [Symbol, OpenAI::Models::Beta::Threads::Run::IncompleteDetails::Reason]
- # attr_writer :reason
+ optional :reason, enum: -> { OpenAI::Beta::Threads::Run::IncompleteDetails::Reason }

- # @!parse
- # # Details on why the run is incomplete. Will be `null` if the run is not
- # # incomplete.
- # #
- # # @param reason [Symbol, OpenAI::Models::Beta::Threads::Run::IncompleteDetails::Reason]
- # #
- # def initialize(reason: nil, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
-
- # @abstract
+ # @!method initialize(reason: nil)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Beta::Threads::Run::IncompleteDetails} for more details.
+ #
+ # Details on why the run is incomplete. Will be `null` if the run is not
+ # incomplete.
 #
+ # @param reason [Symbol, OpenAI::Models::Beta::Threads::Run::IncompleteDetails::Reason] The reason why the run is incomplete. This will point to which specific token li
+
 # The reason why the run is incomplete. This will point to which specific token
- # limit was reached over the course of the run.
- class Reason < OpenAI::Enum
+ # limit was reached over the course of the run.
+ #
+ # @see OpenAI::Models::Beta::Threads::Run::IncompleteDetails#reason
+ module Reason
+ extend OpenAI::Internal::Type::Enum
+
 MAX_COMPLETION_TOKENS = :max_completion_tokens
 MAX_PROMPT_TOKENS = :max_prompt_tokens

- finalize!
+ # @!method self.values
+ # @return [Array]
 end
 end

- class LastError < OpenAI::BaseModel
+ # @see OpenAI::Models::Beta::Threads::Run#last_error
+ class LastError < OpenAI::Internal::Type::BaseModel
 # @!attribute code
 # One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.
 #
 # @return [Symbol, OpenAI::Models::Beta::Threads::Run::LastError::Code]
- required :code, enum: -> { OpenAI::Models::Beta::Threads::Run::LastError::Code }
+ required :code, enum: -> { OpenAI::Beta::Threads::Run::LastError::Code }

 # @!attribute message
 # A human-readable description of the error.
@@ -330,35 +327,35 @@ class LastError < OpenAI::BaseModel
 # @return [String]
 required :message, String

- # @!parse
- # # The last error associated with this run. Will be `null` if there are no errors.
- # #
- # # @param code [Symbol, OpenAI::Models::Beta::Threads::Run::LastError::Code]
- # # @param message [String]
- # #
- # def initialize(code:, message:, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
-
- # @abstract
+ # @!method initialize(code:, message:)
+ # The last error associated with this run. Will be `null` if there are no errors.
 #
+ # @param code [Symbol, OpenAI::Models::Beta::Threads::Run::LastError::Code] One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.
+ #
+ # @param message [String] A human-readable description of the error.
+
 # One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.
- class Code < OpenAI::Enum
+ #
+ # @see OpenAI::Models::Beta::Threads::Run::LastError#code
+ module Code
+ extend OpenAI::Internal::Type::Enum
+
 SERVER_ERROR = :server_error
 RATE_LIMIT_EXCEEDED = :rate_limit_exceeded
 INVALID_PROMPT = :invalid_prompt

- finalize!
+ # @!method self.values + # @return [Array] end end - class RequiredAction < OpenAI::BaseModel + # @see OpenAI::Models::Beta::Threads::Run#required_action + class RequiredAction < OpenAI::Internal::Type::BaseModel # @!attribute submit_tool_outputs # Details on the tool outputs needed for this run to continue. # # @return [OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs] - required :submit_tool_outputs, - -> { OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs } + required :submit_tool_outputs, -> { OpenAI::Beta::Threads::Run::RequiredAction::SubmitToolOutputs } # @!attribute type # For now, this is always `submit_tool_outputs`. @@ -366,79 +363,78 @@ class RequiredAction < OpenAI::BaseModel # @return [Symbol, :submit_tool_outputs] required :type, const: :submit_tool_outputs - # @!parse - # # Details on the action required to continue the run. Will be `null` if no action - # # is required. - # # - # # @param submit_tool_outputs [OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs] - # # @param type [Symbol, :submit_tool_outputs] - # # - # def initialize(submit_tool_outputs:, type: :submit_tool_outputs, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(submit_tool_outputs:, type: :submit_tool_outputs) + # Details on the action required to continue the run. Will be `null` if no action + # is required. + # + # @param submit_tool_outputs [OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs] Details on the tool outputs needed for this run to continue. + # + # @param type [Symbol, :submit_tool_outputs] For now, this is always `submit_tool_outputs`. - class SubmitToolOutputs < OpenAI::BaseModel + # @see OpenAI::Models::Beta::Threads::Run::RequiredAction#submit_tool_outputs + class SubmitToolOutputs < OpenAI::Internal::Type::BaseModel # @!attribute tool_calls # A list of the relevant tool calls. # # @return [Array] required :tool_calls, - -> { OpenAI::ArrayOf[OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall] } - - # @!parse - # # Details on the tool outputs needed for this run to continue. - # # - # # @param tool_calls [Array] - # # - # def initialize(tool_calls:, **) = super + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::RequiredActionFunctionToolCall] } - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(tool_calls:) + # Details on the tool outputs needed for this run to continue. + # + # @param tool_calls [Array] A list of the relevant tool calls. end end - class TruncationStrategy < OpenAI::BaseModel + # @see OpenAI::Models::Beta::Threads::Run#truncation_strategy + class TruncationStrategy < OpenAI::Internal::Type::BaseModel # @!attribute type # The truncation strategy to use for the thread. The default is `auto`. If set to - # `last_messages`, the thread will be truncated to the n most recent messages in - # the thread. When set to `auto`, messages in the middle of the thread will be - # dropped to fit the context length of the model, `max_prompt_tokens`. + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. 
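Editor's note: the new `@!method initialize` directives above mean each of these nested models is a plain keyword-initialized value object. A minimal sketch of building one by hand, for illustration only (in real use the SDK deserializes these from API responses):

require "openai"

# Construct the nested error model directly, mirroring the documented
# initialize(code:, message:) signature; `code` takes a Code enum symbol.
err = OpenAI::Models::Beta::Threads::Run::LastError.new(
  code: :rate_limit_exceeded,
  message: "Rate limit reached for requests"
)

err.code    # => :rate_limit_exceeded
err.message # => "Rate limit reached for requests"
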
# # @return [Symbol, OpenAI::Models::Beta::Threads::Run::TruncationStrategy::Type] - required :type, enum: -> { OpenAI::Models::Beta::Threads::Run::TruncationStrategy::Type } + required :type, enum: -> { OpenAI::Beta::Threads::Run::TruncationStrategy::Type } # @!attribute last_messages # The number of most recent messages from the thread when constructing the context - # for the run. + # for the run. # # @return [Integer, nil] optional :last_messages, Integer, nil?: true - # @!parse - # # Controls for how a thread will be truncated prior to the run. Use this to - # # control the intial context window of the run. - # # - # # @param type [Symbol, OpenAI::Models::Beta::Threads::Run::TruncationStrategy::Type] - # # @param last_messages [Integer, nil] - # # - # def initialize(type:, last_messages: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(type:, last_messages: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::Threads::Run::TruncationStrategy} for more details. + # + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. # + # @param type [Symbol, OpenAI::Models::Beta::Threads::Run::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to + # + # @param last_messages [Integer, nil] The number of most recent messages from the thread when constructing the context + # The truncation strategy to use for the thread. The default is `auto`. If set to - # `last_messages`, the thread will be truncated to the n most recent messages in - # the thread. When set to `auto`, messages in the middle of the thread will be - # dropped to fit the context length of the model, `max_prompt_tokens`. - class Type < OpenAI::Enum + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. + # + # @see OpenAI::Models::Beta::Threads::Run::TruncationStrategy#type + module Type + extend OpenAI::Internal::Type::Enum + AUTO = :auto LAST_MESSAGES = :last_messages - finalize! + # @!method self.values + # @return [Array] end end - class Usage < OpenAI::BaseModel + # @see OpenAI::Models::Beta::Threads::Run#usage + class Usage < OpenAI::Internal::Type::BaseModel # @!attribute completion_tokens # Number of completion tokens used over the course of the run. # @@ -457,17 +453,15 @@ class Usage < OpenAI::BaseModel # @return [Integer] required :total_tokens, Integer - # @!parse - # # Usage statistics related to the run. This value will be `null` if the run is not - # # in a terminal state (i.e. `in_progress`, `queued`, etc.). - # # - # # @param completion_tokens [Integer] - # # @param prompt_tokens [Integer] - # # @param total_tokens [Integer] - # # - # def initialize(completion_tokens:, prompt_tokens:, total_tokens:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(completion_tokens:, prompt_tokens:, total_tokens:) + # Usage statistics related to the run. This value will be `null` if the run is not + # in a terminal state (i.e. `in_progress`, `queued`, etc.). + # + # @param completion_tokens [Integer] Number of completion tokens used over the course of the run. + # + # @param prompt_tokens [Integer] Number of prompt tokens used over the course of the run.
+ # + # @param total_tokens [Integer] Total number of tokens used (prompt + completion). end end end diff --git a/lib/openai/models/beta/threads/run_cancel_params.rb b/lib/openai/models/beta/threads/run_cancel_params.rb index 230c1059..13baf1ce 100644 --- a/lib/openai/models/beta/threads/run_cancel_params.rb +++ b/lib/openai/models/beta/threads/run_cancel_params.rb @@ -4,23 +4,19 @@ module OpenAI module Models module Beta module Threads - class RunCancelParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Threads::Runs#cancel + class RunCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute thread_id # # @return [String] required :thread_id, String - # @!parse - # # @param thread_id [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(thread_id:, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(thread_id:, request_options: {}) + # @param thread_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/beta/threads/run_create_params.rb b/lib/openai/models/beta/threads/run_create_params.rb index c655fda0..2d44abe8 100644 --- a/lib/openai/models/beta/threads/run_create_params.rb +++ b/lib/openai/models/beta/threads/run_create_params.rb @@ -4,39 +4,38 @@ module OpenAI module Models module Beta module Threads - class RunCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Threads::Runs#create + # + # @see OpenAI::Resources::Beta::Threads::Runs#create_stream_raw + class RunCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute assistant_id # The ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to - # execute this run. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. # # @return [String] required :assistant_id, String - # @!attribute [r] include + # @!attribute include # A list of additional fields to include in the response. Currently the only - # supported value is `step_details.tool_calls[*].file_search.results[*].content` - # to fetch the file search result content. + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. # - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. # # @return [Array, nil] - optional :include, -> { OpenAI::ArrayOf[enum: OpenAI::Models::Beta::Threads::Runs::RunStepInclude] } - - # @!parse - # # @return [Array] - # attr_writer :include + optional :include, + -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Beta::Threads::Runs::RunStepInclude] } # @!attribute additional_instructions # Appends additional instructions at the end of the instructions for the run. 
This - # is useful for modifying the behavior on a per-run basis without overriding other - # instructions. + # is useful for modifying the behavior on a per-run basis without overriding other + # instructions. # # @return [String, nil] optional :additional_instructions, String, nil?: true @@ -46,383 +45,387 @@ class RunCreateParams < OpenAI::BaseModel # # @return [Array, nil] optional :additional_messages, - -> { OpenAI::ArrayOf[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage] }, + -> { + OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage] + }, nil?: true # @!attribute instructions # Overrides the - # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) - # of the assistant. This is useful for modifying the behavior on a per-run basis. + # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + # of the assistant. This is useful for modifying the behavior on a per-run basis. # # @return [String, nil] optional :instructions, String, nil?: true # @!attribute max_completion_tokens # The maximum number of completion tokens that may be used over the course of the - # run. The run will make a best effort to use only the number of completion tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # completion tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. # # @return [Integer, nil] optional :max_completion_tokens, Integer, nil?: true # @!attribute max_prompt_tokens # The maximum number of prompt tokens that may be used over the course of the run. - # The run will make a best effort to use only the number of prompt tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # prompt tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. # # @return [Integer, nil] optional :max_prompt_tokens, Integer, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute model # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - # be used to execute this run. 
If a value is provided here, it will override the - # model associated with the assistant. If not, the model associated with the - # assistant will be used. + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. # # @return [String, Symbol, OpenAI::Models::ChatModel, nil] - optional :model, union: -> { OpenAI::Models::Beta::Threads::RunCreateParams::Model }, nil?: true + optional :model, union: -> { OpenAI::Beta::Threads::RunCreateParams::Model }, nil?: true - # @!attribute [r] parallel_tool_calls + # @!attribute parallel_tool_calls # Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. # # @return [Boolean, nil] - optional :parallel_tool_calls, OpenAI::BooleanModel - - # @!parse - # # @return [Boolean] - # attr_writer :parallel_tool_calls + optional :parallel_tool_calls, OpenAI::Internal::Type::Boolean # @!attribute reasoning_effort - # **o-series models only** - # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] - optional :reasoning_effort, enum: -> { OpenAI::Models::ReasoningEffort }, nil?: true + optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true # @!attribute response_format # Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. - # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
+ # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. # # @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] - optional :response_format, union: -> { OpenAI::Models::Beta::AssistantResponseFormatOption }, nil?: true + optional :response_format, union: -> { OpenAI::Beta::AssistantResponseFormatOption }, nil?: true # @!attribute temperature # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. # # @return [Float, nil] optional :temperature, Float, nil?: true # @!attribute tool_choice # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tools and instead generates a message. `auto` is the default value - # and means the model can pick between generating a message or calling one or more - # tools. `required` means the model must call one or more tools before responding - # to the user. Specifying a particular tool like `{"type": "file_search"}` or - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. # # @return [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] - optional :tool_choice, union: -> { OpenAI::Models::Beta::AssistantToolChoiceOption }, nil?: true + optional :tool_choice, union: -> { OpenAI::Beta::AssistantToolChoiceOption }, nil?: true # @!attribute tools # Override the tools the assistant can use for this run. This is useful for - # modifying the behavior on a per-run basis. + # modifying the behavior on a per-run basis. 
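Editor's note: the create-time options documented in this hunk compose at the call site roughly as below. A hedged sketch, assuming a configured client and placeholder thread/assistant IDs; the positional-thread-ID call shape is inferred from the params classes in this diff, not stated by it:

require "openai"

client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

run = client.beta.threads.runs.create(
  "thread_abc123",                       # placeholder thread ID
  assistant_id: "asst_abc123",           # placeholder assistant ID
  # JSON mode: the instructions must also tell the model to emit JSON,
  # per the response_format warning above.
  response_format: {type: :json_object},
  # Force the file_search tool rather than letting the model choose.
  tool_choice: {type: :file_search},
  temperature: 0.2                       # tune this or top_p, not both
)
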
# # @return [Array, nil] - optional :tools, -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool] }, nil?: true + optional :tools, + -> { + OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool] + }, + nil?: true # @!attribute top_p # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or temperature but not both. + # We generally recommend altering this or temperature but not both. # # @return [Float, nil] optional :top_p, Float, nil?: true # @!attribute truncation_strategy # Controls for how a thread will be truncated prior to the run. Use this to - # control the intial context window of the run. + # control the initial context window of the run. # # @return [OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy, nil] optional :truncation_strategy, - -> { OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy }, + -> { OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy }, nil?: true - # @!parse - # # @param assistant_id [String] - # # @param include [Array] - # # @param additional_instructions [String, nil] - # # @param additional_messages [Array, nil] - # # @param instructions [String, nil] - # # @param max_completion_tokens [Integer, nil] - # # @param max_prompt_tokens [Integer, nil] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param model [String, Symbol, OpenAI::Models::ChatModel, nil] - # # @param parallel_tool_calls [Boolean] - # # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] - # # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] - # # @param temperature [Float, nil] - # # @param tool_choice [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] - # # @param tools [Array, nil] - # # @param top_p [Float, nil] - # # @param truncation_strategy [OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy, nil] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize( - # assistant_id:, - # include: nil, - # additional_instructions: nil, - # additional_messages: nil, - # instructions: nil, - # max_completion_tokens: nil, - # max_prompt_tokens: nil, - # metadata: nil, - # model: nil, - # parallel_tool_calls: nil, - # reasoning_effort: nil, - # response_format: nil, - # temperature: nil, - # tool_choice: nil, - # tools: nil, - # top_p: nil, - # truncation_strategy: nil, - # request_options: {}, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class AdditionalMessage < OpenAI::BaseModel + # @!method initialize(assistant_id:, include: nil, additional_instructions: nil, additional_messages: nil, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, reasoning_effort: nil, response_format: nil, temperature: nil, tool_choice: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {}) + # Some parameter documentation has been truncated, see + #
{OpenAI::Models::Beta::Threads::RunCreateParams} for more details. + # + # @param assistant_id [String] The ID of the [assistant](https://platform.openai.com/docs/api-reference/assista + # + # @param include [Array] A list of additional fields to include in the response. Currently the only suppo + # + # @param additional_instructions [String, nil] Appends additional instructions at the end of the instructions for the run. This + # + # @param additional_messages [Array, nil] Adds additional messages to the thread before creating the run. + # + # @param instructions [String, nil] Overrides the [instructions](https://platform.openai.com/docs/api-reference/assi + # + # @param max_completion_tokens [Integer, nil] The maximum number of completion tokens that may be used over the course of the + # + # @param max_prompt_tokens [Integer, nil] The maximum number of prompt tokens that may be used over the course of the run. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param model [String, Symbol, OpenAI::Models::ChatModel, nil] The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # + # @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g + # + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for + # + # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https: + # + # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m + # + # @param tool_choice [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] Controls which (if any) tool is called by the model. + # + # @param tools [Array, nil] Override the tools the assistant can use for this run. This is useful for modify + # + # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the + # + # @param truncation_strategy [OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy, nil] Controls for how a thread will be truncated prior to the run. Use this to contro + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + class AdditionalMessage < OpenAI::Internal::Type::BaseModel # @!attribute content # The text contents of the message. # # @return [String, Array] - required :content, - union: -> { OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Content } + required :content, union: -> { OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Content } # @!attribute role # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. 
# # @return [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Role] - required :role, enum: -> { OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Role } + required :role, enum: -> { OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Role } # @!attribute attachments # A list of files attached to the message, and the tools they should be added to. # # @return [Array, nil] optional :attachments, - -> { OpenAI::ArrayOf[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment] }, + -> { + OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment] + }, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true - # @!parse - # # @param content [String, Array] - # # @param role [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Role] - # # @param attachments [Array, nil] - # # @param metadata [Hash{Symbol=>String}, nil] - # # - # def initialize(content:, role:, attachments: nil, metadata: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(content:, role:, attachments: nil, metadata: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage} for more + # details. + # + # @param content [String, Array] The text contents of the message. + # + # @param role [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Role] The role of the entity that is creating the message. Allowed values include: # + # @param attachments [Array, nil] A list of files attached to the message, and the tools they should be added to. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # The text contents of the message. - class Content < OpenAI::Union - MessageContentPartParamArray = OpenAI::ArrayOf[union: -> { OpenAI::Models::Beta::Threads::MessageContentPartParam }] + # + # @see OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage#content + module Content + extend OpenAI::Internal::Type::Union # The text contents of the message. variant String # An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](https://platform.openai.com/docs/models).
- variant OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Content::MessageContentPartParamArray + variant -> { OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Content::MessageContentPartParamArray } + + # @!method self.variants + # @return [Array(String, Array)] + + # @type [OpenAI::Internal::Type::Converter] + MessageContentPartParamArray = + OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Beta::Threads::MessageContentPartParam }] end - # @abstract - # # The role of the entity that is creating the message. Allowed values include: # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. - class Role < OpenAI::Enum + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. + # + # @see OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage#role + module Role + extend OpenAI::Internal::Type::Enum + USER = :user ASSISTANT = :assistant - finalize! + # @!method self.values + # @return [Array] end - class Attachment < OpenAI::BaseModel - # @!attribute [r] file_id + class Attachment < OpenAI::Internal::Type::BaseModel + # @!attribute file_id # The ID of the file to attach to the message. # # @return [String, nil] optional :file_id, String - # @!parse - # # @return [String] - # attr_writer :file_id - - # @!attribute [r] tools + # @!attribute tools # The tools to add this file to. # # @return [Array, nil] optional :tools, - -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool] } + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool] } - # @!parse - # # @return [Array] - # attr_writer :tools - - # @!parse - # # @param file_id [String] - # # @param tools [Array] - # # - # def initialize(file_id: nil, tools: nil, **) = super + # @!method initialize(file_id: nil, tools: nil) + # @param file_id [String] The ID of the file to attach to the message. + # + # @param tools [Array] The tools to add this file to. 
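Editor's note: because `Content` is a union and `Attachment` is keyword-initialized, the same additional message can be expressed either as a bare string or as typed content parts. The hash shapes below are a sketch using placeholder file IDs:

# Simplest form: `content` as a plain String (first union variant).
plain = {role: :user, content: "Summarize the attached report."}

# Structured form: an array of content parts, plus a file_search attachment.
structured = {
  role: :user,
  content: [{type: :text, text: "Summarize the attached report."}],
  attachments: [
    {file_id: "file_abc123", tools: [{type: :file_search}]}
  ]
}

# Either hash is a valid element of `additional_messages:` on run creation.
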
- # def initialize: (Hash | OpenAI::BaseModel) -> void + module Tool + extend OpenAI::Internal::Type::Union - # @abstract - # - class Tool < OpenAI::Union discriminator :type - variant :code_interpreter, -> { OpenAI::Models::Beta::CodeInterpreterTool } + variant :code_interpreter, -> { OpenAI::Beta::CodeInterpreterTool } variant :file_search, - -> { OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch } + -> { OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch } - class FileSearch < OpenAI::BaseModel + class FileSearch < OpenAI::Internal::Type::BaseModel # @!attribute type # The type of tool being defined: `file_search` # # @return [Symbol, :file_search] required :type, const: :file_search - # @!parse - # # @param type [Symbol, :file_search] - # # - # def initialize(type: :file_search, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(type: :file_search) + # @param type [Symbol, :file_search] The type of tool being defined: `file_search` end + + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch)] end end end - # @abstract - # # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - # be used to execute this run. If a value is provided here, it will override the - # model associated with the assistant. If not, the model associated with the - # assistant will be used. - class Model < OpenAI::Union + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. + module Model + extend OpenAI::Internal::Type::Union + variant String # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. - variant enum: -> { OpenAI::Models::ChatModel } + variant enum: -> { OpenAI::ChatModel } + + # @!method self.variants + # @return [Array(String, Symbol, OpenAI::Models::ChatModel)] end - class TruncationStrategy < OpenAI::BaseModel + class TruncationStrategy < OpenAI::Internal::Type::BaseModel # @!attribute type # The truncation strategy to use for the thread. The default is `auto`. If set to - # `last_messages`, the thread will be truncated to the n most recent messages in - # the thread. When set to `auto`, messages in the middle of the thread will be - # dropped to fit the context length of the model, `max_prompt_tokens`. + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. # # @return [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::Type] - required :type, enum: -> { OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::Type } + required :type, enum: -> { OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::Type } # @!attribute last_messages # The number of most recent messages from the thread when constructing the context - # for the run. + # for the run. 
# # @return [Integer, nil] optional :last_messages, Integer, nil?: true - # @!parse - # # Controls for how a thread will be truncated prior to the run. Use this to - # # control the intial context window of the run. - # # - # # @param type [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::Type] - # # @param last_messages [Integer, nil] - # # - # def initialize(type:, last_messages: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(type:, last_messages: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy} for more + # details. + # + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. + # + # @param type [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to # + # @param last_messages [Integer, nil] The number of most recent messages from the thread when constructing the context + # The truncation strategy to use for the thread. The default is `auto`. If set to - # `last_messages`, the thread will be truncated to the n most recent messages in - # the thread. When set to `auto`, messages in the middle of the thread will be - # dropped to fit the context length of the model, `max_prompt_tokens`. - class Type < OpenAI::Enum + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. + # + # @see OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy#type + module Type + extend OpenAI::Internal::Type::Enum + AUTO = :auto LAST_MESSAGES = :last_messages - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/beta/threads/run_list_params.rb b/lib/openai/models/beta/threads/run_list_params.rb index ce011119..5f50a4bb 100644 --- a/lib/openai/models/beta/threads/run_list_params.rb +++ b/lib/openai/models/beta/threads/run_list_params.rb @@ -4,79 +4,67 @@ module OpenAI module Models module Beta module Threads - class RunListParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Threads::Runs#list + class RunListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!attribute [r] after + # @!attribute after # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. # # @return [String, nil] optional :after, String - # @!parse - # # @return [String] - # attr_writer :after - - # @!attribute [r] before + # @!attribute before # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list.
For instance, if you make a list request and receive 100 objects, - starting with obj_foo, your subsequent call can include before=obj_foo in order - to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. # # @return [String, nil] optional :before, String - # @!parse - # # @return [String] - # attr_writer :before - - # @!attribute [r] limit + # @!attribute limit # A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. + # 100, and the default is 20. # # @return [Integer, nil] optional :limit, Integer - # @!parse - # # @return [Integer] - # attr_writer :limit - - # @!attribute [r] order + # @!attribute order # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. + # order and `desc` for descending order. # # @return [Symbol, OpenAI::Models::Beta::Threads::RunListParams::Order, nil] - optional :order, enum: -> { OpenAI::Models::Beta::Threads::RunListParams::Order } - - # @!parse - # # @return [Symbol, OpenAI::Models::Beta::Threads::RunListParams::Order] - # attr_writer :order - - # @!parse - # # @param after [String] - # # @param before [String] - # # @param limit [Integer] - # # @param order [Symbol, OpenAI::Models::Beta::Threads::RunListParams::Order] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(after: nil, before: nil, limit: nil, order: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + optional :order, enum: -> { OpenAI::Beta::Threads::RunListParams::Order } - # @abstract + # @!method initialize(after: nil, before: nil, limit: nil, order: nil, request_options: {}) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::Threads::RunListParams} for more details. # + # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place + # + # @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place + # + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1 + # + # @param order [Symbol, OpenAI::Models::Beta::Threads::RunListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. - class Order < OpenAI::Enum + # order and `desc` for descending order. module Order + extend OpenAI::Internal::Type::Enum + ASC = :asc DESC = :desc - finalize!
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/beta/threads/run_retrieve_params.rb b/lib/openai/models/beta/threads/run_retrieve_params.rb index bd9194cf..307672fd 100644 --- a/lib/openai/models/beta/threads/run_retrieve_params.rb +++ b/lib/openai/models/beta/threads/run_retrieve_params.rb @@ -4,23 +4,19 @@ module OpenAI module Models module Beta module Threads - class RunRetrieveParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Threads::Runs#retrieve + class RunRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute thread_id # # @return [String] required :thread_id, String - # @!parse - # # @param thread_id [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(thread_id:, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(thread_id:, request_options: {}) + # @param thread_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/beta/threads/run_status.rb b/lib/openai/models/beta/threads/run_status.rb index b85ac1e3..cde93d93 100644 --- a/lib/openai/models/beta/threads/run_status.rb +++ b/lib/openai/models/beta/threads/run_status.rb @@ -4,12 +4,12 @@ module OpenAI module Models module Beta module Threads - # @abstract - # # The status of the run, which can be either `queued`, `in_progress`, - # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, - # `incomplete`, or `expired`. - class RunStatus < OpenAI::Enum + # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + # `incomplete`, or `expired`. + module RunStatus + extend OpenAI::Internal::Type::Enum + QUEUED = :queued IN_PROGRESS = :in_progress REQUIRES_ACTION = :requires_action @@ -20,7 +20,8 @@ class RunStatus < OpenAI::Enum INCOMPLETE = :incomplete EXPIRED = :expired - finalize! 
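Editor's note: the cursor parameters above map directly onto list calls, and `RunStatus` is what each returned run reports. A sketch assuming the same placeholder client and thread as earlier; whether the page object exposes `data` or is directly enumerable depends on the SDK's cursor-page type, which this diff does not show:

page = client.beta.threads.runs.list(
  "thread_abc123",  # placeholder thread ID
  limit: 20,        # 1..100, default 20
  order: :desc      # newest first, per RunListParams::Order
)

page.data.each do |run|
  puts "#{run.id}: #{run.status}"  # status is a RunStatus symbol, e.g. :completed
end

# Passing after: page.data.last.id would fetch the next page via the `after` cursor.
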
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rb b/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rb index 5c747924..4e404c85 100644 --- a/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rb +++ b/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rb @@ -4,10 +4,12 @@ module OpenAI module Models module Beta module Threads - class RunSubmitToolOutputsParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs + # + # @see OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs_stream_raw + class RunSubmitToolOutputsParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute thread_id # @@ -19,46 +21,37 @@ class RunSubmitToolOutputsParams < OpenAI::BaseModel # # @return [Array] required :tool_outputs, - -> { OpenAI::ArrayOf[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput] } + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput] } - # @!parse - # # @param thread_id [String] - # # @param tool_outputs [Array] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(thread_id:, tool_outputs:, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(thread_id:, tool_outputs:, request_options: {}) + # @param thread_id [String] + # + # @param tool_outputs [Array] A list of tools for which the outputs are being submitted. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - class ToolOutput < OpenAI::BaseModel - # @!attribute [r] output + class ToolOutput < OpenAI::Internal::Type::BaseModel + # @!attribute output # The output of the tool call to be submitted to continue the run. # # @return [String, nil] optional :output, String - # @!parse - # # @return [String] - # attr_writer :output - - # @!attribute [r] tool_call_id + # @!attribute tool_call_id # The ID of the tool call in the `required_action` object within the run object - # the output is being submitted for. + # the output is being submitted for. # # @return [String, nil] optional :tool_call_id, String - # @!parse - # # @return [String] - # attr_writer :tool_call_id - - # @!parse - # # @param output [String] - # # @param tool_call_id [String] - # # - # def initialize(output: nil, tool_call_id: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(output: nil, tool_call_id: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput} for more + # details. + # + # @param output [String] The output of the tool call to be submitted to continue the run.
+ # + # @param tool_call_id [String] The ID of the tool call in the `required_action` object within the run object th end end end diff --git a/lib/openai/models/beta/threads/run_update_params.rb b/lib/openai/models/beta/threads/run_update_params.rb index 9d1f9b01..0033640d 100644 --- a/lib/openai/models/beta/threads/run_update_params.rb +++ b/lib/openai/models/beta/threads/run_update_params.rb @@ -4,10 +4,10 @@ module OpenAI module Models module Beta module Threads - class RunUpdateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Threads::Runs#update + class RunUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute thread_id # @@ -16,23 +16,24 @@ class RunUpdateParams < OpenAI::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true - # @!parse - # # @param thread_id [String] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(thread_id:, metadata: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(thread_id:, metadata: nil, request_options: {}) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Beta::Threads::RunUpdateParams} for more details. + # + # @param thread_id [String] + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/beta/threads/runs/code_interpreter_logs.rb b/lib/openai/models/beta/threads/runs/code_interpreter_logs.rb index db4bce17..2abe8916 100644 --- a/lib/openai/models/beta/threads/runs/code_interpreter_logs.rb +++ b/lib/openai/models/beta/threads/runs/code_interpreter_logs.rb @@ -5,7 +5,7 @@ module Models module Beta module Threads module Runs - class CodeInterpreterLogs < OpenAI::BaseModel + class CodeInterpreterLogs < OpenAI::Internal::Type::BaseModel # @!attribute index # The index of the output in the outputs array. # @@ -18,26 +18,20 @@ class CodeInterpreterLogs < OpenAI::BaseModel # @return [Symbol, :logs] required :type, const: :logs - # @!attribute [r] logs + # @!attribute logs # The text output from the Code Interpreter tool call. # # @return [String, nil] optional :logs, String - # @!parse - # # @return [String] - # attr_writer :logs - - # @!parse - # # Text output from the Code Interpreter tool call as part of a run step.
- # # - # # @param index [Integer] - # # @param logs [String] - # # @param type [Symbol, :logs] - # # - # def initialize(index:, logs: nil, type: :logs, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(index:, logs: nil, type: :logs) + # Text output from the Code Interpreter tool call as part of a run step. + # + # @param index [Integer] The index of the output in the outputs array. + # + # @param logs [String] The text output from the Code Interpreter tool call. + # + # @param type [Symbol, :logs] Always `logs`. end end end diff --git a/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rb b/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rb index 89ebfd97..b00adb80 100644 --- a/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rb +++ b/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rb @@ -5,7 +5,7 @@ module Models module Beta module Threads module Runs - class CodeInterpreterOutputImage < OpenAI::BaseModel + class CodeInterpreterOutputImage < OpenAI::Internal::Type::BaseModel # @!attribute index # The index of the output in the outputs array. # @@ -18,42 +18,33 @@ class CodeInterpreterOutputImage < OpenAI::BaseModel # @return [Symbol, :image] required :type, const: :image - # @!attribute [r] image + # @!attribute image # # @return [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image, nil] - optional :image, -> { OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image } + optional :image, -> { OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage::Image } - # @!parse - # # @return [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image] - # attr_writer :image - - # @!parse - # # @param index [Integer] - # # @param image [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image] - # # @param type [Symbol, :image] - # # - # def initialize(index:, image: nil, type: :image, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(index:, image: nil, type: :image) + # @param index [Integer] The index of the output in the outputs array. + # + # @param image [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image] + # + # @param type [Symbol, :image] Always `image`. - class Image < OpenAI::BaseModel - # @!attribute [r] file_id + # @see OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage#image + class Image < OpenAI::Internal::Type::BaseModel + # @!attribute file_id # The [file](https://platform.openai.com/docs/api-reference/files) ID of the - # image. + # image. # # @return [String, nil] optional :file_id, String - # @!parse - # # @return [String] - # attr_writer :file_id - - # @!parse - # # @param file_id [String] - # # - # def initialize(file_id: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(file_id: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image} for + # more details. 
+ # + # @param file_id [String] The [file](https://platform.openai.com/docs/api-reference/files) ID of the image end end end diff --git a/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb b/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb index fcde0967..33268434 100644 --- a/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb +++ b/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb @@ -5,7 +5,7 @@ module Models module Beta module Threads module Runs - class CodeInterpreterToolCall < OpenAI::BaseModel + class CodeInterpreterToolCall < OpenAI::Internal::Type::BaseModel # @!attribute id # The ID of the tool call. # @@ -16,28 +16,29 @@ class CodeInterpreterToolCall < OpenAI::BaseModel # The Code Interpreter tool call definition. # # @return [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter] - required :code_interpreter, - -> { OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter } + required :code_interpreter, -> { OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter } # @!attribute type # The type of tool call. This is always going to be `code_interpreter` for this - # type of tool call. + # type of tool call. # # @return [Symbol, :code_interpreter] required :type, const: :code_interpreter - # @!parse - # # Details of the Code Interpreter tool call the run step was involved in. - # # - # # @param id [String] - # # @param code_interpreter [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter] - # # @param type [Symbol, :code_interpreter] - # # - # def initialize(id:, code_interpreter:, type: :code_interpreter, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, code_interpreter:, type: :code_interpreter) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall} for more details. + # + # Details of the Code Interpreter tool call the run step was involved in. + # + # @param id [String] The ID of the tool call. + # + # @param code_interpreter [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter] The Code Interpreter tool call definition. + # + # @param type [Symbol, :code_interpreter] The type of tool call. This is always going to be `code_interpreter` for this ty - class CodeInterpreter < OpenAI::BaseModel + # @see OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall#code_interpreter + class CodeInterpreter < OpenAI::Internal::Type::BaseModel # @!attribute input # The input to the Code Interpreter tool call. # @@ -46,37 +47,37 @@ class CodeInterpreter < OpenAI::BaseModel # @!attribute outputs # The outputs from the Code Interpreter tool call. Code Interpreter can output one - # or more items, including text (`logs`) or images (`image`). Each of these are - # represented by a different object type. + # or more items, including text (`logs`) or images (`image`). Each of these are + # represented by a different object type. # # @return [Array] required :outputs, - -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output] } + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output] } - # @!parse - # # The Code Interpreter tool call definition. 
- # # - # # @param input [String] - # # @param outputs [Array] - # # - # def initialize(input:, outputs:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(input:, outputs:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter} + # for more details. + # + # The Code Interpreter tool call definition. # + # @param input [String] The input to the Code Interpreter tool call. + # + # @param outputs [Array] The outputs from the Code Interpreter tool call. Code Interpreter can output one + # Text output from the Code Interpreter tool call as part of a run step. - class Output < OpenAI::Union + module Output + extend OpenAI::Internal::Type::Union + discriminator :type # Text output from the Code Interpreter tool call as part of a run step. - variant :logs, - -> { OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs } + variant :logs, -> { OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs } variant :image, - -> { OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image } + -> { OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image } - class Logs < OpenAI::BaseModel + class Logs < OpenAI::Internal::Type::BaseModel # @!attribute logs # The text output from the Code Interpreter tool call. # @@ -89,23 +90,20 @@ class Logs < OpenAI::BaseModel # @return [Symbol, :logs] required :type, const: :logs - # @!parse - # # Text output from the Code Interpreter tool call as part of a run step. - # # - # # @param logs [String] - # # @param type [Symbol, :logs] - # # - # def initialize(logs:, type: :logs, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(logs:, type: :logs) + # Text output from the Code Interpreter tool call as part of a run step. + # + # @param logs [String] The text output from the Code Interpreter tool call. + # + # @param type [Symbol, :logs] Always `logs`. end - class Image < OpenAI::BaseModel + class Image < OpenAI::Internal::Type::BaseModel # @!attribute image # # @return [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image] required :image, - -> { OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image } + -> { OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image } # @!attribute type # Always `image`. @@ -113,30 +111,31 @@ class Image < OpenAI::BaseModel # @return [Symbol, :image] required :type, const: :image - # @!parse - # # @param image [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image] - # # @param type [Symbol, :image] - # # - # def initialize(image:, type: :image, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(image:, type: :image) + # @param image [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image] + # + # @param type [Symbol, :image] Always `image`. - class Image < OpenAI::BaseModel + # @see OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image#image + class Image < OpenAI::Internal::Type::BaseModel # @!attribute file_id # The [file](https://platform.openai.com/docs/api-reference/files) ID of the - # image. + # image. 
# # @return [String] required :file_id, String - # @!parse - # # @param file_id [String] - # # - # def initialize(file_id:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(file_id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image} + # for more details. + # + # @param file_id [String] The [file](https://platform.openai.com/docs/api-reference/files) ID of the image end end + + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image)] end end end diff --git a/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb b/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb index ce8315ac..853807ee 100644 --- a/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb +++ b/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb @@ -5,7 +5,7 @@ module Models module Beta module Threads module Runs - class CodeInterpreterToolCallDelta < OpenAI::BaseModel + class CodeInterpreterToolCallDelta < OpenAI::Internal::Type::BaseModel # @!attribute index # The index of the tool call in the tool calls array. # @@ -14,88 +14,80 @@ class CodeInterpreterToolCallDelta < OpenAI::BaseModel # @!attribute type # The type of tool call. This is always going to be `code_interpreter` for this - # type of tool call. + # type of tool call. # # @return [Symbol, :code_interpreter] required :type, const: :code_interpreter - # @!attribute [r] id + # @!attribute id # The ID of the tool call. # # @return [String, nil] optional :id, String - # @!parse - # # @return [String] - # attr_writer :id - - # @!attribute [r] code_interpreter + # @!attribute code_interpreter # The Code Interpreter tool call definition. # # @return [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter, nil] optional :code_interpreter, - -> { OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter } - - # @!parse - # # @return [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter] - # attr_writer :code_interpreter - - # @!parse - # # Details of the Code Interpreter tool call the run step was involved in. - # # - # # @param index [Integer] - # # @param id [String] - # # @param code_interpreter [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter] - # # @param type [Symbol, :code_interpreter] - # # - # def initialize(index:, id: nil, code_interpreter: nil, type: :code_interpreter, **) = super + -> { OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter } - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(index:, id: nil, code_interpreter: nil, type: :code_interpreter) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta} for more + # details. + # + # Details of the Code Interpreter tool call the run step was involved in. + # + # @param index [Integer] The index of the tool call in the tool calls array. + # + # @param id [String] The ID of the tool call. + # + # @param code_interpreter [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter] The Code Interpreter tool call definition. 
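# --- Editor's sketch (illustrative, not part of this diff) ---
# Consumer-side view of the `discriminator :type` union above: the wire `type`
# field selects the Logs or Image variant, so callers can branch on concrete
# classes. `tool_call` is a hypothetical CodeInterpreterToolCall from a run step.
tool_call.code_interpreter.outputs.each do |output|
  case output
  when OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs
    print output.logs
  when OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image
    puts "image file: #{output.image.file_id}"
  end
end
# --- end sketch ---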
+ # + # @param type [Symbol, :code_interpreter] The type of tool call. This is always going to be `code_interpreter` for this ty - class CodeInterpreter < OpenAI::BaseModel - # @!attribute [r] input + # @see OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta#code_interpreter + class CodeInterpreter < OpenAI::Internal::Type::BaseModel + # @!attribute input # The input to the Code Interpreter tool call. # # @return [String, nil] optional :input, String - # @!parse - # # @return [String] - # attr_writer :input - - # @!attribute [r] outputs + # @!attribute outputs # The outputs from the Code Interpreter tool call. Code Interpreter can output one - # or more items, including text (`logs`) or images (`image`). Each of these are - # represented by a different object type. + # or more items, including text (`logs`) or images (`image`). Each of these are + # represented by a different object type. # # @return [Array, nil] optional :outputs, - -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter::Output] } + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter::Output] } - # @!parse - # # @return [Array] - # attr_writer :outputs - - # @!parse - # # The Code Interpreter tool call definition. - # # - # # @param input [String] - # # @param outputs [Array] - # # - # def initialize(input: nil, outputs: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(input: nil, outputs: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter} + # for more details. + # + # The Code Interpreter tool call definition. # + # @param input [String] The input to the Code Interpreter tool call. + # + # @param outputs [Array] The outputs from the Code Interpreter tool call. Code Interpreter can output one + # Text output from the Code Interpreter tool call as part of a run step. - class Output < OpenAI::Union + module Output + extend OpenAI::Internal::Type::Union + discriminator :type # Text output from the Code Interpreter tool call as part of a run step. - variant :logs, -> { OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs } + variant :logs, -> { OpenAI::Beta::Threads::Runs::CodeInterpreterLogs } + + variant :image, -> { OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage } - variant :image, -> { OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage } + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage)] end end end diff --git a/lib/openai/models/beta/threads/runs/file_search_tool_call.rb b/lib/openai/models/beta/threads/runs/file_search_tool_call.rb index 9011d196..1877433e 100644 --- a/lib/openai/models/beta/threads/runs/file_search_tool_call.rb +++ b/lib/openai/models/beta/threads/runs/file_search_tool_call.rb @@ -5,7 +5,7 @@ module Models module Beta module Threads module Runs - class FileSearchToolCall < OpenAI::BaseModel + class FileSearchToolCall < OpenAI::Internal::Type::BaseModel # @!attribute id # The ID of the tool call object. # @@ -16,96 +16,92 @@ class FileSearchToolCall < OpenAI::BaseModel # For now, this is always going to be an empty object. 
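# --- Editor's sketch (hypothetical, not part of this diff) ---
# Because every field on the delta class above is optional, accumulation code
# guards with `&.`. `deltas` stands in for CodeInterpreterToolCallDelta values
# already collected from a run stream.
input = +""
deltas.each do |delta|
  input << (delta.code_interpreter&.input || "")
end
# --- end sketch ---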
# # @return [OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch] - required :file_search, -> { OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch } + required :file_search, -> { OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch } # @!attribute type # The type of tool call. This is always going to be `file_search` for this type of - # tool call. + # tool call. # # @return [Symbol, :file_search] required :type, const: :file_search - # @!parse - # # @param id [String] - # # @param file_search [OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch] - # # @param type [Symbol, :file_search] - # # - # def initialize(id:, file_search:, type: :file_search, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, file_search:, type: :file_search) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall} for more details. + # + # @param id [String] The ID of the tool call object. + # + # @param file_search [OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch] For now, this is always going to be an empty object. + # + # @param type [Symbol, :file_search] The type of tool call. This is always going to be `file_search` for this type of - class FileSearch < OpenAI::BaseModel - # @!attribute [r] ranking_options + # @see OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall#file_search + class FileSearch < OpenAI::Internal::Type::BaseModel + # @!attribute ranking_options # The ranking options for the file search. # # @return [OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions, nil] optional :ranking_options, - -> { OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions } + -> { OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions } - # @!parse - # # @return [OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions] - # attr_writer :ranking_options - - # @!attribute [r] results + # @!attribute results # The results of the file search. # # @return [Array, nil] optional :results, - -> { OpenAI::ArrayOf[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result] } - - # @!parse - # # @return [Array] - # attr_writer :results + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result] } - # @!parse - # # For now, this is always going to be an empty object. - # # - # # @param ranking_options [OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions] - # # @param results [Array] - # # - # def initialize(ranking_options: nil, results: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(ranking_options: nil, results: nil) + # For now, this is always going to be an empty object. + # + # @param ranking_options [OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions] The ranking options for the file search. + # + # @param results [Array] The results of the file search. - class RankingOptions < OpenAI::BaseModel + # @see OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch#ranking_options + class RankingOptions < OpenAI::Internal::Type::BaseModel # @!attribute ranker # The ranker to use for the file search. If not specified will use the `auto` - # ranker. + # ranker. 
# # @return [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker] required :ranker, - enum: -> { OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker } + enum: -> { OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker } # @!attribute score_threshold # The score threshold for the file search. All values must be a floating point - # number between 0 and 1. + # number between 0 and 1. # # @return [Float] required :score_threshold, Float - # @!parse - # # The ranking options for the file search. - # # - # # @param ranker [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker] - # # @param score_threshold [Float] - # # - # def initialize(ranker:, score_threshold:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(ranker:, score_threshold:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions} + # for more details. # + # The ranking options for the file search. + # + # @param ranker [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker] The ranker to use for the file search. If not specified will use the `auto` rank + # + # @param score_threshold [Float] The score threshold for the file search. All values must be a floating point num + # The ranker to use for the file search. If not specified will use the `auto` - # ranker. - class Ranker < OpenAI::Enum + # ranker. + # + # @see OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions#ranker + module Ranker + extend OpenAI::Internal::Type::Enum + AUTO = :auto DEFAULT_2024_08_21 = :default_2024_08_21 - finalize! + # @!method self.values + # @return [Array] end end - class Result < OpenAI::BaseModel + class Result < OpenAI::Internal::Type::BaseModel # @!attribute file_id # The ID of the file that result was found in. # @@ -120,72 +116,63 @@ class Result < OpenAI::BaseModel # @!attribute score # The score of the result. All values must be a floating point number between 0 - # and 1. + # and 1. # # @return [Float] required :score, Float - # @!attribute [r] content + # @!attribute content # The content of the result that was found. The content is only included if - # requested via the include query parameter. + # requested via the include query parameter. # # @return [Array, nil] optional :content, - -> { OpenAI::ArrayOf[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content] } - - # @!parse - # # @return [Array] - # attr_writer :content - - # @!parse - # # A result instance of the file search. - # # - # # @param file_id [String] - # # @param file_name [String] - # # @param score [Float] - # # @param content [Array] - # # - # def initialize(file_id:, file_name:, score:, content: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Content < OpenAI::BaseModel - # @!attribute [r] text + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content] } + + # @!method initialize(file_id:, file_name:, score:, content: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result} + # for more details. + # + # A result instance of the file search. 
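# --- Editor's sketch (illustrative, not part of this diff) ---
# With enums rewritten as modules extending OpenAI::Internal::Type::Enum, the
# members are introspectable through the generated `values` method, replacing
# the old `finalize!` bookkeeping:
OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker.values
# => [:auto, :default_2024_08_21]
# --- end sketch ---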
+ # + # @param file_id [String] The ID of the file that result was found in. + # + # @param file_name [String] The name of the file that result was found in. + # + # @param score [Float] The score of the result. All values must be a floating point number between 0 an + # + # @param content [Array] The content of the result that was found. The content is only included if reques + + class Content < OpenAI::Internal::Type::BaseModel + # @!attribute text # The text content of the file. # # @return [String, nil] optional :text, String - # @!parse - # # @return [String] - # attr_writer :text - - # @!attribute [r] type + # @!attribute type # The type of the content. # # @return [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type, nil] optional :type, - enum: -> { OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type } - - # @!parse - # # @return [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type] - # attr_writer :type - - # @!parse - # # @param text [String] - # # @param type [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type] - # # - # def initialize(text: nil, type: nil, **) = super + enum: -> { OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type } - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(text: nil, type: nil) + # @param text [String] The text content of the file. # + # @param type [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type] The type of the content. + # The type of the content. - class Type < OpenAI::Enum + # + # @see OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content#type + module Type + extend OpenAI::Internal::Type::Enum + TEXT = :text - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rb b/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rb index 86dd6012..13c9f547 100644 --- a/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rb +++ b/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rb @@ -5,12 +5,12 @@ module Models module Beta module Threads module Runs - class FileSearchToolCallDelta < OpenAI::BaseModel + class FileSearchToolCallDelta < OpenAI::Internal::Type::BaseModel # @!attribute file_search # For now, this is always going to be an empty object. # # @return [Object] - required :file_search, OpenAI::Unknown + required :file_search, OpenAI::Internal::Type::Unknown # @!attribute index # The index of the tool call in the tool calls array. @@ -20,30 +20,28 @@ class FileSearchToolCallDelta < OpenAI::BaseModel # @!attribute type # The type of tool call. This is always going to be `file_search` for this type of - # tool call. + # tool call. # # @return [Symbol, :file_search] required :type, const: :file_search - # @!attribute [r] id + # @!attribute id # The ID of the tool call object. 
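# --- Editor's sketch (illustrative, not part of this diff) ---
# Read-side view: `file_search` stands in for the FileSearch object above.
# `results` (and each result's `content`) are only present when the matching
# `include` query parameter was sent, hence the nil guards.
file_search.results&.each do |result|
  puts "#{result.file_name} scored #{result.score}"
  result.content&.each { |chunk| puts chunk.text if chunk.type == :text }
end
# --- end sketch ---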
# # @return [String, nil] optional :id, String - # @!parse - # # @return [String] - # attr_writer :id - - # @!parse - # # @param file_search [Object] - # # @param index [Integer] - # # @param id [String] - # # @param type [Symbol, :file_search] - # # - # def initialize(file_search:, index:, id: nil, type: :file_search, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(file_search:, index:, id: nil, type: :file_search) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta} for more details. + # + # @param file_search [Object] For now, this is always going to be an empty object. + # + # @param index [Integer] The index of the tool call in the tool calls array. + # + # @param id [String] The ID of the tool call object. + # + # @param type [Symbol, :file_search] The type of tool call. This is always going to be `file_search` for this type of end end end diff --git a/lib/openai/models/beta/threads/runs/function_tool_call.rb b/lib/openai/models/beta/threads/runs/function_tool_call.rb index f3e392ba..de633613 100644 --- a/lib/openai/models/beta/threads/runs/function_tool_call.rb +++ b/lib/openai/models/beta/threads/runs/function_tool_call.rb @@ -5,7 +5,7 @@ module Models module Beta module Threads module Runs - class FunctionToolCall < OpenAI::BaseModel + class FunctionToolCall < OpenAI::Internal::Type::BaseModel # @!attribute id # The ID of the tool call object. # @@ -16,25 +16,27 @@ class FunctionToolCall < OpenAI::BaseModel # The definition of the function that was called. # # @return [OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function] - required :function, -> { OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function } + required :function, -> { OpenAI::Beta::Threads::Runs::FunctionToolCall::Function } # @!attribute type # The type of tool call. This is always going to be `function` for this type of - # tool call. + # tool call. # # @return [Symbol, :function] required :type, const: :function - # @!parse - # # @param id [String] - # # @param function [OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function] - # # @param type [Symbol, :function] - # # - # def initialize(id:, function:, type: :function, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, function:, type: :function) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::FunctionToolCall} for more details. + # + # @param id [String] The ID of the tool call object. + # + # @param function [OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function] The definition of the function that was called. + # + # @param type [Symbol, :function] The type of tool call. This is always going to be `function` for this type of to - class Function < OpenAI::BaseModel + # @see OpenAI::Models::Beta::Threads::Runs::FunctionToolCall#function + class Function < OpenAI::Internal::Type::BaseModel # @!attribute arguments # The arguments passed to the function. # @@ -49,22 +51,24 @@ class Function < OpenAI::BaseModel # @!attribute output # The output of the function. This will be `null` if the outputs have not been - # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) - # yet. + # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # yet. 
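# --- Editor's sketch (illustrative, not part of this diff) ---
# `arguments` arrives as a JSON-encoded string, so callers usually parse it
# before dispatching; `output` stays nil until tool outputs are submitted.
# `call` is a hypothetical FunctionToolCall taken from a run step.
require "json"

args = JSON.parse(call.function.arguments)
puts "#{call.function.name}(#{args.inspect})"
puts call.function.output || "(outputs not yet submitted)"
# --- end sketch ---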
# # @return [String, nil] required :output, String, nil?: true - # @!parse - # # The definition of the function that was called. - # # - # # @param arguments [String] - # # @param name [String] - # # @param output [String, nil] - # # - # def initialize(arguments:, name:, output:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(arguments:, name:, output:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function} for more + # details. + # + # The definition of the function that was called. + # + # @param arguments [String] The arguments passed to the function. + # + # @param name [String] The name of the function. + # + # @param output [String, nil] The output of the function. This will be `null` if the outputs have not been [su end end end diff --git a/lib/openai/models/beta/threads/runs/function_tool_call_delta.rb b/lib/openai/models/beta/threads/runs/function_tool_call_delta.rb index 02069f3b..ad8391bb 100644 --- a/lib/openai/models/beta/threads/runs/function_tool_call_delta.rb +++ b/lib/openai/models/beta/threads/runs/function_tool_call_delta.rb @@ -5,7 +5,7 @@ module Models module Beta module Threads module Runs - class FunctionToolCallDelta < OpenAI::BaseModel + class FunctionToolCallDelta < OpenAI::Internal::Type::BaseModel # @!attribute index # The index of the tool call in the tool calls array. # @@ -14,80 +14,69 @@ class FunctionToolCallDelta < OpenAI::BaseModel # @!attribute type # The type of tool call. This is always going to be `function` for this type of - # tool call. + # tool call. # # @return [Symbol, :function] required :type, const: :function - # @!attribute [r] id + # @!attribute id # The ID of the tool call object. # # @return [String, nil] optional :id, String - # @!parse - # # @return [String] - # attr_writer :id - - # @!attribute [r] function + # @!attribute function # The definition of the function that was called. # # @return [OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function, nil] - optional :function, -> { OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function } - - # @!parse - # # @return [OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function] - # attr_writer :function + optional :function, -> { OpenAI::Beta::Threads::Runs::FunctionToolCallDelta::Function } - # @!parse - # # @param index [Integer] - # # @param id [String] - # # @param function [OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function] - # # @param type [Symbol, :function] - # # - # def initialize(index:, id: nil, function: nil, type: :function, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(index:, id: nil, function: nil, type: :function) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta} for more details. + # + # @param index [Integer] The index of the tool call in the tool calls array. + # + # @param id [String] The ID of the tool call object. + # + # @param function [OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function] The definition of the function that was called. + # + # @param type [Symbol, :function] The type of tool call. 
This is always going to be `function` for this type of to - class Function < OpenAI::BaseModel - # @!attribute [r] arguments + # @see OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta#function + class Function < OpenAI::Internal::Type::BaseModel + # @!attribute arguments # The arguments passed to the function. # # @return [String, nil] optional :arguments, String - # @!parse - # # @return [String] - # attr_writer :arguments - - # @!attribute [r] name + # @!attribute name # The name of the function. # # @return [String, nil] optional :name, String - # @!parse - # # @return [String] - # attr_writer :name - # @!attribute output # The output of the function. This will be `null` if the outputs have not been - # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) - # yet. + # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # yet. # # @return [String, nil] optional :output, String, nil?: true - # @!parse - # # The definition of the function that was called. - # # - # # @param arguments [String] - # # @param name [String] - # # @param output [String, nil] - # # - # def initialize(arguments: nil, name: nil, output: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(arguments: nil, name: nil, output: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function} for more + # details. + # + # The definition of the function that was called. + # + # @param arguments [String] The arguments passed to the function. + # + # @param name [String] The name of the function. + # + # @param output [String, nil] The output of the function. This will be `null` if the outputs have not been [su end end end diff --git a/lib/openai/models/beta/threads/runs/message_creation_step_details.rb b/lib/openai/models/beta/threads/runs/message_creation_step_details.rb index 459f5bdc..0b5b6ac9 100644 --- a/lib/openai/models/beta/threads/runs/message_creation_step_details.rb +++ b/lib/openai/models/beta/threads/runs/message_creation_step_details.rb @@ -5,12 +5,12 @@ module Models module Beta module Threads module Runs - class MessageCreationStepDetails < OpenAI::BaseModel + class MessageCreationStepDetails < OpenAI::Internal::Type::BaseModel # @!attribute message_creation # # @return [OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation] required :message_creation, - -> { OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation } + -> { OpenAI::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation } # @!attribute type # Always `message_creation`. @@ -18,29 +18,23 @@ class MessageCreationStepDetails < OpenAI::BaseModel # @return [Symbol, :message_creation] required :type, const: :message_creation - # @!parse - # # Details of the message creation by the run step. - # # - # # @param message_creation [OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation] - # # @param type [Symbol, :message_creation] - # # - # def initialize(message_creation:, type: :message_creation, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(message_creation:, type: :message_creation) + # Details of the message creation by the run step. + # + # @param message_creation [OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation] + # + # @param type [Symbol, :message_creation] Always `message_creation`. 
- class MessageCreation < OpenAI::BaseModel + # @see OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails#message_creation + class MessageCreation < OpenAI::Internal::Type::BaseModel # @!attribute message_id # The ID of the message that was created by this run step. # # @return [String] required :message_id, String - # @!parse - # # @param message_id [String] - # # - # def initialize(message_id:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(message_id:) + # @param message_id [String] The ID of the message that was created by this run step. end end end diff --git a/lib/openai/models/beta/threads/runs/run_step.rb b/lib/openai/models/beta/threads/runs/run_step.rb index ae1e41c3..fde3abeb 100644 --- a/lib/openai/models/beta/threads/runs/run_step.rb +++ b/lib/openai/models/beta/threads/runs/run_step.rb @@ -5,7 +5,8 @@ module Models module Beta module Threads module Runs - class RunStep < OpenAI::BaseModel + # @see OpenAI::Resources::Beta::Threads::Runs::Steps#retrieve + class RunStep < OpenAI::Internal::Type::BaseModel # @!attribute id # The identifier of the run step, which can be referenced in API endpoints. # @@ -14,8 +15,8 @@ class RunStep < OpenAI::BaseModel # @!attribute assistant_id # The ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) - # associated with the run step. + # [assistant](https://platform.openai.com/docs/api-reference/assistants) + # associated with the run step. # # @return [String] required :assistant_id, String @@ -40,7 +41,7 @@ class RunStep < OpenAI::BaseModel # @!attribute expired_at # The Unix timestamp (in seconds) for when the run step expired. A step is - # considered expired if the parent run is expired. + # considered expired if the parent run is expired. # # @return [Integer, nil] required :expired_at, Integer, nil?: true @@ -53,21 +54,21 @@ class RunStep < OpenAI::BaseModel # @!attribute last_error # The last error associated with this run step. Will be `null` if there are no - # errors. + # errors. # # @return [OpenAI::Models::Beta::Threads::Runs::RunStep::LastError, nil] - required :last_error, -> { OpenAI::Models::Beta::Threads::Runs::RunStep::LastError }, nil?: true + required :last_error, -> { OpenAI::Beta::Threads::Runs::RunStep::LastError }, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - required :metadata, OpenAI::HashOf[String], nil?: true + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute object # The object type, which is always `thread.run.step`. @@ -77,27 +78,27 @@ class RunStep < OpenAI::BaseModel # @!attribute run_id # The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that - # this run step is a part of. + # this run step is a part of. 
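# --- Editor's aside (assumption, not stated in this diff) ---
# The HashOf[String] declaration above types metadata values client-side,
# while the 16-pair and 64/512-character limits described in the comment are
# enforced by the API. A valid value is just a plain symbol-keyed hash:
metadata = {batch: "nightly", owner: "data-team"}
# --- end aside ---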
# # @return [String] required :run_id, String # @!attribute status # The status of the run step, which can be either `in_progress`, `cancelled`, - # `failed`, `completed`, or `expired`. + # `failed`, `completed`, or `expired`. # # @return [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::Status] - required :status, enum: -> { OpenAI::Models::Beta::Threads::Runs::RunStep::Status } + required :status, enum: -> { OpenAI::Beta::Threads::Runs::RunStep::Status } # @!attribute step_details # The details of the run step. # # @return [OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails, OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails] - required :step_details, union: -> { OpenAI::Models::Beta::Threads::Runs::RunStep::StepDetails } + required :step_details, union: -> { OpenAI::Beta::Threads::Runs::RunStep::StepDetails } # @!attribute thread_id # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) - # that was run. + # that was run. # # @return [String] required :thread_id, String @@ -106,65 +107,60 @@ class RunStep < OpenAI::BaseModel # The type of run step, which can be either `message_creation` or `tool_calls`. # # @return [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::Type] - required :type, enum: -> { OpenAI::Models::Beta::Threads::Runs::RunStep::Type } + required :type, enum: -> { OpenAI::Beta::Threads::Runs::RunStep::Type } # @!attribute usage # Usage statistics related to the run step. This value will be `null` while the - # run step's status is `in_progress`. + # run step's status is `in_progress`. # # @return [OpenAI::Models::Beta::Threads::Runs::RunStep::Usage, nil] - required :usage, -> { OpenAI::Models::Beta::Threads::Runs::RunStep::Usage }, nil?: true - - # @!parse - # # Represents a step in execution of a run. 
- # # - # # @param id [String] - # # @param assistant_id [String] - # # @param cancelled_at [Integer, nil] - # # @param completed_at [Integer, nil] - # # @param created_at [Integer] - # # @param expired_at [Integer, nil] - # # @param failed_at [Integer, nil] - # # @param last_error [OpenAI::Models::Beta::Threads::Runs::RunStep::LastError, nil] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param run_id [String] - # # @param status [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::Status] - # # @param step_details [OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails, OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails] - # # @param thread_id [String] - # # @param type [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::Type] - # # @param usage [OpenAI::Models::Beta::Threads::Runs::RunStep::Usage, nil] - # # @param object [Symbol, :"thread.run.step"] - # # - # def initialize( - # id:, - # assistant_id:, - # cancelled_at:, - # completed_at:, - # created_at:, - # expired_at:, - # failed_at:, - # last_error:, - # metadata:, - # run_id:, - # status:, - # step_details:, - # thread_id:, - # type:, - # usage:, - # object: :"thread.run.step", - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class LastError < OpenAI::BaseModel + required :usage, -> { OpenAI::Beta::Threads::Runs::RunStep::Usage }, nil?: true + + # @!method initialize(id:, assistant_id:, cancelled_at:, completed_at:, created_at:, expired_at:, failed_at:, last_error:, metadata:, run_id:, status:, step_details:, thread_id:, type:, usage:, object: :"thread.run.step") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::RunStep} for more details. + # + # Represents a step in execution of a run. + # + # @param id [String] The identifier of the run step, which can be referenced in API endpoints. + # + # @param assistant_id [String] The ID of the [assistant](https://platform.openai.com/docs/api-reference/assista + # + # @param cancelled_at [Integer, nil] The Unix timestamp (in seconds) for when the run step was cancelled. + # + # @param completed_at [Integer, nil] The Unix timestamp (in seconds) for when the run step completed. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the run step was created. + # + # @param expired_at [Integer, nil] The Unix timestamp (in seconds) for when the run step expired. A step is conside + # + # @param failed_at [Integer, nil] The Unix timestamp (in seconds) for when the run step failed. + # + # @param last_error [OpenAI::Models::Beta::Threads::Runs::RunStep::LastError, nil] The last error associated with this run step. Will be `null` if there are no err + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param run_id [String] The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that th + # + # @param status [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::Status] The status of the run step, which can be either `in_progress`, `cancelled`, `fai + # + # @param step_details [OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails, OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails] The details of the run step. 
+ # + # @param thread_id [String] The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) t + # + # @param type [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::Type] The type of run step, which can be either `message_creation` or `tool_calls`. + # + # @param usage [OpenAI::Models::Beta::Threads::Runs::RunStep::Usage, nil] Usage statistics related to the run step. This value will be `null` while the ru + # + # @param object [Symbol, :"thread.run.step"] The object type, which is always `thread.run.step`. + + # @see OpenAI::Models::Beta::Threads::Runs::RunStep#last_error + class LastError < OpenAI::Internal::Type::BaseModel # @!attribute code # One of `server_error` or `rate_limit_exceeded`. # # @return [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::LastError::Code] - required :code, enum: -> { OpenAI::Models::Beta::Threads::Runs::RunStep::LastError::Code } + required :code, enum: -> { OpenAI::Beta::Threads::Runs::RunStep::LastError::Code } # @!attribute message # A human-readable description of the error. @@ -172,66 +168,78 @@ class LastError < OpenAI::BaseModel # @return [String] required :message, String - # @!parse - # # The last error associated with this run step. Will be `null` if there are no - # # errors. - # # - # # @param code [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::LastError::Code] - # # @param message [String] - # # - # def initialize(code:, message:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(code:, message:) + # The last error associated with this run step. Will be `null` if there are no + # errors. + # + # @param code [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::LastError::Code] One of `server_error` or `rate_limit_exceeded`. # + # @param message [String] A human-readable description of the error. + # One of `server_error` or `rate_limit_exceeded`. - class Code < OpenAI::Enum + # + # @see OpenAI::Models::Beta::Threads::Runs::RunStep::LastError#code + module Code + extend OpenAI::Internal::Type::Enum + SERVER_ERROR = :server_error RATE_LIMIT_EXCEEDED = :rate_limit_exceeded - finalize! + # @!method self.values + # @return [Array] end end - # @abstract - # # The status of the run step, which can be either `in_progress`, `cancelled`, - # `failed`, `completed`, or `expired`. - class Status < OpenAI::Enum + # `failed`, `completed`, or `expired`. + # + # @see OpenAI::Models::Beta::Threads::Runs::RunStep#status + module Status + extend OpenAI::Internal::Type::Enum + IN_PROGRESS = :in_progress CANCELLED = :cancelled FAILED = :failed COMPLETED = :completed EXPIRED = :expired - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # # The details of the run step. - class StepDetails < OpenAI::Union + # + # @see OpenAI::Models::Beta::Threads::Runs::RunStep#step_details + module StepDetails + extend OpenAI::Internal::Type::Union + discriminator :type # Details of the message creation by the run step. - variant :message_creation, -> { OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails } + variant :message_creation, -> { OpenAI::Beta::Threads::Runs::MessageCreationStepDetails } # Details of the tool call. 
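# --- Editor's sketch (illustrative, not part of this diff) ---
# Consumer-side view of the StepDetails union declared above; `step` is a
# hypothetical RunStep, e.g. returned by the steps#retrieve resource named in
# the @see tag.
case step.step_details
when OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails
  puts "created message #{step.step_details.message_creation.message_id}"
when OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails
  puts "step ran tool calls"
end
puts "total tokens: #{step.usage&.total_tokens}" # usage is nil while in_progress
# --- end sketch ---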
- variant :tool_calls, -> { OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails } + variant :tool_calls, -> { OpenAI::Beta::Threads::Runs::ToolCallsStepDetails } + + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails, OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails)] end - # @abstract - # # The type of run step, which can be either `message_creation` or `tool_calls`. - class Type < OpenAI::Enum + # + # @see OpenAI::Models::Beta::Threads::Runs::RunStep#type + module Type + extend OpenAI::Internal::Type::Enum + MESSAGE_CREATION = :message_creation TOOL_CALLS = :tool_calls - finalize! + # @!method self.values + # @return [Array] end - class Usage < OpenAI::BaseModel + # @see OpenAI::Models::Beta::Threads::Runs::RunStep#usage + class Usage < OpenAI::Internal::Type::BaseModel # @!attribute completion_tokens # Number of completion tokens used over the course of the run step. # @@ -250,17 +258,15 @@ class Usage < OpenAI::BaseModel # @return [Integer] required :total_tokens, Integer - # @!parse - # # Usage statistics related to the run step. This value will be `null` while the - # # run step's status is `in_progress`. - # # - # # @param completion_tokens [Integer] - # # @param prompt_tokens [Integer] - # # @param total_tokens [Integer] - # # - # def initialize(completion_tokens:, prompt_tokens:, total_tokens:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(completion_tokens:, prompt_tokens:, total_tokens:) + # Usage statistics related to the run step. This value will be `null` while the + # run step's status is `in_progress`. + # + # @param completion_tokens [Integer] Number of completion tokens used over the course of the run step. + # + # @param prompt_tokens [Integer] Number of prompt tokens used over the course of the run step. + # + # @param total_tokens [Integer] Total number of tokens used (prompt + completion). end end end diff --git a/lib/openai/models/beta/threads/runs/run_step_delta.rb b/lib/openai/models/beta/threads/runs/run_step_delta.rb index 3f19839e..3dd76094 100644 --- a/lib/openai/models/beta/threads/runs/run_step_delta.rb +++ b/lib/openai/models/beta/threads/runs/run_step_delta.rb @@ -5,37 +5,34 @@ module Models module Beta module Threads module Runs - class RunStepDelta < OpenAI::BaseModel - # @!attribute [r] step_details + class RunStepDelta < OpenAI::Internal::Type::BaseModel + # @!attribute step_details # The details of the run step. # # @return [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject, nil] - optional :step_details, union: -> { OpenAI::Models::Beta::Threads::Runs::RunStepDelta::StepDetails } + optional :step_details, union: -> { OpenAI::Beta::Threads::Runs::RunStepDelta::StepDetails } - # @!parse - # # @return [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject] - # attr_writer :step_details - - # @!parse - # # The delta containing the fields that have changed on the run step. - # # - # # @param step_details [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject] - # # - # def initialize(step_details: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(step_details: nil) + # The delta containing the fields that have changed on the run step. 
# + # @param step_details [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject] The details of the run step. + # The details of the run step. - class StepDetails < OpenAI::Union + # + # @see OpenAI::Models::Beta::Threads::Runs::RunStepDelta#step_details + module StepDetails + extend OpenAI::Internal::Type::Union + discriminator :type # Details of the message creation by the run step. - variant :message_creation, -> { OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta } + variant :message_creation, -> { OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta } # Details of the tool call. - variant :tool_calls, -> { OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject } + variant :tool_calls, -> { OpenAI::Beta::Threads::Runs::ToolCallDeltaObject } + + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject)] end end end diff --git a/lib/openai/models/beta/threads/runs/run_step_delta_event.rb b/lib/openai/models/beta/threads/runs/run_step_delta_event.rb index fd2e6642..abca9d4b 100644 --- a/lib/openai/models/beta/threads/runs/run_step_delta_event.rb +++ b/lib/openai/models/beta/threads/runs/run_step_delta_event.rb @@ -5,7 +5,7 @@ module Models module Beta module Threads module Runs - class RunStepDeltaEvent < OpenAI::BaseModel + class RunStepDeltaEvent < OpenAI::Internal::Type::BaseModel # @!attribute id # The identifier of the run step, which can be referenced in API endpoints. # @@ -16,7 +16,7 @@ class RunStepDeltaEvent < OpenAI::BaseModel # The delta containing the fields that have changed on the run step. # # @return [OpenAI::Models::Beta::Threads::Runs::RunStepDelta] - required :delta, -> { OpenAI::Models::Beta::Threads::Runs::RunStepDelta } + required :delta, -> { OpenAI::Beta::Threads::Runs::RunStepDelta } # @!attribute object # The object type, which is always `thread.run.step.delta`. @@ -24,17 +24,15 @@ class RunStepDeltaEvent < OpenAI::BaseModel # @return [Symbol, :"thread.run.step.delta"] required :object, const: :"thread.run.step.delta" - # @!parse - # # Represents a run step delta i.e. any changed fields on a run step during - # # streaming. - # # - # # @param id [String] - # # @param delta [OpenAI::Models::Beta::Threads::Runs::RunStepDelta] - # # @param object [Symbol, :"thread.run.step.delta"] - # # - # def initialize(id:, delta:, object: :"thread.run.step.delta", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, delta:, object: :"thread.run.step.delta") + # Represents a run step delta i.e. any changed fields on a run step during + # streaming. + # + # @param id [String] The identifier of the run step, which can be referenced in API endpoints. + # + # @param delta [OpenAI::Models::Beta::Threads::Runs::RunStepDelta] The delta containing the fields that have changed on the run step. + # + # @param object [Symbol, :"thread.run.step.delta"] The object type, which is always `thread.run.step.delta`. 
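# --- Editor's sketch (hypothetical, not part of this diff) ---
# `event` stands in for one event taken from an assistant run stream; delta
# events carry only the fields that changed, so `step_details` may be nil.
if event.is_a?(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent)
  details = event.delta.step_details
  puts "step #{event.id} changed: #{details.class}" unless details.nil?
end
# --- end sketch ---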
end end diff --git a/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rb b/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rb index 4c2d9170..856e1c65 100644 --- a/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rb +++ b/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rb @@ -5,50 +5,35 @@ module Models module Beta module Threads module Runs - class RunStepDeltaMessageDelta < OpenAI::BaseModel + class RunStepDeltaMessageDelta < OpenAI::Internal::Type::BaseModel # @!attribute type # Always `message_creation`. # # @return [Symbol, :message_creation] required :type, const: :message_creation - # @!attribute [r] message_creation + # @!attribute message_creation # # @return [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation, nil] - optional :message_creation, - -> { OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation } + optional :message_creation, -> { OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation } - # @!parse - # # @return [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation] - # attr_writer :message_creation - - # @!parse - # # Details of the message creation by the run step. - # # - # # @param message_creation [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation] - # # @param type [Symbol, :message_creation] - # # - # def initialize(message_creation: nil, type: :message_creation, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(message_creation: nil, type: :message_creation) + # Details of the message creation by the run step. + # + # @param message_creation [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation] + # + # @param type [Symbol, :message_creation] Always `message_creation`. - class MessageCreation < OpenAI::BaseModel - # @!attribute [r] message_id + # @see OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta#message_creation + class MessageCreation < OpenAI::Internal::Type::BaseModel + # @!attribute message_id # The ID of the message that was created by this run step. # # @return [String, nil] optional :message_id, String - # @!parse - # # @return [String] - # attr_writer :message_id - - # @!parse - # # @param message_id [String] - # # - # def initialize(message_id: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(message_id: nil) + # @param message_id [String] The ID of the message that was created by this run step. end end end diff --git a/lib/openai/models/beta/threads/runs/run_step_include.rb b/lib/openai/models/beta/threads/runs/run_step_include.rb index 1d4c531e..5253cc7e 100644 --- a/lib/openai/models/beta/threads/runs/run_step_include.rb +++ b/lib/openai/models/beta/threads/runs/run_step_include.rb @@ -5,12 +5,14 @@ module Models module Beta module Threads module Runs - # @abstract - # - class RunStepInclude < OpenAI::Enum - STEP_DETAILS_TOOL_CALLS_FILE_SEARCH_RESULTS_CONTENT = :"step_details.tool_calls[*].file_search.results[*].content" + module RunStepInclude + extend OpenAI::Internal::Type::Enum - finalize! 
+ STEP_DETAILS_TOOL_CALLS_FILE_SEARCH_RESULTS_CONTENT = + :"step_details.tool_calls[*].file_search.results[*].content" + + # @!method self.values + # @return [Array] end end diff --git a/lib/openai/models/beta/threads/runs/step_list_params.rb b/lib/openai/models/beta/threads/runs/step_list_params.rb index 685e0e75..2a263bdc 100644 --- a/lib/openai/models/beta/threads/runs/step_list_params.rb +++ b/lib/openai/models/beta/threads/runs/step_list_params.rb @@ -5,102 +5,89 @@ module Models module Beta module Threads module Runs - class StepListParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Threads::Runs::Steps#list + class StepListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute thread_id # # @return [String] required :thread_id, String - # @!attribute [r] after + # @!attribute after # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. # # @return [String, nil] optional :after, String - # @!parse - # # @return [String] - # attr_writer :after - - # @!attribute [r] before + # @!attribute before # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. # # @return [String, nil] optional :before, String - # @!parse - # # @return [String] - # attr_writer :before - - # @!attribute [r] include + # @!attribute include # A list of additional fields to include in the response. Currently the only - # supported value is `step_details.tool_calls[*].file_search.results[*].content` - # to fetch the file search result content. + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. # - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. # # @return [Array, nil] - optional :include, -> { OpenAI::ArrayOf[enum: OpenAI::Models::Beta::Threads::Runs::RunStepInclude] } - - # @!parse - # # @return [Array] - # attr_writer :include + optional :include, + -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Beta::Threads::Runs::RunStepInclude] } - # @!attribute [r] limit + # @!attribute limit # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. 
# # @return [Integer, nil] optional :limit, Integer - # @!parse - # # @return [Integer] - # attr_writer :limit - - # @!attribute [r] order + # @!attribute order # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. # # @return [Symbol, OpenAI::Models::Beta::Threads::Runs::StepListParams::Order, nil] - optional :order, enum: -> { OpenAI::Models::Beta::Threads::Runs::StepListParams::Order } + optional :order, enum: -> { OpenAI::Beta::Threads::Runs::StepListParams::Order } - # @!parse - # # @return [Symbol, OpenAI::Models::Beta::Threads::Runs::StepListParams::Order] - # attr_writer :order - - # @!parse - # # @param thread_id [String] - # # @param after [String] - # # @param before [String] - # # @param include [Array] - # # @param limit [Integer] - # # @param order [Symbol, OpenAI::Models::Beta::Threads::Runs::StepListParams::Order] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(thread_id:, after: nil, before: nil, include: nil, limit: nil, order: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(thread_id:, after: nil, before: nil, include: nil, limit: nil, order: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::StepListParams} for more details. + # + # @param thread_id [String] # + # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place + # + # @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place + # + # @param include [Array] A list of additional fields to include in the response. Currently the only suppo + # + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1 + # + # @param order [Symbol, OpenAI::Models::Beta::Threads::Runs::StepListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. - class Order < OpenAI::Enum + # order and `desc` for descending order. + module Order + extend OpenAI::Internal::Type::Enum + ASC = :asc DESC = :desc - finalize! 
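# --- Editor's sketch (illustrative; the resource method and its signature are
# assumed from the @see tag, not spelled out in this diff) ---
# A rough call site for the params class above, with file-search result
# content included and an explicit sort order; `client` is an OpenAI::Client.
steps = client.beta.threads.runs.steps.list(
  "run_abc123",
  thread_id: "thread_abc123",
  include: [:"step_details.tool_calls[*].file_search.results[*].content"],
  limit: 20,
  order: :asc
)
# --- end sketch ---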
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/beta/threads/runs/step_retrieve_params.rb b/lib/openai/models/beta/threads/runs/step_retrieve_params.rb index 5329f9d4..349177b1 100644 --- a/lib/openai/models/beta/threads/runs/step_retrieve_params.rb +++ b/lib/openai/models/beta/threads/runs/step_retrieve_params.rb @@ -5,10 +5,10 @@ module Models module Beta module Threads module Runs - class StepRetrieveParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Beta::Threads::Runs::Steps#retrieve + class StepRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute thread_id # @@ -20,31 +20,30 @@ class StepRetrieveParams < OpenAI::BaseModel # @return [String] required :run_id, String - # @!attribute [r] include + # @!attribute include # A list of additional fields to include in the response. Currently the only - # supported value is `step_details.tool_calls[*].file_search.results[*].content` - # to fetch the file search result content. + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. # - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. # # @return [Array, nil] - optional :include, -> { OpenAI::ArrayOf[enum: OpenAI::Models::Beta::Threads::Runs::RunStepInclude] } + optional :include, + -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Beta::Threads::Runs::RunStepInclude] } - # @!parse - # # @return [Array] - # attr_writer :include - - # @!parse - # # @param thread_id [String] - # # @param run_id [String] - # # @param include [Array] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(thread_id:, run_id:, include: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(thread_id:, run_id:, include: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::StepRetrieveParams} for more details. + # + # @param thread_id [String] + # + # @param run_id [String] + # + # @param include [Array] A list of additional fields to include in the response. Currently the only suppo + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/beta/threads/runs/tool_call.rb b/lib/openai/models/beta/threads/runs/tool_call.rb index d5d780ad..eb99ad15 100644 --- a/lib/openai/models/beta/threads/runs/tool_call.rb +++ b/lib/openai/models/beta/threads/runs/tool_call.rb @@ -5,18 +5,21 @@ module Models module Beta module Threads module Runs - # @abstract - # # Details of the Code Interpreter tool call the run step was involved in. - class ToolCall < OpenAI::Union + module ToolCall + extend OpenAI::Internal::Type::Union + discriminator :type # Details of the Code Interpreter tool call the run step was involved in. 
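# A sketch of cursor pagination with the `after`/`limit`/`order` params above:
# fetch pages in ascending order and feed the last step's `id` back in as the
# cursor. Assumes the list response exposes its items via `data`, and reuses
# the `client` and placeholder IDs from the previous sketch.
params = {thread_id: "thread_abc123", limit: 20, order: :asc}
loop do
  page = client.beta.threads.runs.steps.list("run_abc123", **params)
  page.data.each { |step| puts step.id }
  break if page.data.empty?
  params[:after] = page.data.last.id
end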
- variant :code_interpreter, -> { OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall } + variant :code_interpreter, -> { OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall } + + variant :file_search, -> { OpenAI::Beta::Threads::Runs::FileSearchToolCall } - variant :file_search, -> { OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall } + variant :function, -> { OpenAI::Beta::Threads::Runs::FunctionToolCall } - variant :function, -> { OpenAI::Models::Beta::Threads::Runs::FunctionToolCall } + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall, OpenAI::Models::Beta::Threads::Runs::FunctionToolCall)] end end end diff --git a/lib/openai/models/beta/threads/runs/tool_call_delta.rb b/lib/openai/models/beta/threads/runs/tool_call_delta.rb index a1b92351..14cdb2c1 100644 --- a/lib/openai/models/beta/threads/runs/tool_call_delta.rb +++ b/lib/openai/models/beta/threads/runs/tool_call_delta.rb @@ -5,18 +5,21 @@ module Models module Beta module Threads module Runs - # @abstract - # # Details of the Code Interpreter tool call the run step was involved in. - class ToolCallDelta < OpenAI::Union + module ToolCallDelta + extend OpenAI::Internal::Type::Union + discriminator :type # Details of the Code Interpreter tool call the run step was involved in. - variant :code_interpreter, -> { OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta } + variant :code_interpreter, -> { OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta } + + variant :file_search, -> { OpenAI::Beta::Threads::Runs::FileSearchToolCallDelta } - variant :file_search, -> { OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta } + variant :function, -> { OpenAI::Beta::Threads::Runs::FunctionToolCallDelta } - variant :function, -> { OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta } + # @!method self.variants + # @return [Array(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta, OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta)] end end end diff --git a/lib/openai/models/beta/threads/runs/tool_call_delta_object.rb b/lib/openai/models/beta/threads/runs/tool_call_delta_object.rb index b9e864cd..c6900dcc 100644 --- a/lib/openai/models/beta/threads/runs/tool_call_delta_object.rb +++ b/lib/openai/models/beta/threads/runs/tool_call_delta_object.rb @@ -5,34 +5,31 @@ module Models module Beta module Threads module Runs - class ToolCallDeltaObject < OpenAI::BaseModel + class ToolCallDeltaObject < OpenAI::Internal::Type::BaseModel # @!attribute type # Always `tool_calls`. # # @return [Symbol, :tool_calls] required :type, const: :tool_calls - # @!attribute [r] tool_calls + # @!attribute tool_calls # An array of tool calls the run step was involved in. These can be associated - # with one of three types of tools: `code_interpreter`, `file_search`, or - # `function`. + # with one of three types of tools: `code_interpreter`, `file_search`, or + # `function`. # # @return [Array, nil] - optional :tool_calls, -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::Threads::Runs::ToolCallDelta] } + optional :tool_calls, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::Runs::ToolCallDelta] } - # @!parse - # # @return [Array] - # attr_writer :tool_calls - - # @!parse - # # Details of the tool call. 
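# These unions discriminate on the wire `type` field, so a decoded tool call
# arrives as one of the concrete variant classes and plain class matching
# works. A sketch, assuming `tool_call` was taken from a parsed run step:
case tool_call
when OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall
  puts "code interpreter call #{tool_call.id}"
when OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall
  puts "file search call #{tool_call.id}"
when OpenAI::Models::Beta::Threads::Runs::FunctionToolCall
  puts "function call: #{tool_call.function.name}"
end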
- # # - # # @param tool_calls [Array] - # # @param type [Symbol, :tool_calls] - # # - # def initialize(tool_calls: nil, type: :tool_calls, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(tool_calls: nil, type: :tool_calls) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject} for more details. + # + # Details of the tool call. + # + # @param tool_calls [Array] An array of tool calls the run step was involved in. These can be associated wit + # + # @param type [Symbol, :tool_calls] Always `tool_calls`. end end end diff --git a/lib/openai/models/beta/threads/runs/tool_calls_step_details.rb b/lib/openai/models/beta/threads/runs/tool_calls_step_details.rb index b833e0ff..d83defac 100644 --- a/lib/openai/models/beta/threads/runs/tool_calls_step_details.rb +++ b/lib/openai/models/beta/threads/runs/tool_calls_step_details.rb @@ -5,14 +5,14 @@ module Models module Beta module Threads module Runs - class ToolCallsStepDetails < OpenAI::BaseModel + class ToolCallsStepDetails < OpenAI::Internal::Type::BaseModel # @!attribute tool_calls # An array of tool calls the run step was involved in. These can be associated - # with one of three types of tools: `code_interpreter`, `file_search`, or - # `function`. + # with one of three types of tools: `code_interpreter`, `file_search`, or + # `function`. # # @return [Array] - required :tool_calls, -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::Threads::Runs::ToolCall] } + required :tool_calls, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::Runs::ToolCall] } # @!attribute type # Always `tool_calls`. @@ -20,15 +20,15 @@ class ToolCallsStepDetails < OpenAI::BaseModel # @return [Symbol, :tool_calls] required :type, const: :tool_calls - # @!parse - # # Details of the tool call. - # # - # # @param tool_calls [Array] - # # @param type [Symbol, :tool_calls] - # # - # def initialize(tool_calls:, type: :tool_calls, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(tool_calls:, type: :tool_calls) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails} for more details. + # + # Details of the tool call. + # + # @param tool_calls [Array] An array of tool calls the run step was involved in. These can be associated wit + # + # @param type [Symbol, :tool_calls] Always `tool_calls`. end end end diff --git a/lib/openai/models/beta/threads/text.rb b/lib/openai/models/beta/threads/text.rb index f3b930f5..3e914e18 100644 --- a/lib/openai/models/beta/threads/text.rb +++ b/lib/openai/models/beta/threads/text.rb @@ -4,11 +4,11 @@ module OpenAI module Models module Beta module Threads - class Text < OpenAI::BaseModel + class Text < OpenAI::Internal::Type::BaseModel # @!attribute annotations # # @return [Array] - required :annotations, -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::Threads::Annotation] } + required :annotations, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::Annotation] } # @!attribute value # The data that makes up the text. 
@@ -16,13 +16,10 @@ class Text < OpenAI::BaseModel # @return [String] required :value, String - # @!parse - # # @param annotations [Array] - # # @param value [String] - # # - # def initialize(annotations:, value:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(annotations:, value:) + # @param annotations [Array] + # + # @param value [String] The data that makes up the text. end end end diff --git a/lib/openai/models/beta/threads/text_content_block.rb b/lib/openai/models/beta/threads/text_content_block.rb index b70b93fc..c1c391fa 100644 --- a/lib/openai/models/beta/threads/text_content_block.rb +++ b/lib/openai/models/beta/threads/text_content_block.rb @@ -4,11 +4,11 @@ module OpenAI module Models module Beta module Threads - class TextContentBlock < OpenAI::BaseModel + class TextContentBlock < OpenAI::Internal::Type::BaseModel # @!attribute text # # @return [OpenAI::Models::Beta::Threads::Text] - required :text, -> { OpenAI::Models::Beta::Threads::Text } + required :text, -> { OpenAI::Beta::Threads::Text } # @!attribute type # Always `text`. @@ -16,15 +16,12 @@ class TextContentBlock < OpenAI::BaseModel # @return [Symbol, :text] required :type, const: :text - # @!parse - # # The text content that is part of a message. - # # - # # @param text [OpenAI::Models::Beta::Threads::Text] - # # @param type [Symbol, :text] - # # - # def initialize(text:, type: :text, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(text:, type: :text) + # The text content that is part of a message. + # + # @param text [OpenAI::Models::Beta::Threads::Text] + # + # @param type [Symbol, :text] Always `text`. end end end diff --git a/lib/openai/models/beta/threads/text_content_block_param.rb b/lib/openai/models/beta/threads/text_content_block_param.rb index ce067a8c..89ff6435 100644 --- a/lib/openai/models/beta/threads/text_content_block_param.rb +++ b/lib/openai/models/beta/threads/text_content_block_param.rb @@ -4,7 +4,7 @@ module OpenAI module Models module Beta module Threads - class TextContentBlockParam < OpenAI::BaseModel + class TextContentBlockParam < OpenAI::Internal::Type::BaseModel # @!attribute text # Text content to be sent to the model # @@ -17,15 +17,12 @@ class TextContentBlockParam < OpenAI::BaseModel # @return [Symbol, :text] required :type, const: :text - # @!parse - # # The text content that is part of a message. - # # - # # @param text [String] - # # @param type [Symbol, :text] - # # - # def initialize(text:, type: :text, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(text:, type: :text) + # The text content that is part of a message. + # + # @param text [String] Text content to be sent to the model + # + # @param type [Symbol, :text] Always `text`. 
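# A sketch of pulling plain text and its annotations out of message content,
# assuming `message.content` is an array of content blocks as modeled above.
message.content.each do |block|
  next unless block.is_a?(OpenAI::Models::Beta::Threads::TextContentBlock)

  puts block.text.value
  block.text.annotations.each { |annotation| puts "  annotation: #{annotation.class}" }
end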
end end end diff --git a/lib/openai/models/beta/threads/text_delta.rb b/lib/openai/models/beta/threads/text_delta.rb index ee2b9db4..ef33693b 100644 --- a/lib/openai/models/beta/threads/text_delta.rb +++ b/lib/openai/models/beta/threads/text_delta.rb @@ -4,33 +4,23 @@ module OpenAI module Models module Beta module Threads - class TextDelta < OpenAI::BaseModel - # @!attribute [r] annotations + class TextDelta < OpenAI::Internal::Type::BaseModel + # @!attribute annotations # # @return [Array, nil] - optional :annotations, -> { OpenAI::ArrayOf[union: OpenAI::Models::Beta::Threads::AnnotationDelta] } + optional :annotations, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::AnnotationDelta] } - # @!parse - # # @return [Array] - # attr_writer :annotations - - # @!attribute [r] value + # @!attribute value # The data that makes up the text. # # @return [String, nil] optional :value, String - # @!parse - # # @return [String] - # attr_writer :value - - # @!parse - # # @param annotations [Array] - # # @param value [String] - # # - # def initialize(annotations: nil, value: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(annotations: nil, value: nil) + # @param annotations [Array] + # + # @param value [String] The data that makes up the text. end end end diff --git a/lib/openai/models/beta/threads/text_delta_block.rb b/lib/openai/models/beta/threads/text_delta_block.rb index a06f3595..f55a50f7 100644 --- a/lib/openai/models/beta/threads/text_delta_block.rb +++ b/lib/openai/models/beta/threads/text_delta_block.rb @@ -4,7 +4,7 @@ module OpenAI module Models module Beta module Threads - class TextDeltaBlock < OpenAI::BaseModel + class TextDeltaBlock < OpenAI::Internal::Type::BaseModel # @!attribute index # The index of the content part in the message. # @@ -17,25 +17,19 @@ class TextDeltaBlock < OpenAI::BaseModel # @return [Symbol, :text] required :type, const: :text - # @!attribute [r] text + # @!attribute text # # @return [OpenAI::Models::Beta::Threads::TextDelta, nil] - optional :text, -> { OpenAI::Models::Beta::Threads::TextDelta } + optional :text, -> { OpenAI::Beta::Threads::TextDelta } - # @!parse - # # @return [OpenAI::Models::Beta::Threads::TextDelta] - # attr_writer :text - - # @!parse - # # The text content that is part of a message. - # # - # # @param index [Integer] - # # @param text [OpenAI::Models::Beta::Threads::TextDelta] - # # @param type [Symbol, :text] - # # - # def initialize(index:, text: nil, type: :text, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(index:, text: nil, type: :text) + # The text content that is part of a message. + # + # @param index [Integer] The index of the content part in the message. + # + # @param text [OpenAI::Models::Beta::Threads::TextDelta] + # + # @param type [Symbol, :text] Always `text`. end end end diff --git a/lib/openai/models/chat/chat_completion.rb b/lib/openai/models/chat/chat_completion.rb index b80607a6..c288fe61 100644 --- a/lib/openai/models/chat/chat_completion.rb +++ b/lib/openai/models/chat/chat_completion.rb @@ -3,7 +3,10 @@ module OpenAI module Models module Chat - class ChatCompletion < OpenAI::BaseModel + # @see OpenAI::Resources::Chat::Completions#create + # + # @see OpenAI::Resources::Chat::Completions#stream_raw + class ChatCompletion < OpenAI::Internal::Type::BaseModel # @!attribute id # A unique identifier for the chat completion. 
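# Streamed text arrives in fragments, so deltas are best accumulated into
# per-part buffers keyed by the block's `index`. A sketch over the
# TextDeltaBlock shape above; the surrounding stream loop is assumed.
buffers = Hash.new { |h, k| h[k] = +"" }

def apply_text_delta(buffers, delta_block)
  value = delta_block.text&.value
  buffers[delta_block.index] << value if value
end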
# @@ -12,10 +15,10 @@ class ChatCompletion < OpenAI::BaseModel # @!attribute choices # A list of chat completion choices. Can be more than one if `n` is greater - # than 1. + # than 1. # # @return [Array] - required :choices, -> { OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletion::Choice] } + required :choices, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletion::Choice] } # @!attribute created # The Unix timestamp (in seconds) of when the chat completion was created. @@ -36,74 +39,77 @@ class ChatCompletion < OpenAI::BaseModel required :object, const: :"chat.completion" # @!attribute service_tier - # The service tier used for processing the request. + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. # # @return [Symbol, OpenAI::Models::Chat::ChatCompletion::ServiceTier, nil] - optional :service_tier, enum: -> { OpenAI::Models::Chat::ChatCompletion::ServiceTier }, nil?: true + optional :service_tier, enum: -> { OpenAI::Chat::ChatCompletion::ServiceTier }, nil?: true - # @!attribute [r] system_fingerprint + # @!attribute system_fingerprint + # @deprecated + # # This fingerprint represents the backend configuration that the model runs with. # - # Can be used in conjunction with the `seed` request parameter to understand when - # backend changes have been made that might impact determinism. + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism. # # @return [String, nil] optional :system_fingerprint, String - # @!parse - # # @return [String] - # attr_writer :system_fingerprint - - # @!attribute [r] usage + # @!attribute usage # Usage statistics for the completion request. # # @return [OpenAI::Models::CompletionUsage, nil] - optional :usage, -> { OpenAI::Models::CompletionUsage } - - # @!parse - # # @return [OpenAI::Models::CompletionUsage] - # attr_writer :usage - - # @!parse - # # Represents a chat completion response returned by model, based on the provided - # # input. 
- # # - # # @param id [String] - # # @param choices [Array] - # # @param created [Integer] - # # @param model [String] - # # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletion::ServiceTier, nil] - # # @param system_fingerprint [String] - # # @param usage [OpenAI::Models::CompletionUsage] - # # @param object [Symbol, :"chat.completion"] - # # - # def initialize( - # id:, - # choices:, - # created:, - # model:, - # service_tier: nil, - # system_fingerprint: nil, - # usage: nil, - # object: :"chat.completion", - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Choice < OpenAI::BaseModel + optional :usage, -> { OpenAI::CompletionUsage } + + # @!method initialize(id:, choices:, created:, model:, service_tier: nil, system_fingerprint: nil, usage: nil, object: :"chat.completion") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletion} for more details. + # + # Represents a chat completion response returned by model, based on the provided + # input. + # + # @param id [String] A unique identifier for the chat completion. + # + # @param choices [Array] A list of chat completion choices. Can be more than one if `n` is greater than 1 + # + # @param created [Integer] The Unix timestamp (in seconds) of when the chat completion was created. + # + # @param model [String] The model used for the chat completion. + # + # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletion::ServiceTier, nil] Specifies the processing type used for serving the request. + # + # @param system_fingerprint [String] This fingerprint represents the backend configuration that the model runs with. + # + # @param usage [OpenAI::Models::CompletionUsage] Usage statistics for the completion request. + # + # @param object [Symbol, :"chat.completion"] The object type, which is always `chat.completion`. + + class Choice < OpenAI::Internal::Type::BaseModel # @!attribute finish_reason # The reason the model stopped generating tokens. This will be `stop` if the model - # hit a natural stop point or a provided stop sequence, `length` if the maximum - # number of tokens specified in the request was reached, `content_filter` if - # content was omitted due to a flag from our content filters, `tool_calls` if the - # model called a tool, or `function_call` (deprecated) if the model called a - # function. + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. # # @return [Symbol, OpenAI::Models::Chat::ChatCompletion::Choice::FinishReason] - required :finish_reason, enum: -> { OpenAI::Models::Chat::ChatCompletion::Choice::FinishReason } + required :finish_reason, enum: -> { OpenAI::Chat::ChatCompletion::Choice::FinishReason } # @!attribute index # The index of the choice in the list of choices. @@ -115,75 +121,103 @@ class Choice < OpenAI::BaseModel # Log probability information for the choice. # # @return [OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs, nil] - required :logprobs, -> { OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs }, nil?: true + required :logprobs, -> { OpenAI::Chat::ChatCompletion::Choice::Logprobs }, nil?: true # @!attribute message # A chat completion message generated by the model. 
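# A sketch of reading the fields modeled above off a finished response,
# assuming `completion` is a parsed OpenAI::Models::Chat::ChatCompletion.
choice = completion.choices.first
puts choice.message.content
puts "finish_reason: #{choice.finish_reason}"
puts "service_tier: #{completion.service_tier}" if completion.service_tier
puts "total_tokens: #{completion.usage.total_tokens}" if completion.usage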
# # @return [OpenAI::Models::Chat::ChatCompletionMessage] - required :message, -> { OpenAI::Models::Chat::ChatCompletionMessage } - - # @!parse - # # @param finish_reason [Symbol, OpenAI::Models::Chat::ChatCompletion::Choice::FinishReason] - # # @param index [Integer] - # # @param logprobs [OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs, nil] - # # @param message [OpenAI::Models::Chat::ChatCompletionMessage] - # # - # def initialize(finish_reason:, index:, logprobs:, message:, **) = super + required :message, -> { OpenAI::Chat::ChatCompletionMessage } - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(finish_reason:, index:, logprobs:, message:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletion::Choice} for more details. + # + # @param finish_reason [Symbol, OpenAI::Models::Chat::ChatCompletion::Choice::FinishReason] The reason the model stopped generating tokens. This will be `stop` if the model + # + # @param index [Integer] The index of the choice in the list of choices. # + # @param logprobs [OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs, nil] Log probability information for the choice. + # + # @param message [OpenAI::Models::Chat::ChatCompletionMessage] A chat completion message generated by the model. + # The reason the model stopped generating tokens. This will be `stop` if the model - # hit a natural stop point or a provided stop sequence, `length` if the maximum - # number of tokens specified in the request was reached, `content_filter` if - # content was omitted due to a flag from our content filters, `tool_calls` if the - # model called a tool, or `function_call` (deprecated) if the model called a - # function. - class FinishReason < OpenAI::Enum + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. + # + # @see OpenAI::Models::Chat::ChatCompletion::Choice#finish_reason + module FinishReason + extend OpenAI::Internal::Type::Enum + STOP = :stop LENGTH = :length TOOL_CALLS = :tool_calls CONTENT_FILTER = :content_filter FUNCTION_CALL = :function_call - finalize! + # @!method self.values + # @return [Array] end - class Logprobs < OpenAI::BaseModel + # @see OpenAI::Models::Chat::ChatCompletion::Choice#logprobs + class Logprobs < OpenAI::Internal::Type::BaseModel # @!attribute content # A list of message content tokens with log probability information. # # @return [Array, nil] - required :content, -> { OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletionTokenLogprob] }, nil?: true + required :content, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob] }, + nil?: true # @!attribute refusal # A list of message refusal tokens with log probability information. # # @return [Array, nil] - required :refusal, -> { OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletionTokenLogprob] }, nil?: true - - # @!parse - # # Log probability information for the choice. 
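# Token log probabilities convert back to probabilities with exp(). A sketch
# over the Logprobs content above, assuming logprobs were requested and
# reusing `choice` from the previous sketch:
if (logprobs = choice.logprobs) && logprobs.content
  logprobs.content.each do |token_logprob|
    probability = Math.exp(token_logprob.logprob)
    printf("%-12s p=%.4f\n", token_logprob.token, probability)
  end
end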
- # # - # # @param content [Array, nil] - # # @param refusal [Array, nil] - # # - # def initialize(content:, refusal:, **) = super + required :refusal, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob] }, + nil?: true - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(content:, refusal:) + # Log probability information for the choice. + # + # @param content [Array, nil] A list of message content tokens with log probability information. + # + # @param refusal [Array, nil] A list of message refusal tokens with log probability information. end end - # @abstract + # Specifies the processing type used for serving the request. # - # The service tier used for processing the request. - class ServiceTier < OpenAI::Enum - SCALE = :scale + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + # + # @see OpenAI::Models::Chat::ChatCompletion#service_tier + module ServiceTier + extend OpenAI::Internal::Type::Enum + + AUTO = :auto DEFAULT = :default + FLEX = :flex + SCALE = :scale + PRIORITY = :priority - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb b/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb new file mode 100644 index 00000000..bc81ddfb --- /dev/null +++ b/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Chat + class ChatCompletionAllowedToolChoice < OpenAI::Internal::Type::BaseModel + # @!attribute allowed_tools + # Constrains the tools available to the model to a pre-defined set. + # + # @return [OpenAI::Models::Chat::ChatCompletionAllowedTools] + required :allowed_tools, -> { OpenAI::Chat::ChatCompletionAllowedTools } + + # @!attribute type + # Allowed tool configuration type. Always `allowed_tools`. + # + # @return [Symbol, :allowed_tools] + required :type, const: :allowed_tools + + # @!method initialize(allowed_tools:, type: :allowed_tools) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionAllowedToolChoice} for more details. + # + # Constrains the tools available to the model to a pre-defined set. + # + # @param allowed_tools [OpenAI::Models::Chat::ChatCompletionAllowedTools] Constrains the tools available to the model to a pre-defined set. + # + # @param type [Symbol, :allowed_tools] Allowed tool configuration type. Always `allowed_tools`. 
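# A sketch of opting into a non-default processing tier and checking which
# tier actually served the request, per the ServiceTier notes above; the
# model name and tier choice are illustrative.
completion = client.chat.completions.create(
  model: "gpt-4o",
  messages: [{role: :user, content: "Hello"}],
  service_tier: :flex
)
puts completion.service_tier # may differ from the requested value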
+ end + end + + ChatCompletionAllowedToolChoice = Chat::ChatCompletionAllowedToolChoice + end +end diff --git a/lib/openai/models/chat/chat_completion_allowed_tools.rb b/lib/openai/models/chat/chat_completion_allowed_tools.rb new file mode 100644 index 00000000..59d2c94b --- /dev/null +++ b/lib/openai/models/chat/chat_completion_allowed_tools.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Chat + class ChatCompletionAllowedTools < OpenAI::Internal::Type::BaseModel + # @!attribute mode + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + # + # @return [Symbol, OpenAI::Models::Chat::ChatCompletionAllowedTools::Mode] + required :mode, enum: -> { OpenAI::Chat::ChatCompletionAllowedTools::Mode } + + # @!attribute tools + # A list of tool definitions that the model should be allowed to call. + # + # For the Chat Completions API, the list of tool definitions might look like: + # + # ```json + # [ + # { "type": "function", "function": { "name": "get_weather" } }, + # { "type": "function", "function": { "name": "get_time" } } + # ] + # ``` + # + # @return [ArrayObject}>] + required :tools, + OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]] + + # @!method initialize(mode:, tools:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionAllowedTools} for more details. + # + # Constrains the tools available to the model to a pre-defined set. + # + # @param mode [Symbol, OpenAI::Models::Chat::ChatCompletionAllowedTools::Mode] Constrains the tools available to the model to a pre-defined set. + # + # @param tools [ArrayObject}>] A list of tool definitions that the model should be allowed to call. + + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + # + # @see OpenAI::Models::Chat::ChatCompletionAllowedTools#mode + module Mode + extend OpenAI::Internal::Type::Enum + + AUTO = :auto + REQUIRED = :required + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/chat/chat_completion_assistant_message_param.rb b/lib/openai/models/chat/chat_completion_assistant_message_param.rb index 7dcbccab..ddff94b5 100644 --- a/lib/openai/models/chat/chat_completion_assistant_message_param.rb +++ b/lib/openai/models/chat/chat_completion_assistant_message_param.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Chat - class ChatCompletionAssistantMessageParam < OpenAI::BaseModel + class ChatCompletionAssistantMessageParam < OpenAI::Internal::Type::BaseModel # @!attribute role # The role of the messages author, in this case `assistant`. # @@ -12,135 +12,140 @@ class ChatCompletionAssistantMessageParam < OpenAI::BaseModel # @!attribute audio # Data about a previous audio response from the model. - # [Learn more](https://platform.openai.com/docs/guides/audio). + # [Learn more](https://platform.openai.com/docs/guides/audio). 
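# A sketch of the allowed-tools tool choice, mirroring the JSON shape in the
# docs above; the tool names and the exact request nesting are illustrative
# rather than confirmed by this diff.
tool_choice = {
  type: :allowed_tools,
  allowed_tools: {
    mode: :auto,
    tools: [
      {type: :function, function: {name: "get_weather"}},
      {type: :function, function: {name: "get_time"}}
    ]
  }
}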
# # @return [OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio, nil] - optional :audio, -> { OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio }, nil?: true + optional :audio, -> { OpenAI::Chat::ChatCompletionAssistantMessageParam::Audio }, nil?: true # @!attribute content # The contents of the assistant message. Required unless `tool_calls` or - # `function_call` is specified. + # `function_call` is specified. # # @return [String, Array, nil] optional :content, - union: -> { OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Content }, + union: -> { + OpenAI::Chat::ChatCompletionAssistantMessageParam::Content + }, nil?: true # @!attribute function_call + # @deprecated + # # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # that should be called, as generated by the model. + # that should be called, as generated by the model. # # @return [OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall, nil] optional :function_call, - -> { OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall }, + -> { OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall }, nil?: true - # @!attribute [r] name + # @!attribute name # An optional name for the participant. Provides the model information to - # differentiate between participants of the same role. + # differentiate between participants of the same role. # # @return [String, nil] optional :name, String - # @!parse - # # @return [String] - # attr_writer :name - # @!attribute refusal # The refusal message by the assistant. # # @return [String, nil] optional :refusal, String, nil?: true - # @!attribute [r] tool_calls + # @!attribute tool_calls # The tool calls generated by the model, such as function calls. # - # @return [Array, nil] - optional :tool_calls, -> { OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletionMessageToolCall] } - - # @!parse - # # @return [Array] - # attr_writer :tool_calls - - # @!parse - # # Messages sent by the model in response to user messages. - # # - # # @param audio [OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio, nil] - # # @param content [String, Array, nil] - # # @param function_call [OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall, nil] - # # @param name [String] - # # @param refusal [String, nil] - # # @param tool_calls [Array] - # # @param role [Symbol, :assistant] - # # - # def initialize( - # audio: nil, - # content: nil, - # function_call: nil, - # name: nil, - # refusal: nil, - # tool_calls: nil, - # role: :assistant, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Audio < OpenAI::BaseModel + # @return [Array, nil] + optional :tool_calls, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Chat::ChatCompletionMessageToolCall] } + + # @!method initialize(audio: nil, content: nil, function_call: nil, name: nil, refusal: nil, tool_calls: nil, role: :assistant) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionAssistantMessageParam} for more details. + # + # Messages sent by the model in response to user messages. + # + # @param audio [OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio, nil] Data about a previous audio response from the model. + # + # @param content [String, Array, nil] The contents of the assistant message. 
Required unless `tool_calls` or `function + # + # @param function_call [OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall, nil] Deprecated and replaced by `tool_calls`. The name and arguments of a function th + # + # @param name [String] An optional name for the participant. Provides the model information to differen + # + # @param refusal [String, nil] The refusal message by the assistant. + # + # @param tool_calls [Array] The tool calls generated by the model, such as function calls. + # + # @param role [Symbol, :assistant] The role of the messages author, in this case `assistant`. + + # @see OpenAI::Models::Chat::ChatCompletionAssistantMessageParam#audio + class Audio < OpenAI::Internal::Type::BaseModel # @!attribute id # Unique identifier for a previous audio response from the model. # # @return [String] required :id, String - # @!parse - # # Data about a previous audio response from the model. - # # [Learn more](https://platform.openai.com/docs/guides/audio). - # # - # # @param id [String] - # # - # def initialize(id:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio} for more + # details. + # + # Data about a previous audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). + # + # @param id [String] Unique identifier for a previous audio response from the model. end - # @abstract - # # The contents of the assistant message. Required unless `tool_calls` or - # `function_call` is specified. - class Content < OpenAI::Union - ArrayOfContentPartArray = OpenAI::ArrayOf[union: -> { OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Content::ArrayOfContentPart }] + # `function_call` is specified. + # + # @see OpenAI::Models::Chat::ChatCompletionAssistantMessageParam#content + module Content + extend OpenAI::Internal::Type::Union # The contents of the assistant message. variant String # An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`. - variant OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Content::ArrayOfContentPartArray + variant -> { OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Content::ArrayOfContentPartArray } - # @abstract - # # Learn about - # [text inputs](https://platform.openai.com/docs/guides/text-generation). - class ArrayOfContentPart < OpenAI::Union + # [text inputs](https://platform.openai.com/docs/guides/text-generation). + module ArrayOfContentPart + extend OpenAI::Internal::Type::Union + discriminator :type # Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation). 
- variant :text, -> { OpenAI::Models::Chat::ChatCompletionContentPartText } + variant :text, -> { OpenAI::Chat::ChatCompletionContentPartText } - variant :refusal, -> { OpenAI::Models::Chat::ChatCompletionContentPartRefusal } + variant :refusal, -> { OpenAI::Chat::ChatCompletionContentPartRefusal } + + # @!method self.variants + # @return [Array(OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartRefusal)] end + + # @!method self.variants + # @return [Array(String, Array)] + + # @type [OpenAI::Internal::Type::Converter] + ArrayOfContentPartArray = + OpenAI::Internal::Type::ArrayOf[union: -> { + OpenAI::Chat::ChatCompletionAssistantMessageParam::Content::ArrayOfContentPart + }] end # @deprecated # - class FunctionCall < OpenAI::BaseModel + # @see OpenAI::Models::Chat::ChatCompletionAssistantMessageParam#function_call + class FunctionCall < OpenAI::Internal::Type::BaseModel # @!attribute arguments # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. # # @return [String] required :arguments, String @@ -151,16 +156,17 @@ class FunctionCall < OpenAI::BaseModel # @return [String] required :name, String - # @!parse - # # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # # that should be called, as generated by the model. - # # - # # @param arguments [String] - # # @param name [String] - # # - # def initialize(arguments:, name:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(arguments:, name:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall} for + # more details. + # + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. + # + # @param arguments [String] The arguments to call the function with, as generated by the model in JSON forma + # + # @param name [String] The name of the function to call. end end end diff --git a/lib/openai/models/chat/chat_completion_audio.rb b/lib/openai/models/chat/chat_completion_audio.rb index 2a8b5bca..fae460ec 100644 --- a/lib/openai/models/chat/chat_completion_audio.rb +++ b/lib/openai/models/chat/chat_completion_audio.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Chat - class ChatCompletionAudio < OpenAI::BaseModel + class ChatCompletionAudio < OpenAI::Internal::Type::BaseModel # @!attribute id # Unique identifier for this audio response. # @@ -12,14 +12,14 @@ class ChatCompletionAudio < OpenAI::BaseModel # @!attribute data # Base64 encoded audio bytes generated by the model, in the format specified in - # the request. + # the request. # # @return [String] required :data, String # @!attribute expires_at # The Unix timestamp (in seconds) for when this audio response will no longer be - # accessible on the server for use in multi-turn conversations. + # accessible on the server for use in multi-turn conversations. 
# # @return [Integer] required :expires_at, Integer @@ -30,19 +30,21 @@ class ChatCompletionAudio < OpenAI::BaseModel # @return [String] required :transcript, String - # @!parse - # # If the audio output modality is requested, this object contains data about the - # # audio response from the model. - # # [Learn more](https://platform.openai.com/docs/guides/audio). - # # - # # @param id [String] - # # @param data [String] - # # @param expires_at [Integer] - # # @param transcript [String] - # # - # def initialize(id:, data:, expires_at:, transcript:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, data:, expires_at:, transcript:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionAudio} for more details. + # + # If the audio output modality is requested, this object contains data about the + # audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). + # + # @param id [String] Unique identifier for this audio response. + # + # @param data [String] Base64 encoded audio bytes generated by the model, in the format + # + # @param expires_at [Integer] The Unix timestamp (in seconds) for when this audio response will + # + # @param transcript [String] Transcript of the audio generated by the model. end end diff --git a/lib/openai/models/chat/chat_completion_audio_param.rb b/lib/openai/models/chat/chat_completion_audio_param.rb index 5d72b2de..04df08e2 100644 --- a/lib/openai/models/chat/chat_completion_audio_param.rb +++ b/lib/openai/models/chat/chat_completion_audio_param.rb @@ -3,52 +3,85 @@ module OpenAI module Models module Chat - class ChatCompletionAudioParam < OpenAI::BaseModel + class ChatCompletionAudioParam < OpenAI::Internal::Type::BaseModel # @!attribute format_ # Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, - # or `pcm16`. + # or `pcm16`. # # @return [Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Format] - required :format_, enum: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Format }, api_name: :format + required :format_, enum: -> { OpenAI::Chat::ChatCompletionAudioParam::Format }, api_name: :format # @!attribute voice # The voice the model uses to respond. Supported voices are `alloy`, `ash`, - # `ballad`, `coral`, `echo`, `sage`, and `shimmer`. + # `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`. + # + # @return [String, Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Voice] + required :voice, union: -> { OpenAI::Chat::ChatCompletionAudioParam::Voice } + + # @!method initialize(format_:, voice:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionAudioParam} for more details. # - # @return [Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Voice] - required :voice, enum: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice } - - # @!parse - # # Parameters for audio output. Required when audio output is requested with - # # `modalities: ["audio"]`. - # # [Learn more](https://platform.openai.com/docs/guides/audio). - # # - # # @param format_ [Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Format] - # # @param voice [Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Voice] - # # - # def initialize(format_:, voice:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # Parameters for audio output. 
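# A sketch of requesting audio output and then referencing it by `id` in a
# follow-up turn, per the expiry and multi-turn notes above; the model, voice,
# and request params are illustrative.
require "base64"

completion = client.chat.completions.create(
  model: "gpt-4o-audio-preview",
  modalities: [:text, :audio],
  audio: {format: :wav, voice: :alloy},
  messages: [{role: :user, content: "Tell me a joke"}]
)

audio = completion.choices.first.message.audio
File.binwrite("joke.wav", Base64.decode64(audio.data)) if audio

# Later turns can reference the stored response by id instead of resending bytes.
followup = [
  {role: :user, content: "Tell me a joke"},
  {role: :assistant, audio: {id: audio.id}},
  {role: :user, content: "Another one, please"}
]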
Required when audio output is requested with + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). # + # @param format_ [Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Format] Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, + # + # @param voice [String, Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Voice] The voice the model uses to respond. Supported voices are + # Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, - # or `pcm16`. - class Format < OpenAI::Enum + # or `pcm16`. + # + # @see OpenAI::Models::Chat::ChatCompletionAudioParam#format_ + module Format + extend OpenAI::Internal::Type::Enum + WAV = :wav + AAC = :aac MP3 = :mp3 FLAC = :flac OPUS = :opus PCM16 = :pcm16 - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # # The voice the model uses to respond. Supported voices are `alloy`, `ash`, - # `ballad`, `coral`, `echo`, `sage`, and `shimmer`. - class Voice < OpenAI::Enum + # `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`. + # + # @see OpenAI::Models::Chat::ChatCompletionAudioParam#voice + module Voice + extend OpenAI::Internal::Type::Union + + variant String + + variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::ALLOY } + + variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::ASH } + + variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::BALLAD } + + variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::CORAL } + + variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::ECHO } + + variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::SAGE } + + variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::SHIMMER } + + variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::VERSE } + + # @!method self.variants + # @return [Array(String, Symbol)] + + define_sorbet_constant!(:Variants) do + T.type_alias { T.any(String, OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol) } + end + + # @!group + ALLOY = :alloy ASH = :ash BALLAD = :ballad @@ -58,7 +91,7 @@ class Voice < OpenAI::Enum SHIMMER = :shimmer VERSE = :verse - finalize! + # @!endgroup end end end diff --git a/lib/openai/models/chat/chat_completion_chunk.rb b/lib/openai/models/chat/chat_completion_chunk.rb index 8d0ec4c1..4e2642bd 100644 --- a/lib/openai/models/chat/chat_completion_chunk.rb +++ b/lib/openai/models/chat/chat_completion_chunk.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Chat - class ChatCompletionChunk < OpenAI::BaseModel + class ChatCompletionChunk < OpenAI::Internal::Type::BaseModel # @!attribute id # A unique identifier for the chat completion. Each chunk has the same ID. # @@ -12,15 +12,15 @@ class ChatCompletionChunk < OpenAI::BaseModel # @!attribute choices # A list of chat completion choices. Can contain more than one elements if `n` is - # greater than 1. Can also be empty for the last chunk if you set - # `stream_options: {"include_usage": true}`. + # greater than 1. Can also be empty for the last chunk if you set + # `stream_options: {"include_usage": true}`. 
# # @return [Array] - required :choices, -> { OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletionChunk::Choice] } + required :choices, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionChunk::Choice] } # @!attribute created # The Unix timestamp (in seconds) of when the chat completion was created. Each - # chunk has the same timestamp. + # chunk has the same timestamp. # # @return [Integer] required :created, Integer @@ -38,80 +38,92 @@ class ChatCompletionChunk < OpenAI::BaseModel required :object, const: :"chat.completion.chunk" # @!attribute service_tier - # The service tier used for processing the request. + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. # # @return [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::ServiceTier, nil] - optional :service_tier, enum: -> { OpenAI::Models::Chat::ChatCompletionChunk::ServiceTier }, nil?: true + optional :service_tier, enum: -> { OpenAI::Chat::ChatCompletionChunk::ServiceTier }, nil?: true - # @!attribute [r] system_fingerprint + # @!attribute system_fingerprint + # @deprecated + # # This fingerprint represents the backend configuration that the model runs with. - # Can be used in conjunction with the `seed` request parameter to understand when - # backend changes have been made that might impact determinism. + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism. # # @return [String, nil] optional :system_fingerprint, String - # @!parse - # # @return [String] - # attr_writer :system_fingerprint - # @!attribute usage # An optional field that will only be present when you set - # `stream_options: {"include_usage": true}` in your request. When present, it - # contains a null value except for the last chunk which contains the token usage - # statistics for the entire request. + # `stream_options: {"include_usage": true}` in your request. When present, it + # contains a null value **except for the last chunk** which contains the token + # usage statistics for the entire request. + # + # **NOTE:** If the stream is interrupted or cancelled, you may not receive the + # final usage chunk which contains the total token usage for the request. # # @return [OpenAI::Models::CompletionUsage, nil] - optional :usage, -> { OpenAI::Models::CompletionUsage }, nil?: true - - # @!parse - # # Represents a streamed chunk of a chat completion response returned by the model, - # # based on the provided input. - # # [Learn more](https://platform.openai.com/docs/guides/streaming-responses). 
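# A sketch of streaming with usage reporting enabled: the final chunk can
# carry `usage` alongside an empty `choices` array, so guard before indexing.
# The `stream_raw` entry point is referenced by this diff; the param shape
# here is assumed.
stream = client.chat.completions.stream_raw(
  model: "gpt-4o",
  messages: [{role: :user, content: "Hi"}],
  stream_options: {include_usage: true}
)

stream.each do |chunk|
  print chunk.choices.first&.delta&.content if chunk.choices.any?
  puts "\ntotal_tokens: #{chunk.usage.total_tokens}" if chunk.usage
end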
- # # - # # @param id [String] - # # @param choices [Array] - # # @param created [Integer] - # # @param model [String] - # # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::ServiceTier, nil] - # # @param system_fingerprint [String] - # # @param usage [OpenAI::Models::CompletionUsage, nil] - # # @param object [Symbol, :"chat.completion.chunk"] - # # - # def initialize( - # id:, - # choices:, - # created:, - # model:, - # service_tier: nil, - # system_fingerprint: nil, - # usage: nil, - # object: :"chat.completion.chunk", - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Choice < OpenAI::BaseModel + optional :usage, -> { OpenAI::CompletionUsage }, nil?: true + + # @!method initialize(id:, choices:, created:, model:, service_tier: nil, system_fingerprint: nil, usage: nil, object: :"chat.completion.chunk") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionChunk} for more details. + # + # Represents a streamed chunk of a chat completion response returned by the model, + # based on the provided input. + # [Learn more](https://platform.openai.com/docs/guides/streaming-responses). + # + # @param id [String] A unique identifier for the chat completion. Each chunk has the same ID. + # + # @param choices [Array] A list of chat completion choices. Can contain more than one elements if `n` is + # + # @param created [Integer] The Unix timestamp (in seconds) of when the chat completion was created. Each ch + # + # @param model [String] The model to generate the completion. + # + # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::ServiceTier, nil] Specifies the processing type used for serving the request. + # + # @param system_fingerprint [String] This fingerprint represents the backend configuration that the model runs with. + # + # @param usage [OpenAI::Models::CompletionUsage, nil] An optional field that will only be present when you set + # + # @param object [Symbol, :"chat.completion.chunk"] The object type, which is always `chat.completion.chunk`. + + class Choice < OpenAI::Internal::Type::BaseModel # @!attribute delta # A chat completion delta generated by streamed model responses. # # @return [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta] - required :delta, -> { OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta } + required :delta, -> { OpenAI::Chat::ChatCompletionChunk::Choice::Delta } # @!attribute finish_reason # The reason the model stopped generating tokens. This will be `stop` if the model - # hit a natural stop point or a provided stop sequence, `length` if the maximum - # number of tokens specified in the request was reached, `content_filter` if - # content was omitted due to a flag from our content filters, `tool_calls` if the - # model called a tool, or `function_call` (deprecated) if the model called a - # function. + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. 
# # @return [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::FinishReason, nil] required :finish_reason, - enum: -> { OpenAI::Models::Chat::ChatCompletionChunk::Choice::FinishReason }, + enum: -> { + OpenAI::Chat::ChatCompletionChunk::Choice::FinishReason + }, nil?: true # @!attribute index @@ -124,35 +136,36 @@ class Choice < OpenAI::BaseModel # Log probability information for the choice. # # @return [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs, nil] - optional :logprobs, -> { OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs }, nil?: true - - # @!parse - # # @param delta [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta] - # # @param finish_reason [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::FinishReason, nil] - # # @param index [Integer] - # # @param logprobs [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs, nil] - # # - # def initialize(delta:, finish_reason:, index:, logprobs: nil, **) = super + optional :logprobs, -> { OpenAI::Chat::ChatCompletionChunk::Choice::Logprobs }, nil?: true - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(delta:, finish_reason:, index:, logprobs: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionChunk::Choice} for more details. + # + # @param delta [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta] A chat completion delta generated by streamed model responses. + # + # @param finish_reason [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::FinishReason, nil] The reason the model stopped generating tokens. This will be `stop` if the model + # + # @param index [Integer] The index of the choice in the list of choices. + # + # @param logprobs [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs, nil] Log probability information for the choice. - class Delta < OpenAI::BaseModel + # @see OpenAI::Models::Chat::ChatCompletionChunk::Choice#delta + class Delta < OpenAI::Internal::Type::BaseModel # @!attribute content # The contents of the chunk message. # # @return [String, nil] optional :content, String, nil?: true - # @!attribute [r] function_call + # @!attribute function_call + # @deprecated + # # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # that should be called, as generated by the model. + # that should be called, as generated by the model. # # @return [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall, nil] - optional :function_call, -> { OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall } - - # @!parse - # # @return [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall] - # attr_writer :function_call + optional :function_call, -> { OpenAI::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall } # @!attribute refusal # The refusal message generated by the model. @@ -160,230 +173,232 @@ class Delta < OpenAI::BaseModel # @return [String, nil] optional :refusal, String, nil?: true - # @!attribute [r] role + # @!attribute role # The role of the author of this message. 
# # @return [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::Role, nil] - optional :role, enum: -> { OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::Role } + optional :role, enum: -> { OpenAI::Chat::ChatCompletionChunk::Choice::Delta::Role } - # @!parse - # # @return [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::Role] - # attr_writer :role - - # @!attribute [r] tool_calls + # @!attribute tool_calls # # @return [Array, nil] optional :tool_calls, - -> { OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall] } - - # @!parse - # # @return [Array] - # attr_writer :tool_calls + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall] } - # @!parse - # # A chat completion delta generated by streamed model responses. - # # - # # @param content [String, nil] - # # @param function_call [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall] - # # @param refusal [String, nil] - # # @param role [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::Role] - # # @param tool_calls [Array] - # # - # def initialize(content: nil, function_call: nil, refusal: nil, role: nil, tool_calls: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(content: nil, function_call: nil, refusal: nil, role: nil, tool_calls: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta} for more details. + # + # A chat completion delta generated by streamed model responses. + # + # @param content [String, nil] The contents of the chunk message. + # + # @param function_call [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall] Deprecated and replaced by `tool_calls`. The name and arguments of a function th + # + # @param refusal [String, nil] The refusal message generated by the model. + # + # @param role [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::Role] The role of the author of this message. + # + # @param tool_calls [Array] # @deprecated # - class FunctionCall < OpenAI::BaseModel - # @!attribute [r] arguments + # @see OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta#function_call + class FunctionCall < OpenAI::Internal::Type::BaseModel + # @!attribute arguments # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. # # @return [String, nil] optional :arguments, String - # @!parse - # # @return [String] - # attr_writer :arguments - - # @!attribute [r] name + # @!attribute name # The name of the function to call. # # @return [String, nil] optional :name, String - # @!parse - # # @return [String] - # attr_writer :name - - # @!parse - # # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # # that should be called, as generated by the model. 
- # # - # # @param arguments [String] - # # @param name [String] - # # - # def initialize(arguments: nil, name: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(arguments: nil, name: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall} for + # more details. + # + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. + # + # @param arguments [String] The arguments to call the function with, as generated by the model in JSON forma + # + # @param name [String] The name of the function to call. end - # @abstract - # # The role of the author of this message. - class Role < OpenAI::Enum + # + # @see OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta#role + module Role + extend OpenAI::Internal::Type::Enum + DEVELOPER = :developer SYSTEM = :system USER = :user ASSISTANT = :assistant TOOL = :tool - finalize! + # @!method self.values + # @return [Array] end - class ToolCall < OpenAI::BaseModel + class ToolCall < OpenAI::Internal::Type::BaseModel # @!attribute index # # @return [Integer] required :index, Integer - # @!attribute [r] id + # @!attribute id # The ID of the tool call. # # @return [String, nil] optional :id, String - # @!parse - # # @return [String] - # attr_writer :id - - # @!attribute [r] function + # @!attribute function # # @return [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function, nil] - optional :function, -> { OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function } + optional :function, -> { OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function } - # @!parse - # # @return [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function] - # attr_writer :function - - # @!attribute [r] type + # @!attribute type # The type of the tool. Currently, only `function` is supported. # # @return [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type, nil] - optional :type, enum: -> { OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type } - - # @!parse - # # @return [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type] - # attr_writer :type + optional :type, enum: -> { OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type } - # @!parse - # # @param index [Integer] - # # @param id [String] - # # @param function [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function] - # # @param type [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type] - # # - # def initialize(index:, id: nil, function: nil, type: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(index:, id: nil, function: nil, type: nil) + # @param index [Integer] + # + # @param id [String] The ID of the tool call. + # + # @param function [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function] + # + # @param type [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type] The type of the tool. Currently, only `function` is supported. 
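The Delta fragments above only become useful once reassembled: `content` arrives as string pieces, and tool calls arrive keyed by `index`, with `function.arguments` split across many chunks. A minimal consumption sketch follows; it assumes the SDK's raw streaming method (named `stream_raw` here, an assumption) yields `ChatCompletionChunk` objects, and the model name and prompt are illustrative.

    require "openai"

    client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

    content = +""
    calls = Hash.new { |h, k| h[k] = {id: nil, name: nil, arguments: +""} }

    client.chat.completions.stream_raw(
      model: "gpt-4.1",
      messages: [{role: :user, content: "Plan a 3-day trip to Kyoto."}]
    ).each do |chunk|
      chunk.choices.each do |choice|
        delta = choice.delta
        content << delta.content if delta.content

        (delta.tool_calls || []).each do |fragment|
          # Fragments of one logical call share an index: id and name show up
          # once, while arguments accumulate across chunks.
          slot = calls[fragment.index]
          slot[:id] ||= fragment.id
          slot[:name] ||= fragment.function&.name
          slot[:arguments] << fragment.function.arguments if fragment.function&.arguments
        end
        # choice.finish_reason stays nil until the final chunk for this choice.
      end
    end

    puts content

Note that `finish_reason`, not an exception, is how the stream signals `length` truncation or `content_filter` omissions, so an accumulator like this should check it before trusting the result.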
- class Function < OpenAI::BaseModel - # @!attribute [r] arguments + # @see OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall#function + class Function < OpenAI::Internal::Type::BaseModel + # @!attribute arguments # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. # # @return [String, nil] optional :arguments, String - # @!parse - # # @return [String] - # attr_writer :arguments - - # @!attribute [r] name + # @!attribute name # The name of the function to call. # # @return [String, nil] optional :name, String - # @!parse - # # @return [String] - # attr_writer :name - - # @!parse - # # @param arguments [String] - # # @param name [String] - # # - # def initialize(arguments: nil, name: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(arguments: nil, name: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function} + # for more details. + # + # @param arguments [String] The arguments to call the function with, as generated by the model in JSON forma + # + # @param name [String] The name of the function to call. end - # @abstract - # # The type of the tool. Currently, only `function` is supported. - class Type < OpenAI::Enum + # + # @see OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall#type + module Type + extend OpenAI::Internal::Type::Enum + FUNCTION = :function - finalize! + # @!method self.values + # @return [Array] end end end - # @abstract - # # The reason the model stopped generating tokens. This will be `stop` if the model - # hit a natural stop point or a provided stop sequence, `length` if the maximum - # number of tokens specified in the request was reached, `content_filter` if - # content was omitted due to a flag from our content filters, `tool_calls` if the - # model called a tool, or `function_call` (deprecated) if the model called a - # function. - class FinishReason < OpenAI::Enum + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. + # + # @see OpenAI::Models::Chat::ChatCompletionChunk::Choice#finish_reason + module FinishReason + extend OpenAI::Internal::Type::Enum + STOP = :stop LENGTH = :length TOOL_CALLS = :tool_calls CONTENT_FILTER = :content_filter FUNCTION_CALL = :function_call - finalize! + # @!method self.values + # @return [Array] end - class Logprobs < OpenAI::BaseModel + # @see OpenAI::Models::Chat::ChatCompletionChunk::Choice#logprobs + class Logprobs < OpenAI::Internal::Type::BaseModel # @!attribute content # A list of message content tokens with log probability information. 
# # @return [Array, nil] - required :content, -> { OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletionTokenLogprob] }, nil?: true + required :content, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob] }, + nil?: true # @!attribute refusal # A list of message refusal tokens with log probability information. # # @return [Array, nil] - required :refusal, -> { OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletionTokenLogprob] }, nil?: true - - # @!parse - # # Log probability information for the choice. - # # - # # @param content [Array, nil] - # # @param refusal [Array, nil] - # # - # def initialize(content:, refusal:, **) = super + required :refusal, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob] }, + nil?: true - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(content:, refusal:) + # Log probability information for the choice. + # + # @param content [Array, nil] A list of message content tokens with log probability information. + # + # @param refusal [Array, nil] A list of message refusal tokens with log probability information. end end - # @abstract + # Specifies the processing type used for serving the request. # - # The service tier used for processing the request. - class ServiceTier < OpenAI::Enum - SCALE = :scale + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + # + # @see OpenAI::Models::Chat::ChatCompletionChunk#service_tier + module ServiceTier + extend OpenAI::Internal::Type::Enum + + AUTO = :auto DEFAULT = :default + FLEX = :flex + SCALE = :scale + PRIORITY = :priority - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/chat/chat_completion_content_part.rb b/lib/openai/models/chat/chat_completion_content_part.rb index 836e1222..ba2d6918 100644 --- a/lib/openai/models/chat/chat_completion_content_part.rb +++ b/lib/openai/models/chat/chat_completion_content_part.rb @@ -3,30 +3,30 @@ module OpenAI module Models module Chat - # @abstract - # # Learn about - # [text inputs](https://platform.openai.com/docs/guides/text-generation). - class ChatCompletionContentPart < OpenAI::Union + # [text inputs](https://platform.openai.com/docs/guides/text-generation). + module ChatCompletionContentPart + extend OpenAI::Internal::Type::Union + discriminator :type # Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation). - variant :text, -> { OpenAI::Models::Chat::ChatCompletionContentPartText } + variant :text, -> { OpenAI::Chat::ChatCompletionContentPartText } # Learn about [image inputs](https://platform.openai.com/docs/guides/vision). 
- variant :image_url, -> { OpenAI::Models::Chat::ChatCompletionContentPartImage } + variant :image_url, -> { OpenAI::Chat::ChatCompletionContentPartImage } # Learn about [audio inputs](https://platform.openai.com/docs/guides/audio). - variant :input_audio, -> { OpenAI::Models::Chat::ChatCompletionContentPartInputAudio } + variant :input_audio, -> { OpenAI::Chat::ChatCompletionContentPartInputAudio } # Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text generation. - variant :file, -> { OpenAI::Models::Chat::ChatCompletionContentPart::File } + variant :file, -> { OpenAI::Chat::ChatCompletionContentPart::File } - class File < OpenAI::BaseModel + class File < OpenAI::Internal::Type::BaseModel # @!attribute file # # @return [OpenAI::Models::Chat::ChatCompletionContentPart::File::File] - required :file, -> { OpenAI::Models::Chat::ChatCompletionContentPart::File::File } + required :file, -> { OpenAI::Chat::ChatCompletionContentPart::File::File } # @!attribute type # The type of the content part. Always `file`. @@ -34,59 +34,49 @@ class File < OpenAI::BaseModel # @return [Symbol, :file] required :type, const: :file - # @!parse - # # Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text - # # generation. - # # - # # @param file [OpenAI::Models::Chat::ChatCompletionContentPart::File::File] - # # @param type [Symbol, :file] - # # - # def initialize(file:, type: :file, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(file:, type: :file) + # Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text + # generation. + # + # @param file [OpenAI::Models::Chat::ChatCompletionContentPart::File::File] + # + # @param type [Symbol, :file] The type of the content part. Always `file`. - class File < OpenAI::BaseModel - # @!attribute [r] file_data + # @see OpenAI::Models::Chat::ChatCompletionContentPart::File#file + class File < OpenAI::Internal::Type::BaseModel + # @!attribute file_data # The base64 encoded file data, used when passing the file to the model as a - # string. + # string. # # @return [String, nil] optional :file_data, String - # @!parse - # # @return [String] - # attr_writer :file_data - - # @!attribute [r] file_id + # @!attribute file_id # The ID of an uploaded file to use as input. # # @return [String, nil] optional :file_id, String - # @!parse - # # @return [String] - # attr_writer :file_id - - # @!attribute [r] file_name + # @!attribute filename # The name of the file, used when passing the file to the model as a string. # # @return [String, nil] - optional :file_name, String + optional :filename, String - # @!parse - # # @return [String] - # attr_writer :file_name - - # @!parse - # # @param file_data [String] - # # @param file_id [String] - # # @param file_name [String] - # # - # def initialize(file_data: nil, file_id: nil, file_name: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(file_data: nil, file_id: nil, filename: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionContentPart::File::File} for more details. + # + # @param file_data [String] The base64 encoded file data, used when passing the file to the model + # + # @param file_id [String] The ID of an uploaded file to use as input. 
+ # + # @param filename [String] The name of the file, used when passing the file to the model as a end end + + # @!method self.variants + # @return [Array(OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, OpenAI::Models::Chat::ChatCompletionContentPart::File)] end end diff --git a/lib/openai/models/chat/chat_completion_content_part_image.rb b/lib/openai/models/chat/chat_completion_content_part_image.rb index 9990262c..f5971945 100644 --- a/lib/openai/models/chat/chat_completion_content_part_image.rb +++ b/lib/openai/models/chat/chat_completion_content_part_image.rb @@ -3,11 +3,11 @@ module OpenAI module Models module Chat - class ChatCompletionContentPartImage < OpenAI::BaseModel + class ChatCompletionContentPartImage < OpenAI::Internal::Type::BaseModel # @!attribute image_url # # @return [OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL] - required :image_url, -> { OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL } + required :image_url, -> { OpenAI::Chat::ChatCompletionContentPartImage::ImageURL } # @!attribute type # The type of the content part. @@ -15,52 +15,50 @@ class ChatCompletionContentPartImage < OpenAI::BaseModel # @return [Symbol, :image_url] required :type, const: :image_url - # @!parse - # # Learn about [image inputs](https://platform.openai.com/docs/guides/vision). - # # - # # @param image_url [OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL] - # # @param type [Symbol, :image_url] - # # - # def initialize(image_url:, type: :image_url, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(image_url:, type: :image_url) + # Learn about [image inputs](https://platform.openai.com/docs/guides/vision). + # + # @param image_url [OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL] + # + # @param type [Symbol, :image_url] The type of the content part. - class ImageURL < OpenAI::BaseModel + # @see OpenAI::Models::Chat::ChatCompletionContentPartImage#image_url + class ImageURL < OpenAI::Internal::Type::BaseModel # @!attribute url # Either a URL of the image or the base64 encoded image data. # # @return [String] required :url, String - # @!attribute [r] detail + # @!attribute detail # Specifies the detail level of the image. Learn more in the - # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). + # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). # # @return [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::Detail, nil] - optional :detail, enum: -> { OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::Detail } - - # @!parse - # # @return [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::Detail] - # attr_writer :detail - - # @!parse - # # @param url [String] - # # @param detail [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::Detail] - # # - # def initialize(url:, detail: nil, **) = super + optional :detail, enum: -> { OpenAI::Chat::ChatCompletionContentPartImage::ImageURL::Detail } - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(url:, detail: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL} for more + # details. 
+ # + # @param url [String] Either a URL of the image or the base64 encoded image data. # + # @param detail [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::Detail] Specifies the detail level of the image. Learn more in the [Vision guide](https: + # Specifies the detail level of the image. Learn more in the - # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). - class Detail < OpenAI::Enum + # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). + # + # @see OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL#detail + module Detail + extend OpenAI::Internal::Type::Enum + AUTO = :auto LOW = :low HIGH = :high - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/chat/chat_completion_content_part_input_audio.rb b/lib/openai/models/chat/chat_completion_content_part_input_audio.rb index 0ed3dff8..d8f86fb2 100644 --- a/lib/openai/models/chat/chat_completion_content_part_input_audio.rb +++ b/lib/openai/models/chat/chat_completion_content_part_input_audio.rb @@ -3,11 +3,11 @@ module OpenAI module Models module Chat - class ChatCompletionContentPartInputAudio < OpenAI::BaseModel + class ChatCompletionContentPartInputAudio < OpenAI::Internal::Type::BaseModel # @!attribute input_audio # # @return [OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio] - required :input_audio, -> { OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio } + required :input_audio, -> { OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio } # @!attribute type # The type of the content part. Always `input_audio`. @@ -15,17 +15,15 @@ class ChatCompletionContentPartInputAudio < OpenAI::BaseModel # @return [Symbol, :input_audio] required :type, const: :input_audio - # @!parse - # # Learn about [audio inputs](https://platform.openai.com/docs/guides/audio). - # # - # # @param input_audio [OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio] - # # @param type [Symbol, :input_audio] - # # - # def initialize(input_audio:, type: :input_audio, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(input_audio:, type: :input_audio) + # Learn about [audio inputs](https://platform.openai.com/docs/guides/audio). + # + # @param input_audio [OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio] + # + # @param type [Symbol, :input_audio] The type of the content part. Always `input_audio`. - class InputAudio < OpenAI::BaseModel + # @see OpenAI::Models::Chat::ChatCompletionContentPartInputAudio#input_audio + class InputAudio < OpenAI::Internal::Type::BaseModel # @!attribute data # Base64 encoded audio data. 
# @@ -37,25 +35,29 @@ class InputAudio < OpenAI::BaseModel # # @return [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio::Format] required :format_, - enum: -> { OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio::Format }, + enum: -> { OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio::Format }, api_name: :format - # @!parse - # # @param data [String] - # # @param format_ [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio::Format] - # # - # def initialize(data:, format_:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(data:, format_:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio} for more + # details. # + # @param data [String] Base64 encoded audio data. + # + # @param format_ [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio::Format] The format of the encoded audio data. Currently supports "wav" and "mp3". + # The format of the encoded audio data. Currently supports "wav" and "mp3". - class Format < OpenAI::Enum + # + # @see OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio#format_ + module Format + extend OpenAI::Internal::Type::Enum + WAV = :wav MP3 = :mp3 - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/chat/chat_completion_content_part_refusal.rb b/lib/openai/models/chat/chat_completion_content_part_refusal.rb index 83e6e914..5f1e561e 100644 --- a/lib/openai/models/chat/chat_completion_content_part_refusal.rb +++ b/lib/openai/models/chat/chat_completion_content_part_refusal.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Chat - class ChatCompletionContentPartRefusal < OpenAI::BaseModel + class ChatCompletionContentPartRefusal < OpenAI::Internal::Type::BaseModel # @!attribute refusal # The refusal message generated by the model. # @@ -16,13 +16,10 @@ class ChatCompletionContentPartRefusal < OpenAI::BaseModel # @return [Symbol, :refusal] required :type, const: :refusal - # @!parse - # # @param refusal [String] - # # @param type [Symbol, :refusal] - # # - # def initialize(refusal:, type: :refusal, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(refusal:, type: :refusal) + # @param refusal [String] The refusal message generated by the model. + # + # @param type [Symbol, :refusal] The type of the content part. end end diff --git a/lib/openai/models/chat/chat_completion_content_part_text.rb b/lib/openai/models/chat/chat_completion_content_part_text.rb index 1fa4cbdb..3800b650 100644 --- a/lib/openai/models/chat/chat_completion_content_part_text.rb +++ b/lib/openai/models/chat/chat_completion_content_part_text.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Chat - class ChatCompletionContentPartText < OpenAI::BaseModel + class ChatCompletionContentPartText < OpenAI::Internal::Type::BaseModel # @!attribute text # The text content. # @@ -16,16 +16,13 @@ class ChatCompletionContentPartText < OpenAI::BaseModel # @return [Symbol, :text] required :type, const: :text - # @!parse - # # Learn about - # # [text inputs](https://platform.openai.com/docs/guides/text-generation). 
- # # - # # @param text [String] - # # @param type [Symbol, :text] - # # - # def initialize(text:, type: :text, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(text:, type: :text) + # Learn about + # [text inputs](https://platform.openai.com/docs/guides/text-generation). + # + # @param text [String] The text content. + # + # @param type [Symbol, :text] The type of the content part. end end diff --git a/lib/openai/models/chat/chat_completion_custom_tool.rb b/lib/openai/models/chat/chat_completion_custom_tool.rb new file mode 100644 index 00000000..9a4b458b --- /dev/null +++ b/lib/openai/models/chat/chat_completion_custom_tool.rb @@ -0,0 +1,158 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Chat + class ChatCompletionCustomTool < OpenAI::Internal::Type::BaseModel + # @!attribute custom + # Properties of the custom tool. + # + # @return [OpenAI::Models::Chat::ChatCompletionCustomTool::Custom] + required :custom, -> { OpenAI::Chat::ChatCompletionCustomTool::Custom } + + # @!attribute type + # The type of the custom tool. Always `custom`. + # + # @return [Symbol, :custom] + required :type, const: :custom + + # @!method initialize(custom:, type: :custom) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionCustomTool} for more details. + # + # A custom tool that processes input using a specified format. + # + # @param custom [OpenAI::Models::Chat::ChatCompletionCustomTool::Custom] Properties of the custom tool. + # + # @param type [Symbol, :custom] The type of the custom tool. Always `custom`. + + # @see OpenAI::Models::Chat::ChatCompletionCustomTool#custom + class Custom < OpenAI::Internal::Type::BaseModel + # @!attribute name + # The name of the custom tool, used to identify it in tool calls. + # + # @return [String] + required :name, String + + # @!attribute description + # Optional description of the custom tool, used to provide more context. + # + # @return [String, nil] + optional :description, String + + # @!attribute format_ + # The input format for the custom tool. Default is unconstrained text. + # + # @return [OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Text, OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar, nil] + optional :format_, + union: -> { + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format + }, + api_name: :format + + # @!method initialize(name:, description: nil, format_: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionCustomTool::Custom} for more details. + # + # Properties of the custom tool. + # + # @param name [String] The name of the custom tool, used to identify it in tool calls. + # + # @param description [String] Optional description of the custom tool, used to provide more context. + # + # @param format_ [OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Text, OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar] The input format for the custom tool. Default is unconstrained text. + + # The input format for the custom tool. Default is unconstrained text. + # + # @see OpenAI::Models::Chat::ChatCompletionCustomTool::Custom#format_ + module Format + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Unconstrained free-form text. + variant :text, -> { OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text } + + # A grammar defined by the user. 
+ variant :grammar, -> { OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar } + + class Text < OpenAI::Internal::Type::BaseModel + # @!attribute type + # Unconstrained text format. Always `text`. + # + # @return [Symbol, :text] + required :type, const: :text + + # @!method initialize(type: :text) + # Unconstrained free-form text. + # + # @param type [Symbol, :text] Unconstrained text format. Always `text`. + end + + class Grammar < OpenAI::Internal::Type::BaseModel + # @!attribute grammar + # Your chosen grammar. + # + # @return [OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar] + required :grammar, -> { OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar } + + # @!attribute type + # Grammar format. Always `grammar`. + # + # @return [Symbol, :grammar] + required :type, const: :grammar + + # @!method initialize(grammar:, type: :grammar) + # A grammar defined by the user. + # + # @param grammar [OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar] Your chosen grammar. + # + # @param type [Symbol, :grammar] Grammar format. Always `grammar`. + + # @see OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar#grammar + class Grammar < OpenAI::Internal::Type::BaseModel + # @!attribute definition + # The grammar definition. + # + # @return [String] + required :definition, String + + # @!attribute syntax + # The syntax of the grammar definition. One of `lark` or `regex`. + # + # @return [Symbol, OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax] + required :syntax, + enum: -> { OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax } + + # @!method initialize(definition:, syntax:) + # Your chosen grammar. + # + # @param definition [String] The grammar definition. + # + # @param syntax [Symbol, OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax] The syntax of the grammar definition. One of `lark` or `regex`. + + # The syntax of the grammar definition. One of `lark` or `regex`. + # + # @see OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar#syntax + module Syntax + extend OpenAI::Internal::Type::Enum + + LARK = :lark + REGEX = :regex + + # @!method self.values + # @return [Array] + end + end + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Text, OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar)] + end + end + end + end + + ChatCompletionCustomTool = Chat::ChatCompletionCustomTool + end +end diff --git a/lib/openai/models/chat/chat_completion_deleted.rb b/lib/openai/models/chat/chat_completion_deleted.rb index 22274c71..2cec245f 100644 --- a/lib/openai/models/chat/chat_completion_deleted.rb +++ b/lib/openai/models/chat/chat_completion_deleted.rb @@ -3,7 +3,8 @@ module OpenAI module Models module Chat - class ChatCompletionDeleted < OpenAI::BaseModel + # @see OpenAI::Resources::Chat::Completions#delete + class ChatCompletionDeleted < OpenAI::Internal::Type::BaseModel # @!attribute id # The ID of the chat completion that was deleted. # @@ -14,7 +15,7 @@ class ChatCompletionDeleted < OpenAI::BaseModel # Whether the chat completion was deleted. # # @return [Boolean] - required :deleted, OpenAI::BooleanModel + required :deleted, OpenAI::Internal::Type::Boolean # @!attribute object # The type of object being deleted. 
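The ChatCompletionCustomTool model added above composes the same way as the function tool: `custom.format` defaults to unconstrained text, or carries a `grammar` variant with `lark` or `regex` syntax. A hedged request-side sketch, reusing the `client` from the streaming sketch; the tool name and Lark grammar are invented for illustration, the Ruby-side key is assumed to be `format_` (serialized to `format` via `api_name`), and forcing the tool uses the named-tool-choice-custom shape introduced later in this diff.

    math_tool = {
      type: :custom,
      custom: {
        name: "math_expr",
        description: "Emit a single arithmetic expression to evaluate.",
        format_: {
          type: :grammar,
          grammar: {
            syntax: :lark, # or :regex
            definition: "start: NUMBER \"+\" NUMBER\n%import common.NUMBER"
          }
        }
      }
    }

    response = client.chat.completions.create(
      model: "gpt-5", # illustrative; custom tools need a model that supports them
      messages: [{role: :user, content: "Add 17 and 25 using math_expr."}],
      tools: [math_tool],
      tool_choice: {type: :custom, custom: {name: "math_expr"}}
    )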
@@ -22,14 +23,12 @@ class ChatCompletionDeleted < OpenAI::BaseModel # @return [Symbol, :"chat.completion.deleted"] required :object, const: :"chat.completion.deleted" - # @!parse - # # @param id [String] - # # @param deleted [Boolean] - # # @param object [Symbol, :"chat.completion.deleted"] - # # - # def initialize(id:, deleted:, object: :"chat.completion.deleted", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, deleted:, object: :"chat.completion.deleted") + # @param id [String] The ID of the chat completion that was deleted. + # + # @param deleted [Boolean] Whether the chat completion was deleted. + # + # @param object [Symbol, :"chat.completion.deleted"] The type of object being deleted. end end diff --git a/lib/openai/models/chat/chat_completion_developer_message_param.rb b/lib/openai/models/chat/chat_completion_developer_message_param.rb index 7735915f..828698cb 100644 --- a/lib/openai/models/chat/chat_completion_developer_message_param.rb +++ b/lib/openai/models/chat/chat_completion_developer_message_param.rb @@ -3,12 +3,12 @@ module OpenAI module Models module Chat - class ChatCompletionDeveloperMessageParam < OpenAI::BaseModel + class ChatCompletionDeveloperMessageParam < OpenAI::Internal::Type::BaseModel # @!attribute content # The contents of the developer message. # # @return [String, Array] - required :content, union: -> { OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam::Content } + required :content, union: -> { OpenAI::Chat::ChatCompletionDeveloperMessageParam::Content } # @!attribute role # The role of the messages author, in this case `developer`. @@ -16,41 +16,45 @@ class ChatCompletionDeveloperMessageParam < OpenAI::BaseModel # @return [Symbol, :developer] required :role, const: :developer - # @!attribute [r] name + # @!attribute name # An optional name for the participant. Provides the model information to - # differentiate between participants of the same role. + # differentiate between participants of the same role. # # @return [String, nil] optional :name, String - # @!parse - # # @return [String] - # attr_writer :name - - # @!parse - # # Developer-provided instructions that the model should follow, regardless of - # # messages sent by the user. With o1 models and newer, `developer` messages - # # replace the previous `system` messages. - # # - # # @param content [String, Array] - # # @param name [String] - # # @param role [Symbol, :developer] - # # - # def initialize(content:, name: nil, role: :developer, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(content:, name: nil, role: :developer) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam} for more details. + # + # Developer-provided instructions that the model should follow, regardless of + # messages sent by the user. With o1 models and newer, `developer` messages + # replace the previous `system` messages. + # + # @param content [String, Array] The contents of the developer message. # + # @param name [String] An optional name for the participant. Provides the model information to differen + # + # @param role [Symbol, :developer] The role of the messages author, in this case `developer`. + # The contents of the developer message. 
- class Content < OpenAI::Union - ChatCompletionContentPartTextArray = OpenAI::ArrayOf[-> { OpenAI::Models::Chat::ChatCompletionContentPartText }] + # + # @see OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam#content + module Content + extend OpenAI::Internal::Type::Union # The contents of the developer message. variant String # An array of content parts with a defined type. For developer messages, only type `text` is supported. - variant OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam::Content::ChatCompletionContentPartTextArray + variant -> { OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam::Content::ChatCompletionContentPartTextArray } + + # @!method self.variants + # @return [Array(String, Array)] + + # @type [OpenAI::Internal::Type::Converter] + ChatCompletionContentPartTextArray = + OpenAI::Internal::Type::ArrayOf[-> { OpenAI::Chat::ChatCompletionContentPartText }] end end end diff --git a/lib/openai/models/chat/chat_completion_function_call_option.rb b/lib/openai/models/chat/chat_completion_function_call_option.rb index 8cc72a51..89566e41 100644 --- a/lib/openai/models/chat/chat_completion_function_call_option.rb +++ b/lib/openai/models/chat/chat_completion_function_call_option.rb @@ -3,22 +3,18 @@ module OpenAI module Models module Chat - class ChatCompletionFunctionCallOption < OpenAI::BaseModel + class ChatCompletionFunctionCallOption < OpenAI::Internal::Type::BaseModel # @!attribute name # The name of the function to call. # # @return [String] required :name, String - # @!parse - # # Specifying a particular function via `{"name": "my_function"}` forces the model - # # to call that function. - # # - # # @param name [String] - # # - # def initialize(name:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(name:) + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. + # + # @param name [String] The name of the function to call. end end diff --git a/lib/openai/models/chat/chat_completion_function_message_param.rb b/lib/openai/models/chat/chat_completion_function_message_param.rb index 4ce20d75..feb98749 100644 --- a/lib/openai/models/chat/chat_completion_function_message_param.rb +++ b/lib/openai/models/chat/chat_completion_function_message_param.rb @@ -4,8 +4,7 @@ module OpenAI module Models module Chat # @deprecated - # - class ChatCompletionFunctionMessageParam < OpenAI::BaseModel + class ChatCompletionFunctionMessageParam < OpenAI::Internal::Type::BaseModel # @!attribute content # The contents of the function message. # @@ -24,14 +23,12 @@ class ChatCompletionFunctionMessageParam < OpenAI::BaseModel # @return [Symbol, :function] required :role, const: :function - # @!parse - # # @param content [String, nil] - # # @param name [String] - # # @param role [Symbol, :function] - # # - # def initialize(content:, name:, role: :function, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(content:, name:, role: :function) + # @param content [String, nil] The contents of the function message. + # + # @param name [String] The name of the function to call. + # + # @param role [Symbol, :function] The role of the messages author, in this case `function`. 
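On the request side, these message params and content parts are usually written as plain hashes and coerced through the models above. A short sketch reusing the `client` from the streaming example; the model name and file ID are illustrative placeholders.

    response = client.chat.completions.create(
      model: "gpt-4.1",
      messages: [
        # Developer message content may be a plain String or an array of
        # `text` parts (the Content union above).
        {role: :developer, content: "Answer in one sentence."},
        # User message mixing content part types.
        {
          role: :user,
          content: [
            {type: :text, text: "Summarize the attached file and describe the image."},
            {type: :image_url, image_url: {url: "https://example.com/diagram.png", detail: :low}},
            {type: :file, file: {file_id: "file-abc123"}} # hypothetical uploaded-file ID
          ]
        }
      ]
    )

    puts response.choices.first.message.content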
end end diff --git a/lib/openai/models/chat/chat_completion_function_tool.rb b/lib/openai/models/chat/chat_completion_function_tool.rb new file mode 100644 index 00000000..dbedf8e5 --- /dev/null +++ b/lib/openai/models/chat/chat_completion_function_tool.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Chat + class ChatCompletionFunctionTool < OpenAI::Internal::Type::BaseModel + # @!attribute function + # + # @return [OpenAI::Models::FunctionDefinition] + required :function, -> { OpenAI::FunctionDefinition } + + # @!attribute type + # The type of the tool. Currently, only `function` is supported. + # + # @return [Symbol, :function] + required :type, const: :function + + # @!method initialize(function:, type: :function) + # A function tool that can be used to generate a response. + # + # @param function [OpenAI::Models::FunctionDefinition] + # + # @param type [Symbol, :function] The type of the tool. Currently, only `function` is supported. + end + end + + ChatCompletionFunctionTool = Chat::ChatCompletionFunctionTool + end +end diff --git a/lib/openai/models/chat/chat_completion_message.rb b/lib/openai/models/chat/chat_completion_message.rb index 96223d57..e6823a60 100644 --- a/lib/openai/models/chat/chat_completion_message.rb +++ b/lib/openai/models/chat/chat_completion_message.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Chat - class ChatCompletionMessage < OpenAI::BaseModel + class ChatCompletionMessage < OpenAI::Internal::Type::BaseModel # @!attribute content # The contents of the message. # @@ -22,73 +22,59 @@ class ChatCompletionMessage < OpenAI::BaseModel # @return [Symbol, :assistant] required :role, const: :assistant - # @!attribute [r] annotations + # @!attribute annotations # Annotations for the message, when applicable, as when using the - # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). # # @return [Array, nil] - optional :annotations, -> { OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletionMessage::Annotation] } - - # @!parse - # # @return [Array] - # attr_writer :annotations + optional :annotations, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionMessage::Annotation] } # @!attribute audio # If the audio output modality is requested, this object contains data about the - # audio response from the model. - # [Learn more](https://platform.openai.com/docs/guides/audio). + # audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). # # @return [OpenAI::Models::Chat::ChatCompletionAudio, nil] - optional :audio, -> { OpenAI::Models::Chat::ChatCompletionAudio }, nil?: true + optional :audio, -> { OpenAI::Chat::ChatCompletionAudio }, nil?: true - # @!attribute [r] function_call + # @!attribute function_call + # @deprecated + # # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # that should be called, as generated by the model. + # that should be called, as generated by the model. 
# # @return [OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall, nil] - optional :function_call, -> { OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall } + optional :function_call, -> { OpenAI::Chat::ChatCompletionMessage::FunctionCall } - # @!parse - # # @return [OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall] - # attr_writer :function_call - - # @!attribute [r] tool_calls + # @!attribute tool_calls # The tool calls generated by the model, such as function calls. # - # @return [Array, nil] - optional :tool_calls, -> { OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletionMessageToolCall] } - - # @!parse - # # @return [Array] - # attr_writer :tool_calls - - # @!parse - # # A chat completion message generated by the model. - # # - # # @param content [String, nil] - # # @param refusal [String, nil] - # # @param annotations [Array] - # # @param audio [OpenAI::Models::Chat::ChatCompletionAudio, nil] - # # @param function_call [OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall] - # # @param tool_calls [Array] - # # @param role [Symbol, :assistant] - # # - # def initialize( - # content:, - # refusal:, - # annotations: nil, - # audio: nil, - # function_call: nil, - # tool_calls: nil, - # role: :assistant, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Annotation < OpenAI::BaseModel + # @return [Array, nil] + optional :tool_calls, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Chat::ChatCompletionMessageToolCall] } + + # @!method initialize(content:, refusal:, annotations: nil, audio: nil, function_call: nil, tool_calls: nil, role: :assistant) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionMessage} for more details. + # + # A chat completion message generated by the model. + # + # @param content [String, nil] The contents of the message. + # + # @param refusal [String, nil] The refusal message generated by the model. + # + # @param annotations [Array] Annotations for the message, when applicable, as when using the + # + # @param audio [OpenAI::Models::Chat::ChatCompletionAudio, nil] If the audio output modality is requested, this object contains data + # + # @param function_call [OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall] Deprecated and replaced by `tool_calls`. The name and arguments of a function th + # + # @param tool_calls [Array] The tool calls generated by the model, such as function calls. + # + # @param role [Symbol, :assistant] The role of the author of this message. + + class Annotation < OpenAI::Internal::Type::BaseModel # @!attribute type # The type of the URL citation. Always `url_citation`. # @@ -99,19 +85,17 @@ class Annotation < OpenAI::BaseModel # A URL citation when using web search. # # @return [OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation] - required :url_citation, -> { OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation } + required :url_citation, -> { OpenAI::Chat::ChatCompletionMessage::Annotation::URLCitation } - # @!parse - # # A URL citation when using web search. - # # - # # @param url_citation [OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation] - # # @param type [Symbol, :url_citation] - # # - # def initialize(url_citation:, type: :url_citation, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(url_citation:, type: :url_citation) + # A URL citation when using web search. 
+ # + # @param url_citation [OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation] A URL citation when using web search. + # + # @param type [Symbol, :url_citation] The type of the URL citation. Always `url_citation`. - class URLCitation < OpenAI::BaseModel + # @see OpenAI::Models::Chat::ChatCompletionMessage::Annotation#url_citation + class URLCitation < OpenAI::Internal::Type::BaseModel # @!attribute end_index # The index of the last character of the URL citation in the message. # @@ -136,28 +120,28 @@ class URLCitation < OpenAI::BaseModel # @return [String] required :url, String - # @!parse - # # A URL citation when using web search. - # # - # # @param end_index [Integer] - # # @param start_index [Integer] - # # @param title [String] - # # @param url [String] - # # - # def initialize(end_index:, start_index:, title:, url:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(end_index:, start_index:, title:, url:) + # A URL citation when using web search. + # + # @param end_index [Integer] The index of the last character of the URL citation in the message. + # + # @param start_index [Integer] The index of the first character of the URL citation in the message. + # + # @param title [String] The title of the web resource. + # + # @param url [String] The URL of the web resource. end end # @deprecated # - class FunctionCall < OpenAI::BaseModel + # @see OpenAI::Models::Chat::ChatCompletionMessage#function_call + class FunctionCall < OpenAI::Internal::Type::BaseModel # @!attribute arguments # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. # # @return [String] required :arguments, String @@ -168,16 +152,16 @@ class FunctionCall < OpenAI::BaseModel # @return [String] required :name, String - # @!parse - # # Deprecated and replaced by `tool_calls`. The name and arguments of a function - # # that should be called, as generated by the model. - # # - # # @param arguments [String] - # # @param name [String] - # # - # def initialize(arguments:, name:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(arguments:, name:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall} for more details. + # + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. + # + # @param arguments [String] The arguments to call the function with, as generated by the model in JSON forma + # + # @param name [String] The name of the function to call. 
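When the web search tool runs, the annotations modeled above locate each citation inside the message text by character offsets. A response-side sketch, where `response` is assumed to come from a completed request such as the one earlier:

    message = response.choices.first.message

    (message.annotations || []).each do |annotation|
      next unless annotation.type == :url_citation && message.content

      citation = annotation.url_citation
      # start_index/end_index delimit the cited span within message.content.
      cited = message.content[citation.start_index...citation.end_index]
      puts "#{cited.inspect} -> #{citation.title} (#{citation.url})"
    end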
end end end diff --git a/lib/openai/models/chat/chat_completion_message_custom_tool_call.rb b/lib/openai/models/chat/chat_completion_message_custom_tool_call.rb new file mode 100644 index 00000000..2d83c132 --- /dev/null +++ b/lib/openai/models/chat/chat_completion_message_custom_tool_call.rb @@ -0,0 +1,60 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Chat + class ChatCompletionMessageCustomToolCall < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The ID of the tool call. + # + # @return [String] + required :id, String + + # @!attribute custom + # The custom tool that the model called. + # + # @return [OpenAI::Models::Chat::ChatCompletionMessageCustomToolCall::Custom] + required :custom, -> { OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom } + + # @!attribute type + # The type of the tool. Always `custom`. + # + # @return [Symbol, :custom] + required :type, const: :custom + + # @!method initialize(id:, custom:, type: :custom) + # A call to a custom tool created by the model. + # + # @param id [String] The ID of the tool call. + # + # @param custom [OpenAI::Models::Chat::ChatCompletionMessageCustomToolCall::Custom] The custom tool that the model called. + # + # @param type [Symbol, :custom] The type of the tool. Always `custom`. + + # @see OpenAI::Models::Chat::ChatCompletionMessageCustomToolCall#custom + class Custom < OpenAI::Internal::Type::BaseModel + # @!attribute input + # The input for the custom tool call generated by the model. + # + # @return [String] + required :input, String + + # @!attribute name + # The name of the custom tool to call. + # + # @return [String] + required :name, String + + # @!method initialize(input:, name:) + # The custom tool that the model called. + # + # @param input [String] The input for the custom tool call generated by the model. + # + # @param name [String] The name of the custom tool to call. + end + end + end + + ChatCompletionMessageCustomToolCall = Chat::ChatCompletionMessageCustomToolCall + end +end diff --git a/lib/openai/models/chat/chat_completion_message_function_tool_call.rb b/lib/openai/models/chat/chat_completion_message_function_tool_call.rb new file mode 100644 index 00000000..a9514a10 --- /dev/null +++ b/lib/openai/models/chat/chat_completion_message_function_tool_call.rb @@ -0,0 +1,67 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Chat + class ChatCompletionMessageFunctionToolCall < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The ID of the tool call. + # + # @return [String] + required :id, String + + # @!attribute function + # The function that the model called. + # + # @return [OpenAI::Models::Chat::ChatCompletionMessageFunctionToolCall::Function] + required :function, -> { OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function } + + # @!attribute type + # The type of the tool. Currently, only `function` is supported. + # + # @return [Symbol, :function] + required :type, const: :function + + # @!method initialize(id:, function:, type: :function) + # A call to a function tool created by the model. + # + # @param id [String] The ID of the tool call. + # + # @param function [OpenAI::Models::Chat::ChatCompletionMessageFunctionToolCall::Function] The function that the model called. + # + # @param type [Symbol, :function] The type of the tool. Currently, only `function` is supported. 
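Because `tool_calls` is now an array over a union (see the chat_completion_message_tool_call.rb hunk below), response handlers should branch on the concrete tool-call class. A sketch continuing from `message` above; `handle_function` and `handle_custom` are hypothetical application callbacks:

    require "json"

    (message.tool_calls || []).each do |tool_call|
      case tool_call
      when OpenAI::Chat::ChatCompletionMessageFunctionToolCall
        # Function arguments are a JSON string and may be invalid; parse and
        # validate before acting on them.
        args = JSON.parse(tool_call.function.arguments)
        handle_function(tool_call.function.name, args, id: tool_call.id)
      when OpenAI::Chat::ChatCompletionMessageCustomToolCall
        # Custom tool calls carry raw input text rather than JSON arguments.
        handle_custom(tool_call.custom.name, tool_call.custom.input, id: tool_call.id)
      end
    end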
+ + # @see OpenAI::Models::Chat::ChatCompletionMessageFunctionToolCall#function + class Function < OpenAI::Internal::Type::BaseModel + # @!attribute arguments + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. + # + # @return [String] + required :arguments, String + + # @!attribute name + # The name of the function to call. + # + # @return [String] + required :name, String + + # @!method initialize(arguments:, name:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionMessageFunctionToolCall::Function} for more + # details. + # + # The function that the model called. + # + # @param arguments [String] The arguments to call the function with, as generated by the model in JSON forma + # + # @param name [String] The name of the function to call. + end + end + end + + ChatCompletionMessageFunctionToolCall = Chat::ChatCompletionMessageFunctionToolCall + end +end diff --git a/lib/openai/models/chat/chat_completion_message_param.rb b/lib/openai/models/chat/chat_completion_message_param.rb index dac398d3..b25933e3 100644 --- a/lib/openai/models/chat/chat_completion_message_param.rb +++ b/lib/openai/models/chat/chat_completion_message_param.rb @@ -3,34 +3,37 @@ module OpenAI module Models module Chat - # @abstract - # # Developer-provided instructions that the model should follow, regardless of - # messages sent by the user. With o1 models and newer, `developer` messages - # replace the previous `system` messages. - class ChatCompletionMessageParam < OpenAI::Union + # messages sent by the user. With o1 models and newer, `developer` messages + # replace the previous `system` messages. + module ChatCompletionMessageParam + extend OpenAI::Internal::Type::Union + discriminator :role # Developer-provided instructions that the model should follow, regardless of # messages sent by the user. With o1 models and newer, `developer` messages # replace the previous `system` messages. - variant :developer, -> { OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam } + variant :developer, -> { OpenAI::Chat::ChatCompletionDeveloperMessageParam } # Developer-provided instructions that the model should follow, regardless of # messages sent by the user. With o1 models and newer, use `developer` messages # for this purpose instead. - variant :system, -> { OpenAI::Models::Chat::ChatCompletionSystemMessageParam } + variant :system, -> { OpenAI::Chat::ChatCompletionSystemMessageParam } # Messages sent by an end user, containing prompts or additional context # information. - variant :user, -> { OpenAI::Models::Chat::ChatCompletionUserMessageParam } + variant :user, -> { OpenAI::Chat::ChatCompletionUserMessageParam } # Messages sent by the model in response to user messages. 
- variant :assistant, -> { OpenAI::Models::Chat::ChatCompletionAssistantMessageParam } + variant :assistant, -> { OpenAI::Chat::ChatCompletionAssistantMessageParam } + + variant :tool, -> { OpenAI::Chat::ChatCompletionToolMessageParam } - variant :tool, -> { OpenAI::Models::Chat::ChatCompletionToolMessageParam } + variant :function, -> { OpenAI::Chat::ChatCompletionFunctionMessageParam } - variant :function, -> { OpenAI::Models::Chat::ChatCompletionFunctionMessageParam } + # @!method self.variants + # @return [Array(OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, OpenAI::Models::Chat::ChatCompletionSystemMessageParam, OpenAI::Models::Chat::ChatCompletionUserMessageParam, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, OpenAI::Models::Chat::ChatCompletionToolMessageParam, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam)] end end diff --git a/lib/openai/models/chat/chat_completion_message_tool_call.rb b/lib/openai/models/chat/chat_completion_message_tool_call.rb index f4628729..15d9bd07 100644 --- a/lib/openai/models/chat/chat_completion_message_tool_call.rb +++ b/lib/openai/models/chat/chat_completion_message_tool_call.rb @@ -3,60 +3,20 @@ module OpenAI module Models module Chat - class ChatCompletionMessageToolCall < OpenAI::BaseModel - # @!attribute id - # The ID of the tool call. - # - # @return [String] - required :id, String + # A call to a function tool created by the model. + module ChatCompletionMessageToolCall + extend OpenAI::Internal::Type::Union - # @!attribute function - # The function that the model called. - # - # @return [OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function] - required :function, -> { OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function } + discriminator :type - # @!attribute type - # The type of the tool. Currently, only `function` is supported. - # - # @return [Symbol, :function] - required :type, const: :function + # A call to a function tool created by the model. + variant :function, -> { OpenAI::Chat::ChatCompletionMessageFunctionToolCall } - # @!parse - # # @param id [String] - # # @param function [OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function] - # # @param type [Symbol, :function] - # # - # def initialize(id:, function:, type: :function, **) = super + # A call to a custom tool created by the model. + variant :custom, -> { OpenAI::Chat::ChatCompletionMessageCustomToolCall } - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Function < OpenAI::BaseModel - # @!attribute arguments - # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. - # - # @return [String] - required :arguments, String - - # @!attribute name - # The name of the function to call. - # - # @return [String] - required :name, String - - # @!parse - # # The function that the model called. 
- # # - # # @param arguments [String] - # # @param name [String] - # # - # def initialize(arguments:, name:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - end + # @!method self.variants + # @return [Array(OpenAI::Models::Chat::ChatCompletionMessageFunctionToolCall, OpenAI::Models::Chat::ChatCompletionMessageCustomToolCall)] end end diff --git a/lib/openai/models/chat/chat_completion_modality.rb b/lib/openai/models/chat/chat_completion_modality.rb index e7558545..10d3ba8c 100644 --- a/lib/openai/models/chat/chat_completion_modality.rb +++ b/lib/openai/models/chat/chat_completion_modality.rb @@ -3,13 +3,14 @@ module OpenAI module Models module Chat - # @abstract - # - class ChatCompletionModality < OpenAI::Enum + module ChatCompletionModality + extend OpenAI::Internal::Type::Enum + TEXT = :text AUDIO = :audio - finalize! + # @!method self.values + # @return [Array] end end diff --git a/lib/openai/models/chat/chat_completion_named_tool_choice.rb b/lib/openai/models/chat/chat_completion_named_tool_choice.rb index 84744f2a..a7cea41d 100644 --- a/lib/openai/models/chat/chat_completion_named_tool_choice.rb +++ b/lib/openai/models/chat/chat_completion_named_tool_choice.rb @@ -3,42 +3,36 @@ module OpenAI module Models module Chat - class ChatCompletionNamedToolChoice < OpenAI::BaseModel + class ChatCompletionNamedToolChoice < OpenAI::Internal::Type::BaseModel # @!attribute function # # @return [OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function] - required :function, -> { OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function } + required :function, -> { OpenAI::Chat::ChatCompletionNamedToolChoice::Function } # @!attribute type - # The type of the tool. Currently, only `function` is supported. + # For function calling, the type is always `function`. # # @return [Symbol, :function] required :type, const: :function - # @!parse - # # Specifies a tool the model should use. Use to force the model to call a specific - # # function. - # # - # # @param function [OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function] - # # @param type [Symbol, :function] - # # - # def initialize(function:, type: :function, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(function:, type: :function) + # Specifies a tool the model should use. Use to force the model to call a specific + # function. + # + # @param function [OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function] + # + # @param type [Symbol, :function] For function calling, the type is always `function`. - class Function < OpenAI::BaseModel + # @see OpenAI::Models::Chat::ChatCompletionNamedToolChoice#function + class Function < OpenAI::Internal::Type::BaseModel # @!attribute name # The name of the function to call. # # @return [String] required :name, String - # @!parse - # # @param name [String] - # # - # def initialize(name:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(name:) + # @param name [String] The name of the function to call. 
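# ---- Example (editor's sketch, not part of the patch) ----
# A hedged illustration of how the named tool choice defined above is used:
# passing `tool_choice` forces the model to call one specific function. The
# client setup and the `get_weather` tool are assumptions for illustration only.
client = OpenAI::Client.new # assumes OPENAI_API_KEY is set in the environment
response = client.chat.completions.create(
  model: "gpt-4o",
  messages: [{role: :user, content: "What's the weather in Paris?"}],
  tools: [
    {
      type: :function,
      function: {
        name: "get_weather", # hypothetical function, defined by the caller
        parameters: {type: "object", properties: {city: {type: "string"}}}
      }
    }
  ],
  # The ChatCompletionNamedToolChoice shape: {type: :function, function: {name: ...}}
  tool_choice: {type: :function, function: {name: "get_weather"}}
)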
end end end diff --git a/lib/openai/models/chat/chat_completion_named_tool_choice_custom.rb b/lib/openai/models/chat/chat_completion_named_tool_choice_custom.rb new file mode 100644 index 00000000..cf2d854e --- /dev/null +++ b/lib/openai/models/chat/chat_completion_named_tool_choice_custom.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Chat + class ChatCompletionNamedToolChoiceCustom < OpenAI::Internal::Type::BaseModel + # @!attribute custom + # + # @return [OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom::Custom] + required :custom, -> { OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom } + + # @!attribute type + # For custom tool calling, the type is always `custom`. + # + # @return [Symbol, :custom] + required :type, const: :custom + + # @!method initialize(custom:, type: :custom) + # Specifies a tool the model should use. Use to force the model to call a specific + # custom tool. + # + # @param custom [OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom::Custom] + # + # @param type [Symbol, :custom] For custom tool calling, the type is always `custom`. + + # @see OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom#custom + class Custom < OpenAI::Internal::Type::BaseModel + # @!attribute name + # The name of the custom tool to call. + # + # @return [String] + required :name, String + + # @!method initialize(name:) + # @param name [String] The name of the custom tool to call. + end + end + end + + ChatCompletionNamedToolChoiceCustom = Chat::ChatCompletionNamedToolChoiceCustom + end +end diff --git a/lib/openai/models/chat/chat_completion_prediction_content.rb b/lib/openai/models/chat/chat_completion_prediction_content.rb index b7cb311e..52235ae7 100644 --- a/lib/openai/models/chat/chat_completion_prediction_content.rb +++ b/lib/openai/models/chat/chat_completion_prediction_content.rb @@ -3,47 +3,54 @@ module OpenAI module Models module Chat - class ChatCompletionPredictionContent < OpenAI::BaseModel + class ChatCompletionPredictionContent < OpenAI::Internal::Type::BaseModel # @!attribute content # The content that should be matched when generating a model response. If - # generated tokens would match this content, the entire model response can be - # returned much more quickly. + # generated tokens would match this content, the entire model response can be + # returned much more quickly. # # @return [String, Array] - required :content, union: -> { OpenAI::Models::Chat::ChatCompletionPredictionContent::Content } + required :content, union: -> { OpenAI::Chat::ChatCompletionPredictionContent::Content } # @!attribute type # The type of the predicted content you want to provide. This type is currently - # always `content`. + # always `content`. # # @return [Symbol, :content] required :type, const: :content - # @!parse - # # Static predicted output content, such as the content of a text file that is - # # being regenerated. - # # - # # @param content [String, Array] - # # @param type [Symbol, :content] - # # - # def initialize(content:, type: :content, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(content:, type: :content) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionPredictionContent} for more details. + # + # Static predicted output content, such as the content of a text file that is + # being regenerated.
+ # + # @param content [String, Array] The content that should be matched when generating a model response. # + # @param type [Symbol, :content] The type of the predicted content you want to provide. This type is + # The content that should be matched when generating a model response. If - # generated tokens would match this content, the entire model response can be - # returned much more quickly. - class Content < OpenAI::Union - ChatCompletionContentPartTextArray = OpenAI::ArrayOf[-> { OpenAI::Models::Chat::ChatCompletionContentPartText }] + # generated tokens would match this content, the entire model response can be + # returned much more quickly. + # + # @see OpenAI::Models::Chat::ChatCompletionPredictionContent#content + module Content + extend OpenAI::Internal::Type::Union # The content used for a Predicted Output. This is often the # text of a file you are regenerating with minor changes. variant String # An array of content parts with a defined type. Supported options differ based on the [model](https://platform.openai.com/docs/models) being used to generate the response. Can contain text inputs. - variant OpenAI::Models::Chat::ChatCompletionPredictionContent::Content::ChatCompletionContentPartTextArray + variant -> { OpenAI::Models::Chat::ChatCompletionPredictionContent::Content::ChatCompletionContentPartTextArray } + + # @!method self.variants + # @return [Array(String, Array)] + + # @type [OpenAI::Internal::Type::Converter] + ChatCompletionContentPartTextArray = + OpenAI::Internal::Type::ArrayOf[-> { OpenAI::Chat::ChatCompletionContentPartText }] end end end diff --git a/lib/openai/models/chat/chat_completion_role.rb b/lib/openai/models/chat/chat_completion_role.rb index 234d78a6..514fe22c 100644 --- a/lib/openai/models/chat/chat_completion_role.rb +++ b/lib/openai/models/chat/chat_completion_role.rb @@ -3,10 +3,10 @@ module OpenAI module Models module Chat - # @abstract - # # The role of the author of a message - class ChatCompletionRole < OpenAI::Enum + module ChatCompletionRole + extend OpenAI::Internal::Type::Enum + DEVELOPER = :developer SYSTEM = :system USER = :user @@ -14,7 +14,8 @@ class ChatCompletionRole < OpenAI::Enum TOOL = :tool FUNCTION = :function - finalize! + # @!method self.values + # @return [Array] end end diff --git a/lib/openai/models/chat/chat_completion_store_message.rb b/lib/openai/models/chat/chat_completion_store_message.rb index 6dc8cc30..f63c57fe 100644 --- a/lib/openai/models/chat/chat_completion_store_message.rb +++ b/lib/openai/models/chat/chat_completion_store_message.rb @@ -10,14 +10,41 @@ class ChatCompletionStoreMessage < OpenAI::Models::Chat::ChatCompletionMessage # @return [String] required :id, String - # @!parse - # # A chat completion message generated by the model. - # # - # # @param id [String] - # # - # def initialize(id:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!attribute content_parts + # If a content parts array was provided, this is an array of `text` and + # `image_url` parts. Otherwise, null. + # + # @return [Array, nil] + optional :content_parts, + -> { + OpenAI::Internal::Type::ArrayOf[union: OpenAI::Chat::ChatCompletionStoreMessage::ContentPart] + }, + nil?: true + + # @!method initialize(id:, content_parts: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionStoreMessage} for more details. + # + # A chat completion message generated by the model. + # + # @param id [String] The identifier of the chat message.
+ # + # @param content_parts [Array, nil] If a content parts array was provided, this is an array of `text` and `image_url + + # Learn about + # [text inputs](https://platform.openai.com/docs/guides/text-generation). module ContentPart + extend OpenAI::Internal::Type::Union + + # Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation). + variant -> { OpenAI::Chat::ChatCompletionContentPartText } + + # Learn about [image inputs](https://platform.openai.com/docs/guides/vision). + variant -> { OpenAI::Chat::ChatCompletionContentPartImage } + + # @!method self.variants + # @return [Array(OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage)] + end end end diff --git a/lib/openai/models/chat/chat_completion_stream_options.rb b/lib/openai/models/chat/chat_completion_stream_options.rb index be16b6d6..8ce0d03e 100644 --- a/lib/openai/models/chat/chat_completion_stream_options.rb +++ b/lib/openai/models/chat/chat_completion_stream_options.rb @@ -3,28 +3,39 @@ module OpenAI module Models module Chat - class ChatCompletionStreamOptions < OpenAI::BaseModel - # @!attribute [r] include_usage - # If set, an additional chunk will be streamed before the `data: [DONE]` message. - # The `usage` field on this chunk shows the token usage statistics for the entire - # request, and the `choices` field will always be an empty array. All other chunks - # will also include a `usage` field, but with a null value. + class ChatCompletionStreamOptions < OpenAI::Internal::Type::BaseModel + # @!attribute include_obfuscation + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. # # @return [Boolean, nil] - optional :include_usage, OpenAI::BooleanModel - - # @!parse - # # @return [Boolean] - # attr_writer :include_usage + optional :include_obfuscation, OpenAI::Internal::Type::Boolean - # @!parse - # # Options for streaming response. Only set this when you set `stream: true`. - # # - # # @param include_usage [Boolean] - # # - # def initialize(include_usage: nil, **) = super + # @!attribute include_usage + # If set, an additional chunk will be streamed before the `data: [DONE]` message. + # The `usage` field on this chunk shows the token usage statistics for the entire + # request, and the `choices` field will always be an empty array. + # + # All other chunks will also include a `usage` field, but with a null value. + # **NOTE:** If the stream is interrupted, you may not receive the final usage + # chunk which contains the total token usage for the request. + # + # @return [Boolean, nil] + optional :include_usage, OpenAI::Internal::Type::Boolean - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(include_obfuscation: nil, include_usage: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionStreamOptions} for more details. + # + # Options for streaming response. Only set this when you set `stream: true`. + # + # @param include_obfuscation [Boolean] When true, stream obfuscation will be enabled.
Stream obfuscation adds + # + # @param include_usage [Boolean] If set, an additional chunk will be streamed before the `data: [DONE]` end end diff --git a/lib/openai/models/chat/chat_completion_system_message_param.rb b/lib/openai/models/chat/chat_completion_system_message_param.rb index 188aa0be..2d391de3 100644 --- a/lib/openai/models/chat/chat_completion_system_message_param.rb +++ b/lib/openai/models/chat/chat_completion_system_message_param.rb @@ -3,12 +3,12 @@ module OpenAI module Models module Chat - class ChatCompletionSystemMessageParam < OpenAI::BaseModel + class ChatCompletionSystemMessageParam < OpenAI::Internal::Type::BaseModel # @!attribute content # The contents of the system message. # # @return [String, Array] - required :content, union: -> { OpenAI::Models::Chat::ChatCompletionSystemMessageParam::Content } + required :content, union: -> { OpenAI::Chat::ChatCompletionSystemMessageParam::Content } # @!attribute role # The role of the messages author, in this case `system`. @@ -16,41 +16,45 @@ class ChatCompletionSystemMessageParam < OpenAI::BaseModel # @return [Symbol, :system] required :role, const: :system - # @!attribute [r] name + # @!attribute name # An optional name for the participant. Provides the model information to - # differentiate between participants of the same role. + # differentiate between participants of the same role. # # @return [String, nil] optional :name, String - # @!parse - # # @return [String] - # attr_writer :name - - # @!parse - # # Developer-provided instructions that the model should follow, regardless of - # # messages sent by the user. With o1 models and newer, use `developer` messages - # # for this purpose instead. - # # - # # @param content [String, Array] - # # @param name [String] - # # @param role [Symbol, :system] - # # - # def initialize(content:, name: nil, role: :system, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(content:, name: nil, role: :system) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionSystemMessageParam} for more details. + # + # Developer-provided instructions that the model should follow, regardless of + # messages sent by the user. With o1 models and newer, use `developer` messages + # for this purpose instead. + # + # @param content [String, Array] The contents of the system message. # + # @param name [String] An optional name for the participant. Provides the model information to differen + # + # @param role [Symbol, :system] The role of the messages author, in this case `system`. + # The contents of the system message. - class Content < OpenAI::Union - ChatCompletionContentPartTextArray = OpenAI::ArrayOf[-> { OpenAI::Models::Chat::ChatCompletionContentPartText }] + # + # @see OpenAI::Models::Chat::ChatCompletionSystemMessageParam#content + module Content + extend OpenAI::Internal::Type::Union # The contents of the system message. variant String # An array of content parts with a defined type. For system messages, only type `text` is supported.
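# ---- Example (editor's sketch, not part of the patch) ----
# The Content union above accepts either a plain string or an array of `text`
# parts; both hashes below should coerce to ChatCompletionSystemMessageParam.
# The wording of the instructions is an illustrative assumption.
plain = {role: :system, content: "You are a terse assistant."}
parts = {
  role: :system,
  # For system messages, only `text` content parts are supported.
  content: [{type: :text, text: "You are a terse assistant."}]
}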
- variant OpenAI::Models::Chat::ChatCompletionSystemMessageParam::Content::ChatCompletionContentPartTextArray + variant -> { OpenAI::Models::Chat::ChatCompletionSystemMessageParam::Content::ChatCompletionContentPartTextArray } + + # @!method self.variants + # @return [Array(String, Array)] + + # @type [OpenAI::Internal::Type::Converter] + ChatCompletionContentPartTextArray = + OpenAI::Internal::Type::ArrayOf[-> { OpenAI::Chat::ChatCompletionContentPartText }] end end end diff --git a/lib/openai/models/chat/chat_completion_token_logprob.rb b/lib/openai/models/chat/chat_completion_token_logprob.rb index d4faf321..7a09b7c2 100644 --- a/lib/openai/models/chat/chat_completion_token_logprob.rb +++ b/lib/openai/models/chat/chat_completion_token_logprob.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Chat - class ChatCompletionTokenLogprob < OpenAI::BaseModel + class ChatCompletionTokenLogprob < OpenAI::Internal::Type::BaseModel # @!attribute token # The token. # @@ -12,41 +12,43 @@ class ChatCompletionTokenLogprob < OpenAI::BaseModel # @!attribute bytes # A list of integers representing the UTF-8 bytes representation of the token. - # Useful in instances where characters are represented by multiple tokens and - # their byte representations must be combined to generate the correct text - # representation. Can be `null` if there is no bytes representation for the token. + # Useful in instances where characters are represented by multiple tokens and + # their byte representations must be combined to generate the correct text + # representation. Can be `null` if there is no bytes representation for the token. # # @return [Array, nil] - required :bytes, OpenAI::ArrayOf[Integer], nil?: true + required :bytes, OpenAI::Internal::Type::ArrayOf[Integer], nil?: true # @!attribute logprob # The log probability of this token, if it is within the top 20 most likely - # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very - # unlikely. + # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + # unlikely. # # @return [Float] required :logprob, Float # @!attribute top_logprobs # List of the most likely tokens and their log probability, at this token - # position. In rare cases, there may be fewer than the number of requested - # `top_logprobs` returned. + # position. In rare cases, there may be fewer than the number of requested + # `top_logprobs` returned. # # @return [Array] required :top_logprobs, - -> { OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob] } + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob::TopLogprob] } - # @!parse - # # @param token [String] - # # @param bytes [Array, nil] - # # @param logprob [Float] - # # @param top_logprobs [Array] - # # - # def initialize(token:, bytes:, logprob:, top_logprobs:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(token:, bytes:, logprob:, top_logprobs:) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionTokenLogprob} for more details. + # + # @param token [String] The token. + # + # @param bytes [Array, nil] A list of integers representing the UTF-8 bytes representation of the token.
Use + # + # @param logprob [Float] The log probability of this token, if it is within the top 20 most likely tokens + # + # @param top_logprobs [Array] List of the most likely tokens and their log probability, at this token position - class TopLogprob < OpenAI::BaseModel + class TopLogprob < OpenAI::Internal::Type::BaseModel # @!attribute token # The token. # @@ -55,29 +57,30 @@ class TopLogprob < OpenAI::BaseModel # @!attribute bytes # A list of integers representing the UTF-8 bytes representation of the token. - # Useful in instances where characters are represented by multiple tokens and - # their byte representations must be combined to generate the correct text - # representation. Can be `null` if there is no bytes representation for the token. + # Useful in instances where characters are represented by multiple tokens and + # their byte representations must be combined to generate the correct text + # representation. Can be `null` if there is no bytes representation for the token. # # @return [Array, nil] - required :bytes, OpenAI::ArrayOf[Integer], nil?: true + required :bytes, OpenAI::Internal::Type::ArrayOf[Integer], nil?: true # @!attribute logprob # The log probability of this token, if it is within the top 20 most likely - # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very - # unlikely. + # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + # unlikely. # # @return [Float] required :logprob, Float - # @!parse - # # @param token [String] - # # @param bytes [Array, nil] - # # @param logprob [Float] - # # - # def initialize(token:, bytes:, logprob:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(token:, bytes:, logprob:) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob} for more details. + # + # @param token [String] The token. + # + # @param bytes [Array, nil] A list of integers representing the UTF-8 bytes representation of the token. Use + # + # @param logprob [Float] The log probability of this token, if it is within the top 20 most likely tokens end end end diff --git a/lib/openai/models/chat/chat_completion_tool.rb b/lib/openai/models/chat/chat_completion_tool.rb index f027725f..a9a2facf 100644 --- a/lib/openai/models/chat/chat_completion_tool.rb +++ b/lib/openai/models/chat/chat_completion_tool.rb @@ -3,25 +3,20 @@ module OpenAI module Models module Chat - class ChatCompletionTool < OpenAI::BaseModel - # @!attribute function - # - # @return [OpenAI::Models::FunctionDefinition] - required :function, -> { OpenAI::Models::FunctionDefinition } + # A function tool that can be used to generate a response. + module ChatCompletionTool + extend OpenAI::Internal::Type::Union - # @!attribute type - # The type of the tool. Currently, only `function` is supported. - # - # @return [Symbol, :function] - required :type, const: :function + discriminator :type - # @!parse - # # @param function [OpenAI::Models::FunctionDefinition] - # # @param type [Symbol, :function] - # # - # def initialize(function:, type: :function, **) = super + # A function tool that can be used to generate a response. + variant :function, -> { OpenAI::Chat::ChatCompletionFunctionTool } - # def initialize: (Hash | OpenAI::BaseModel) -> void + # A custom tool that processes input using a specified format.
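# ---- Example (editor's sketch, not part of the patch) ----
# The ChatCompletionTool union here dispatches on `type:`; the two hash shapes
# below are assumptions sketched from the variant comments, not from this diff.
function_tool = {
  type: :function,
  function: {name: "get_weather", parameters: {type: "object", properties: {}}}
}
custom_tool = {
  type: :custom,
  custom: {name: "code_exec"} # processes free-form input per a caller-defined format
}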
+ variant :custom, -> { OpenAI::Chat::ChatCompletionCustomTool } + + # @!method self.variants + # @return [Array(OpenAI::Models::Chat::ChatCompletionFunctionTool, OpenAI::Models::Chat::ChatCompletionCustomTool)] end end diff --git a/lib/openai/models/chat/chat_completion_tool_choice_option.rb b/lib/openai/models/chat/chat_completion_tool_choice_option.rb index c704bffb..cbb70889 100644 --- a/lib/openai/models/chat/chat_completion_tool_choice_option.rb +++ b/lib/openai/models/chat/chat_completion_tool_choice_option.rb @@ -3,36 +3,46 @@ module OpenAI module Models module Chat - # @abstract - # # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tool and instead generates a message. `auto` means the model can - # pick between generating a message or calling one or more tools. `required` means - # the model must call one or more tools. Specifying a particular tool via - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. # - # `none` is the default when no tools are present. `auto` is the default if tools - # are present. - class ChatCompletionToolChoiceOption < OpenAI::Union + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. + module ChatCompletionToolChoiceOption + extend OpenAI::Internal::Type::Union + # `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. - variant enum: -> { OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto } + variant enum: -> { OpenAI::Chat::ChatCompletionToolChoiceOption::Auto } + + # Constrains the tools available to the model to a pre-defined set. + variant -> { OpenAI::Chat::ChatCompletionAllowedToolChoice } # Specifies a tool the model should use. Use to force the model to call a specific function. - variant -> { OpenAI::Models::Chat::ChatCompletionNamedToolChoice } + variant -> { OpenAI::Chat::ChatCompletionNamedToolChoice } + + # Specifies a tool the model should use. Use to force the model to call a specific custom tool. + variant -> { OpenAI::Chat::ChatCompletionNamedToolChoiceCustom } - # @abstract - # # `none` means the model will not call any tool and instead generates a message. - # `auto` means the model can pick between generating a message or calling one or - # more tools. `required` means the model must call one or more tools. - class Auto < OpenAI::Enum + # `auto` means the model can pick between generating a message or calling one or + # more tools. `required` means the model must call one or more tools. + module Auto + extend OpenAI::Internal::Type::Enum + NONE = :none AUTO = :auto REQUIRED = :required - finalize! 
+ # @!method self.values + # @return [Array] end + + # @!method self.variants + # @return [Array(Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom)] end end diff --git a/lib/openai/models/chat/chat_completion_tool_message_param.rb b/lib/openai/models/chat/chat_completion_tool_message_param.rb index 9ec24f99..4685c606 100644 --- a/lib/openai/models/chat/chat_completion_tool_message_param.rb +++ b/lib/openai/models/chat/chat_completion_tool_message_param.rb @@ -3,12 +3,12 @@ module OpenAI module Models module Chat - class ChatCompletionToolMessageParam < OpenAI::BaseModel + class ChatCompletionToolMessageParam < OpenAI::Internal::Type::BaseModel # @!attribute content # The contents of the tool message. # # @return [String, Array] - required :content, union: -> { OpenAI::Models::Chat::ChatCompletionToolMessageParam::Content } + required :content, union: -> { OpenAI::Chat::ChatCompletionToolMessageParam::Content } # @!attribute role # The role of the messages author, in this case `tool`. @@ -22,26 +22,31 @@ class ChatCompletionToolMessageParam < OpenAI::BaseModel # @return [String] required :tool_call_id, String - # @!parse - # # @param content [String, Array] - # # @param tool_call_id [String] - # # @param role [Symbol, :tool] - # # - # def initialize(content:, tool_call_id:, role: :tool, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(content:, tool_call_id:, role: :tool) + # @param content [String, Array] The contents of the tool message. # + # @param tool_call_id [String] Tool call that this message is responding to. + # + # @param role [Symbol, :tool] The role of the messages author, in this case `tool`. + # The contents of the tool message. - class Content < OpenAI::Union - ChatCompletionContentPartTextArray = OpenAI::ArrayOf[-> { OpenAI::Models::Chat::ChatCompletionContentPartText }] + # + # @see OpenAI::Models::Chat::ChatCompletionToolMessageParam#content + module Content + extend OpenAI::Internal::Type::Union # The contents of the tool message. variant String # An array of content parts with a defined type. For tool messages, only type `text` is supported. - variant OpenAI::Models::Chat::ChatCompletionToolMessageParam::Content::ChatCompletionContentPartTextArray + variant -> { OpenAI::Models::Chat::ChatCompletionToolMessageParam::Content::ChatCompletionContentPartTextArray } + + # @!method self.variants + # @return [Array(String, Array)] + + # @type [OpenAI::Internal::Type::Converter] + ChatCompletionContentPartTextArray = + OpenAI::Internal::Type::ArrayOf[-> { OpenAI::Chat::ChatCompletionContentPartText }] end end end diff --git a/lib/openai/models/chat/chat_completion_user_message_param.rb b/lib/openai/models/chat/chat_completion_user_message_param.rb index 9f51546a..7335c7f0 100644 --- a/lib/openai/models/chat/chat_completion_user_message_param.rb +++ b/lib/openai/models/chat/chat_completion_user_message_param.rb @@ -3,12 +3,12 @@ module OpenAI module Models module Chat - class ChatCompletionUserMessageParam < OpenAI::BaseModel + class ChatCompletionUserMessageParam < OpenAI::Internal::Type::BaseModel # @!attribute content # The contents of the user message. 
# # @return [String, Array] - required :content, union: -> { OpenAI::Models::Chat::ChatCompletionUserMessageParam::Content } + required :content, union: -> { OpenAI::Chat::ChatCompletionUserMessageParam::Content } # @!attribute role # The role of the messages author, in this case `user`. @@ -16,40 +16,44 @@ class ChatCompletionUserMessageParam < OpenAI::BaseModel # @return [Symbol, :user] required :role, const: :user - # @!attribute [r] name + # @!attribute name # An optional name for the participant. Provides the model information to - # differentiate between participants of the same role. + # differentiate between participants of the same role. # # @return [String, nil] optional :name, String - # @!parse - # # @return [String] - # attr_writer :name - - # @!parse - # # Messages sent by an end user, containing prompts or additional context - # # information. - # # - # # @param content [String, Array] - # # @param name [String] - # # @param role [Symbol, :user] - # # - # def initialize(content:, name: nil, role: :user, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(content:, name: nil, role: :user) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionUserMessageParam} for more details. + # + # Messages sent by an end user, containing prompts or additional context + # information. + # + # @param content [String, Array] The contents of the user message. # + # @param name [String] An optional name for the participant. Provides the model information to differen + # + # @param role [Symbol, :user] The role of the messages author, in this case `user`. + # The contents of the user message. - class Content < OpenAI::Union - ChatCompletionContentPartArray = OpenAI::ArrayOf[union: -> { OpenAI::Models::Chat::ChatCompletionContentPart }] + # + # @see OpenAI::Models::Chat::ChatCompletionUserMessageParam#content + module Content + extend OpenAI::Internal::Type::Union # The text contents of the message. variant String # An array of content parts with a defined type. Supported options differ based on the [model](https://platform.openai.com/docs/models) being used to generate the response. Can contain text, image, or audio inputs.
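# ---- Example (editor's sketch, not part of the patch) ----
# A user message using the content-part array form of the union above; the
# image URL is an illustrative placeholder.
user_message = {
  role: :user,
  content: [
    {type: :text, text: "Describe this image."},
    {type: :image_url, image_url: {url: "https://example.com/photo.png"}}
  ]
}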
- variant OpenAI::Models::Chat::ChatCompletionUserMessageParam::Content::ChatCompletionContentPartArray + variant -> { OpenAI::Models::Chat::ChatCompletionUserMessageParam::Content::ChatCompletionContentPartArray } + + # @!method self.variants + # @return [Array(String, Array)] + + # @type [OpenAI::Internal::Type::Converter] + ChatCompletionContentPartArray = + OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Chat::ChatCompletionContentPart }] end end end diff --git a/lib/openai/models/chat/completion_create_params.rb b/lib/openai/models/chat/completion_create_params.rb index b77143a8..31eaede1 100644 --- a/lib/openai/models/chat/completion_create_params.rb +++ b/lib/openai/models/chat/completion_create_params.rb @@ -3,645 +3,690 @@ module OpenAI module Models module Chat - class CompletionCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Chat::Completions#create + # + # @see OpenAI::Resources::Chat::Completions#stream_raw + class CompletionCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute messages # A list of messages comprising the conversation so far. Depending on the - # [model](https://platform.openai.com/docs/models) you use, different message - # types (modalities) are supported, like - # [text](https://platform.openai.com/docs/guides/text-generation), - # [images](https://platform.openai.com/docs/guides/vision), and - # [audio](https://platform.openai.com/docs/guides/audio). + # [model](https://platform.openai.com/docs/models) you use, different message + # types (modalities) are supported, like + # [text](https://platform.openai.com/docs/guides/text-generation), + # [images](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio). # # @return [Array] - required :messages, -> { OpenAI::ArrayOf[union: OpenAI::Models::Chat::ChatCompletionMessageParam] } + required :messages, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Chat::ChatCompletionMessageParam] } # @!attribute model - # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. # # @return [String, Symbol, OpenAI::Models::ChatModel] - required :model, union: -> { OpenAI::Models::Chat::CompletionCreateParams::Model } + required :model, union: -> { OpenAI::Chat::CompletionCreateParams::Model } # @!attribute audio # Parameters for audio output. Required when audio output is requested with - # `modalities: ["audio"]`. - # [Learn more](https://platform.openai.com/docs/guides/audio). + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). 
# # @return [OpenAI::Models::Chat::ChatCompletionAudioParam, nil] - optional :audio, -> { OpenAI::Models::Chat::ChatCompletionAudioParam }, nil?: true + optional :audio, -> { OpenAI::Chat::ChatCompletionAudioParam }, nil?: true # @!attribute frequency_penalty # Number between -2.0 and 2.0. Positive values penalize new tokens based on their - # existing frequency in the text so far, decreasing the model's likelihood to - # repeat the same line verbatim. + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. # # @return [Float, nil] optional :frequency_penalty, Float, nil?: true - # @!attribute [r] function_call + # @!attribute function_call + # @deprecated + # # Deprecated in favor of `tool_choice`. # - # Controls which (if any) function is called by the model. + # Controls which (if any) function is called by the model. # - # `none` means the model will not call a function and instead generates a message. + # `none` means the model will not call a function and instead generates a message. # - # `auto` means the model can pick between generating a message or calling a - # function. + # `auto` means the model can pick between generating a message or calling a + # function. # - # Specifying a particular function via `{"name": "my_function"}` forces the model - # to call that function. + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. # - # `none` is the default when no functions are present. `auto` is the default if - # functions are present. + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. # # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption, nil] - optional :function_call, union: -> { OpenAI::Models::Chat::CompletionCreateParams::FunctionCall } + optional :function_call, union: -> { OpenAI::Chat::CompletionCreateParams::FunctionCall } - # @!parse - # # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption] - # attr_writer :function_call - - # @!attribute [r] functions + # @!attribute functions + # @deprecated + # # Deprecated in favor of `tools`. # - # A list of functions the model may generate JSON inputs for. + # A list of functions the model may generate JSON inputs for. # # @return [Array, nil] - optional :functions, -> { OpenAI::ArrayOf[OpenAI::Models::Chat::CompletionCreateParams::Function] } - - # @!parse - # # @return [Array] - # attr_writer :functions + optional :functions, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::CompletionCreateParams::Function] } # @!attribute logit_bias # Modify the likelihood of specified tokens appearing in the completion. # - # Accepts a JSON object that maps tokens (specified by their token ID in the - # tokenizer) to an associated bias value from -100 to 100. Mathematically, the - # bias is added to the logits generated by the model prior to sampling. The exact - # effect will vary per model, but values between -1 and 1 should decrease or - # increase likelihood of selection; values like -100 or 100 should result in a ban - # or exclusive selection of the relevant token. + # Accepts a JSON object that maps tokens (specified by their token ID in the + # tokenizer) to an associated bias value from -100 to 100. 
Mathematically, the + # bias is added to the logits generated by the model prior to sampling. The exact + # effect will vary per model, but values between -1 and 1 should decrease or + # increase likelihood of selection; values like -100 or 100 should result in a ban + # or exclusive selection of the relevant token. # # @return [Hash{Symbol=>Integer}, nil] - optional :logit_bias, OpenAI::HashOf[Integer], nil?: true + optional :logit_bias, OpenAI::Internal::Type::HashOf[Integer], nil?: true # @!attribute logprobs # Whether to return log probabilities of the output tokens or not. If true, - # returns the log probabilities of each output token returned in the `content` of - # `message`. + # returns the log probabilities of each output token returned in the `content` of + # `message`. # # @return [Boolean, nil] - optional :logprobs, OpenAI::BooleanModel, nil?: true + optional :logprobs, OpenAI::Internal::Type::Boolean, nil?: true # @!attribute max_completion_tokens # An upper bound for the number of tokens that can be generated for a completion, - # including visible output tokens and - # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). # # @return [Integer, nil] optional :max_completion_tokens, Integer, nil?: true # @!attribute max_tokens + # @deprecated + # # The maximum number of [tokens](/tokenizer) that can be generated in the chat - # completion. This value can be used to control - # [costs](https://openai.com/api/pricing/) for text generated via API. + # completion. This value can be used to control + # [costs](https://openai.com/api/pricing/) for text generated via API. # - # This value is now deprecated in favor of `max_completion_tokens`, and is not - # compatible with - # [o1 series models](https://platform.openai.com/docs/guides/reasoning). + # This value is now deprecated in favor of `max_completion_tokens`, and is not + # compatible with + # [o-series models](https://platform.openai.com/docs/guides/reasoning). # # @return [Integer, nil] optional :max_tokens, Integer, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute modalities # Output types that you would like the model to generate. Most models are capable - # of generating text, which is the default: + # of generating text, which is the default: # - # `["text"]` + # `["text"]` # - # The `gpt-4o-audio-preview` model can also be used to - # [generate audio](https://platform.openai.com/docs/guides/audio). To request that - # this model generate both text and audio responses, you can use: + # The `gpt-4o-audio-preview` model can also be used to + # [generate audio](https://platform.openai.com/docs/guides/audio). 
To request that + # this model generate both text and audio responses, you can use: # - # `["text", "audio"]` + # `["text", "audio"]` # # @return [Array, nil] optional :modalities, - -> { OpenAI::ArrayOf[enum: OpenAI::Models::Chat::CompletionCreateParams::Modality] }, + -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Chat::CompletionCreateParams::Modality] }, nil?: true # @!attribute n # How many chat completion choices to generate for each input message. Note that - # you will be charged based on the number of generated tokens across all of the - # choices. Keep `n` as `1` to minimize costs. + # you will be charged based on the number of generated tokens across all of the + # choices. Keep `n` as `1` to minimize costs. # # @return [Integer, nil] optional :n, Integer, nil?: true - # @!attribute [r] parallel_tool_calls + # @!attribute parallel_tool_calls # Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. # # @return [Boolean, nil] - optional :parallel_tool_calls, OpenAI::BooleanModel - - # @!parse - # # @return [Boolean] - # attr_writer :parallel_tool_calls + optional :parallel_tool_calls, OpenAI::Internal::Type::Boolean # @!attribute prediction # Static predicted output content, such as the content of a text file that is - # being regenerated. + # being regenerated. # # @return [OpenAI::Models::Chat::ChatCompletionPredictionContent, nil] - optional :prediction, -> { OpenAI::Models::Chat::ChatCompletionPredictionContent }, nil?: true + optional :prediction, -> { OpenAI::Chat::ChatCompletionPredictionContent }, nil?: true # @!attribute presence_penalty # Number between -2.0 and 2.0. Positive values penalize new tokens based on - # whether they appear in the text so far, increasing the model's likelihood to - # talk about new topics. + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. # # @return [Float, nil] optional :presence_penalty, Float, nil?: true - # @!attribute reasoning_effort - # **o-series models only** + # @!attribute prompt_cache_key + # Used by OpenAI to cache responses for similar requests to optimize your cache + # hit rates. Replaces the `user` field. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # @return [String, nil] + optional :prompt_cache_key, String + + # @!attribute reasoning_effort + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] - optional :reasoning_effort, enum: -> { OpenAI::Models::ReasoningEffort }, nil?: true + optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true - # @!attribute [r] response_format + # @!attribute response_format # An object specifying the format that the model must output. 
# - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. # # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject, nil] - optional :response_format, union: -> { OpenAI::Models::Chat::CompletionCreateParams::ResponseFormat } + optional :response_format, union: -> { OpenAI::Chat::CompletionCreateParams::ResponseFormat } - # @!parse - # # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] - # attr_writer :response_format + # @!attribute safety_identifier + # A stable identifier used to help detect users of your application that may be + # violating OpenAI's usage policies. The IDs should be a string that uniquely + # identifies each user. We recommend hashing their username or email address, in + # order to avoid sending us any identifying information. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + # + # @return [String, nil] + optional :safety_identifier, String # @!attribute seed + # @deprecated + # # This feature is in Beta. If specified, our system will make a best effort to - # sample deterministically, such that repeated requests with the same `seed` and - # parameters should return the same result. Determinism is not guaranteed, and you - # should refer to the `system_fingerprint` response parameter to monitor changes - # in the backend. + # sample deterministically, such that repeated requests with the same `seed` and + # parameters should return the same result. Determinism is not guaranteed, and you + # should refer to the `system_fingerprint` response parameter to monitor changes + # in the backend. # # @return [Integer, nil] optional :seed, Integer, nil?: true # @!attribute service_tier - # Specifies the latency tier to use for processing the request. This parameter is - # relevant for customers subscribed to the scale tier service: - # - # - If set to 'auto', and the Project is Scale tier enabled, the system will - # utilize scale tier credits until they are exhausted. - # - If set to 'auto', and the Project is not Scale tier enabled, the request will - # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. - # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. - # - When not set, the default behavior is 'auto'. + # Specifies the processing type used for serving the request. 
+ # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. # - # When this parameter is set, the response body will include the `service_tier` - # utilized. + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. # # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] - optional :service_tier, enum: -> { OpenAI::Models::Chat::CompletionCreateParams::ServiceTier }, nil?: true + optional :service_tier, enum: -> { OpenAI::Chat::CompletionCreateParams::ServiceTier }, nil?: true # @!attribute stop + # Not supported with latest reasoning models `o3` and `o4-mini`. + # # Up to 4 sequences where the API will stop generating further tokens. The - # returned text will not contain the stop sequence. + # returned text will not contain the stop sequence. # # @return [String, Array, nil] - optional :stop, union: -> { OpenAI::Models::Chat::CompletionCreateParams::Stop }, nil?: true + optional :stop, union: -> { OpenAI::Chat::CompletionCreateParams::Stop }, nil?: true # @!attribute store # Whether or not to store the output of this chat completion request for use in - # our [model distillation](https://platform.openai.com/docs/guides/distillation) - # or [evals](https://platform.openai.com/docs/guides/evals) products. + # our [model distillation](https://platform.openai.com/docs/guides/distillation) + # or [evals](https://platform.openai.com/docs/guides/evals) products. + # + # Supports text and image inputs. Note: image inputs over 8MB will be dropped. # # @return [Boolean, nil] - optional :store, OpenAI::BooleanModel, nil?: true + optional :store, OpenAI::Internal::Type::Boolean, nil?: true # @!attribute stream_options # Options for streaming response. Only set this when you set `stream: true`. # # @return [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] - optional :stream_options, -> { OpenAI::Models::Chat::ChatCompletionStreamOptions }, nil?: true + optional :stream_options, -> { OpenAI::Chat::ChatCompletionStreamOptions }, nil?: true # @!attribute temperature # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. We generally recommend altering this or `top_p` but - # not both. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. # # @return [Float, nil] optional :temperature, Float, nil?: true - # @!attribute [r] tool_choice + # @!attribute tool_choice # Controls which (if any) tool is called by the model. `none` means the model will - # not call any tool and instead generates a message. `auto` means the model can - # pick between generating a message or calling one or more tools. 
`required` means - # the model must call one or more tools. Specifying a particular tool via - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. # - # `none` is the default when no tools are present. `auto` is the default if tools - # are present. + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. # - # @return [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, nil] - optional :tool_choice, union: -> { OpenAI::Models::Chat::ChatCompletionToolChoiceOption } - - # @!parse - # # @return [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice] - # attr_writer :tool_choice + # @return [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom, nil] + optional :tool_choice, union: -> { OpenAI::Chat::ChatCompletionToolChoiceOption } - # @!attribute [r] tools - # A list of tools the model may call. Currently, only functions are supported as a - # tool. Use this to provide a list of functions the model may generate JSON inputs - # for. A max of 128 functions are supported. + # @!attribute tools + # A list of tools the model may call. You can provide either + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + # or [function tools](https://platform.openai.com/docs/guides/function-calling). # - # @return [Array, nil] - optional :tools, -> { OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletionTool] } - - # @!parse - # # @return [Array] - # attr_writer :tools + # @return [Array, nil] + optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Chat::ChatCompletionTool] } # @!attribute top_logprobs # An integer between 0 and 20 specifying the number of most likely tokens to - # return at each token position, each with an associated log probability. - # `logprobs` must be set to `true` if this parameter is used. + # return at each token position, each with an associated log probability. + # `logprobs` must be set to `true` if this parameter is used. # # @return [Integer, nil] optional :top_logprobs, Integer, nil?: true # @!attribute top_p # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or `temperature` but not both. + # We generally recommend altering this or `temperature` but not both. # # @return [Float, nil] optional :top_p, Float, nil?: true - # @!attribute [r] user - # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. 
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # @!attribute user + # @deprecated + # + # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + # `prompt_cache_key` instead to maintain caching optimizations. A stable + # identifier for your end-users. Used to boost cache hit rates by better bucketing + # similar requests and to help OpenAI detect and prevent abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). # # @return [String, nil] optional :user, String - # @!parse - # # @return [String] - # attr_writer :user + # @!attribute verbosity + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + # + # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Verbosity, nil] + optional :verbosity, enum: -> { OpenAI::Chat::CompletionCreateParams::Verbosity }, nil?: true - # @!attribute [r] web_search_options + # @!attribute web_search_options # This tool searches the web for relevant results to use in a response. Learn more - # about the - # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). # # @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, nil] - optional :web_search_options, -> { OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions } - - # @!parse - # # @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] - # attr_writer :web_search_options - - # @!parse - # # @param messages [Array] - # # @param model [String, Symbol, OpenAI::Models::ChatModel] - # # @param audio [OpenAI::Models::Chat::ChatCompletionAudioParam, nil] - # # @param frequency_penalty [Float, nil] - # # @param function_call [Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption] - # # @param functions [Array] - # # @param logit_bias [Hash{Symbol=>Integer}, nil] - # # @param logprobs [Boolean, nil] - # # @param max_completion_tokens [Integer, nil] - # # @param max_tokens [Integer, nil] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param modalities [Array, nil] - # # @param n [Integer, nil] - # # @param parallel_tool_calls [Boolean] - # # @param prediction [OpenAI::Models::Chat::ChatCompletionPredictionContent, nil] - # # @param presence_penalty [Float, nil] - # # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] - # # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] - # # @param seed [Integer, nil] - # # @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] - # # @param stop [String, Array, nil] - # # @param store [Boolean, nil] - # # @param stream_options [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] - # # @param temperature [Float, nil] - # # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice] - # # @param tools [Array] - # # @param top_logprobs [Integer, nil] - # # @param top_p [Float, nil] - # # @param user [String] - # # @param web_search_options 
[OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize( - # messages:, - # model:, - # audio: nil, - # frequency_penalty: nil, - # function_call: nil, - # functions: nil, - # logit_bias: nil, - # logprobs: nil, - # max_completion_tokens: nil, - # max_tokens: nil, - # metadata: nil, - # modalities: nil, - # n: nil, - # parallel_tool_calls: nil, - # prediction: nil, - # presence_penalty: nil, - # reasoning_effort: nil, - # response_format: nil, - # seed: nil, - # service_tier: nil, - # stop: nil, - # store: nil, - # stream_options: nil, - # temperature: nil, - # tool_choice: nil, - # tools: nil, - # top_logprobs: nil, - # top_p: nil, - # user: nil, - # web_search_options: nil, - # request_options: {}, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract - # - # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. - class Model < OpenAI::Union + optional :web_search_options, -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions } + + # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::CompletionCreateParams} for more details. + # + # @param messages [Array] A list of messages comprising the conversation so far. Depending on the + # + # @param model [String, Symbol, OpenAI::Models::ChatModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI + # + # @param audio [OpenAI::Models::Chat::ChatCompletionAudioParam, nil] Parameters for audio output. Required when audio output is requested with + # + # @param frequency_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on + # + # @param function_call [Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption] Deprecated in favor of `tool_choice`. + # + # @param functions [Array] Deprecated in favor of `tools`. + # + # @param logit_bias [Hash{Symbol=>Integer}, nil] Modify the likelihood of specified tokens appearing in the completion. + # + # @param logprobs [Boolean, nil] Whether to return log probabilities of the output tokens or not. If true, + # + # @param max_completion_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a completion, + # + # @param max_tokens [Integer, nil] The maximum number of [tokens](/tokenizer) that can be generated in the + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. 
This can be + # + # @param modalities [Array, nil] Output types that you would like the model to generate. + # + # @param n [Integer, nil] How many chat completion choices to generate for each input message. Note that y + # + # @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g + # + # @param prediction [OpenAI::Models::Chat::ChatCompletionPredictionContent, nil] Static predicted output content, such as the content of a text file that is + # + # @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on + # + # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi + # + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for + # + # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. + # + # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi + # + # @param seed [Integer, nil] This feature is in Beta. + # + # @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request. + # + # @param stop [String, Array, nil] Not supported with latest reasoning models `o3` and `o4-mini`. + # + # @param store [Boolean, nil] Whether or not to store the output of this chat completion request for + # + # @param stream_options [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] Options for streaming response. Only set this when you set `stream: true`. + # + # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m + # + # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom] Controls which (if any) tool is called by the model. + # + # @param tools [Array] A list of tools the model may call. You can provide either + # + # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to + # + # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, + # + # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + # + # @param verbosity [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in + # + # @param web_search_options [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] This tool searches the web for relevant results to use in a response. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. + module Model + extend OpenAI::Internal::Type::Union + variant String - # Model ID used to generate the response, like `gpt-4o` or `o1`. 
OpenAI + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI # offers a wide range of models with different capabilities, performance # characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models) # to browse and compare available models. - variant enum: -> { OpenAI::Models::ChatModel } + variant enum: -> { OpenAI::ChatModel } + + # @!method self.variants + # @return [Array(String, Symbol, OpenAI::Models::ChatModel)] end - # @abstract - # # @deprecated # # Deprecated in favor of `tool_choice`. # - # Controls which (if any) function is called by the model. + # Controls which (if any) function is called by the model. # - # `none` means the model will not call a function and instead generates a message. + # `none` means the model will not call a function and instead generates a message. # - # `auto` means the model can pick between generating a message or calling a - # function. + # `auto` means the model can pick between generating a message or calling a + # function. # - # Specifying a particular function via `{"name": "my_function"}` forces the model - # to call that function. + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. # - # `none` is the default when no functions are present. `auto` is the default if - # functions are present. - class FunctionCall < OpenAI::Union + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. + module FunctionCall + extend OpenAI::Internal::Type::Union + # `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. - variant enum: -> { OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode } + variant enum: -> { OpenAI::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode } # Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. - variant -> { OpenAI::Models::Chat::ChatCompletionFunctionCallOption } + variant -> { OpenAI::Chat::ChatCompletionFunctionCallOption } - # @abstract - # # `none` means the model will not call a function and instead generates a message. - # `auto` means the model can pick between generating a message or calling a - # function. - class FunctionCallMode < OpenAI::Enum + # `auto` means the model can pick between generating a message or calling a + # function. + module FunctionCallMode + extend OpenAI::Internal::Type::Enum + NONE = :none AUTO = :auto - finalize! + # @!method self.values + # @return [Array] end + + # @!method self.variants + # @return [Array(Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption)] end # @deprecated - # - class Function < OpenAI::BaseModel + class Function < OpenAI::Internal::Type::BaseModel # @!attribute name # The name of the function to be called. Must be a-z, A-Z, 0-9, or contain - # underscores and dashes, with a maximum length of 64. + # underscores and dashes, with a maximum length of 64. # # @return [String] required :name, String - # @!attribute [r] description + # @!attribute description # A description of what the function does, used by the model to choose when and - # how to call the function. + # how to call the function. 
# # @return [String, nil] optional :description, String - # @!parse - # # @return [String] - # attr_writer :description - - # @!attribute [r] parameters + # @!attribute parameters # The parameters the functions accepts, described as a JSON Schema object. See the - # [guide](https://platform.openai.com/docs/guides/function-calling) for examples, - # and the - # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for - # documentation about the format. + # [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + # and the + # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + # documentation about the format. # - # Omitting `parameters` defines a function with an empty parameter list. + # Omitting `parameters` defines a function with an empty parameter list. # # @return [Hash{Symbol=>Object}, nil] - optional :parameters, OpenAI::HashOf[OpenAI::Unknown] - - # @!parse - # # @return [Hash{Symbol=>Object}] - # attr_writer :parameters + optional :parameters, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] - # @!parse - # # @param name [String] - # # @param description [String] - # # @param parameters [Hash{Symbol=>Object}] - # # - # def initialize(name:, description: nil, parameters: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(name:, description: nil, parameters: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::CompletionCreateParams::Function} for more details. + # + # @param name [String] The name of the function to be called. Must be a-z, A-Z, 0-9, or contain undersc + # + # @param description [String] A description of what the function does, used by the model to choose when and ho + # + # @param parameters [Hash{Symbol=>Object}] The parameters the functions accepts, described as a JSON Schema object. See the end - # @abstract - # - class Modality < OpenAI::Enum + module Modality + extend OpenAI::Internal::Type::Enum + TEXT = :text AUDIO = :audio - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # # An object specifying the format that the model must output. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. - class ResponseFormat < OpenAI::Union + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + module ResponseFormat + extend OpenAI::Internal::Type::Union + # Default response format. Used to generate text responses. - variant -> { OpenAI::Models::ResponseFormatText } + variant -> { OpenAI::ResponseFormatText } # JSON Schema response format. Used to generate structured JSON responses. 
# Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). - variant -> { OpenAI::Models::ResponseFormatJSONSchema } + variant -> { OpenAI::ResponseFormatJSONSchema } # JSON object response format. An older method of generating JSON responses. # Using `json_schema` is recommended for models that support it. Note that the # model will not generate JSON without a system or user message instructing it # to do so. - variant -> { OpenAI::Models::ResponseFormatJSONObject } + variant -> { OpenAI::ResponseFormatJSONObject } + + # @!method self.variants + # @return [Array(OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject)] end - # @abstract - # - # Specifies the latency tier to use for processing the request. This parameter is - # relevant for customers subscribed to the scale tier service: - # - # - If set to 'auto', and the Project is Scale tier enabled, the system will - # utilize scale tier credits until they are exhausted. - # - If set to 'auto', and the Project is not Scale tier enabled, the request will - # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. - # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. - # - When not set, the default behavior is 'auto'. - # - # When this parameter is set, the response body will include the `service_tier` - # utilized. - class ServiceTier < OpenAI::Enum + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + module ServiceTier + extend OpenAI::Internal::Type::Enum + AUTO = :auto DEFAULT = :default + FLEX = :flex + SCALE = :scale + PRIORITY = :priority - finalize! + # @!method self.values + # @return [Array] end - # @abstract + # Not supported with latest reasoning models `o3` and `o4-mini`. # # Up to 4 sequences where the API will stop generating further tokens. The - # returned text will not contain the stop sequence. - class Stop < OpenAI::Union - StringArray = OpenAI::ArrayOf[String] + # returned text will not contain the stop sequence. 
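(Usage sketch, illustrative only: the reworked `ServiceTier` enum gains `flex`, `scale`, and `priority`. The parameter name and enum members come from this hunk; pairing flex with `o3` reflects the flex-processing docs and is otherwise an assumption.)

    require "openai"

    client = OpenAI::Client.new

    chat = client.chat.completions.create(
      model: :o3,
      messages: [{role: "user", content: "Summarize RFC 2119 in one sentence."}],
      service_tier: :flex # slower, cheaper processing tier
    )

    # The response echoes the tier actually used, which may differ
    # from the one requested.
    puts chat.service_tier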
+ module Stop + extend OpenAI::Internal::Type::Union variant String - variant OpenAI::Models::Chat::CompletionCreateParams::Stop::StringArray + variant -> { OpenAI::Models::Chat::CompletionCreateParams::Stop::StringArray } + + # @!method self.variants + # @return [Array(String, Array)] + + # @type [OpenAI::Internal::Type::Converter] + StringArray = OpenAI::Internal::Type::ArrayOf[String] end - class WebSearchOptions < OpenAI::BaseModel - # @!attribute [r] search_context_size + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + module Verbosity + extend OpenAI::Internal::Type::Enum + + LOW = :low + MEDIUM = :medium + HIGH = :high + + # @!method self.values + # @return [Array] + end + + class WebSearchOptions < OpenAI::Internal::Type::BaseModel + # @!attribute search_context_size # High level guidance for the amount of context window space to use for the - # search. One of `low`, `medium`, or `high`. `medium` is the default. + # search. One of `low`, `medium`, or `high`. `medium` is the default. # # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize, nil] optional :search_context_size, - enum: -> { OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize } - - # @!parse - # # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize] - # attr_writer :search_context_size + enum: -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize } # @!attribute user_location # Approximate location parameters for the search. # # @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation, nil] optional :user_location, - -> { OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation }, + -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation }, nil?: true - # @!parse - # # This tool searches the web for relevant results to use in a response. Learn more - # # about the - # # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). - # # - # # @param search_context_size [Symbol, OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize] - # # @param user_location [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation, nil] - # # - # def initialize(search_context_size: nil, user_location: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(search_context_size: nil, user_location: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions} for more + # details. + # + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). # + # @param search_context_size [Symbol, OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize] High level guidance for the amount of context window space to use for the + # + # @param user_location [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation, nil] Approximate location parameters for the search. + # High level guidance for the amount of context window space to use for the - # search. 
One of `low`, `medium`, or `high`. `medium` is the default. - class SearchContextSize < OpenAI::Enum + # search. One of `low`, `medium`, or `high`. `medium` is the default. + # + # @see OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions#search_context_size + module SearchContextSize + extend OpenAI::Internal::Type::Enum + LOW = :low MEDIUM = :medium HIGH = :high - finalize! + # @!method self.values + # @return [Array] end - class UserLocation < OpenAI::BaseModel + # @see OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions#user_location + class UserLocation < OpenAI::Internal::Type::BaseModel # @!attribute approximate # Approximate location parameters for the search. # # @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate] required :approximate, - -> { OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate } + -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate } # @!attribute type # The type of location approximation. Always `approximate`. @@ -649,70 +694,59 @@ class UserLocation < OpenAI::BaseModel # @return [Symbol, :approximate] required :type, const: :approximate - # @!parse - # # Approximate location parameters for the search. - # # - # # @param approximate [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate] - # # @param type [Symbol, :approximate] - # # - # def initialize(approximate:, type: :approximate, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(approximate:, type: :approximate) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation} + # for more details. + # + # Approximate location parameters for the search. + # + # @param approximate [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate] Approximate location parameters for the search. + # + # @param type [Symbol, :approximate] The type of location approximation. Always `approximate`. - class Approximate < OpenAI::BaseModel - # @!attribute [r] city + # @see OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation#approximate + class Approximate < OpenAI::Internal::Type::BaseModel + # @!attribute city # Free text input for the city of the user, e.g. `San Francisco`. # # @return [String, nil] optional :city, String - # @!parse - # # @return [String] - # attr_writer :city - - # @!attribute [r] country + # @!attribute country # The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of - # the user, e.g. `US`. + # the user, e.g. `US`. # # @return [String, nil] optional :country, String - # @!parse - # # @return [String] - # attr_writer :country - - # @!attribute [r] region + # @!attribute region # Free text input for the region of the user, e.g. `California`. # # @return [String, nil] optional :region, String - # @!parse - # # @return [String] - # attr_writer :region - - # @!attribute [r] timezone + # @!attribute timezone # The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the - # user, e.g. `America/Los_Angeles`. + # user, e.g. `America/Los_Angeles`. # # @return [String, nil] optional :timezone, String - # @!parse - # # @return [String] - # attr_writer :timezone - - # @!parse - # # Approximate location parameters for the search. 
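(Usage sketch, illustrative only: wiring together `WebSearchOptions`, `UserLocation`, and `Approximate` as defined in this hunk. The search-preview model name appears later in this diff's `ChatModel` enum; the location values are placeholders.)

    require "openai"

    client = OpenAI::Client.new

    chat = client.chat.completions.create(
      model: :"gpt-4o-search-preview",
      messages: [{role: "user", content: "Any transit disruptions near me today?"}],
      web_search_options: {
        search_context_size: :low, # :low, :medium (default), or :high
        user_location: {
          type: :approximate, # always :approximate
          approximate: {
            city: "San Francisco",
            region: "California",
            country: "US", # two-letter ISO code
            timezone: "America/Los_Angeles" # IANA timezone
          }
        }
      }
    )
    puts chat.choices.first.message.content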
- # # - # # @param city [String] - # # @param country [String] - # # @param region [String] - # # @param timezone [String] - # # - # def initialize(city: nil, country: nil, region: nil, timezone: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(city: nil, country: nil, region: nil, timezone: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate} + # for more details. + # + # Approximate location parameters for the search. + # + # @param city [String] Free text input for the city of the user, e.g. `San Francisco`. + # + # @param country [String] The two-letter + # + # @param region [String] Free text input for the region of the user, e.g. `California`. + # + # @param timezone [String] The [IANA timezone](https://timeapi.io/documentation/iana-timezones) end end end diff --git a/lib/openai/models/chat/completion_delete_params.rb b/lib/openai/models/chat/completion_delete_params.rb index d3a43ac0..819d9af4 100644 --- a/lib/openai/models/chat/completion_delete_params.rb +++ b/lib/openai/models/chat/completion_delete_params.rb @@ -3,17 +3,13 @@ module OpenAI module Models module Chat - class CompletionDeleteParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Chat::Completions#delete + class CompletionDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/chat/completion_list_params.rb b/lib/openai/models/chat/completion_list_params.rb index 81936268..0e18202c 100644 --- a/lib/openai/models/chat/completion_list_params.rb +++ b/lib/openai/models/chat/completion_list_params.rb @@ -3,81 +3,70 @@ module OpenAI module Models module Chat - class CompletionListParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Chat::Completions#list + class CompletionListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!attribute [r] after + # @!attribute after # Identifier for the last chat completion from the previous pagination request. # # @return [String, nil] optional :after, String - # @!parse - # # @return [String] - # attr_writer :after - - # @!attribute [r] limit + # @!attribute limit # Number of Chat Completions to retrieve. # # @return [Integer, nil] optional :limit, Integer - # @!parse - # # @return [Integer] - # attr_writer :limit - # @!attribute metadata # A list of metadata keys to filter the Chat Completions by. 
Example: # - # `metadata[key1]=value1&metadata[key2]=value2` + # `metadata[key1]=value1&metadata[key2]=value2` # # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true - # @!attribute [r] model + # @!attribute model # The model used to generate the Chat Completions. # # @return [String, nil] optional :model, String - # @!parse - # # @return [String] - # attr_writer :model - - # @!attribute [r] order + # @!attribute order # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or - # `desc` for descending order. Defaults to `asc`. + # `desc` for descending order. Defaults to `asc`. # # @return [Symbol, OpenAI::Models::Chat::CompletionListParams::Order, nil] - optional :order, enum: -> { OpenAI::Models::Chat::CompletionListParams::Order } + optional :order, enum: -> { OpenAI::Chat::CompletionListParams::Order } - # @!parse - # # @return [Symbol, OpenAI::Models::Chat::CompletionListParams::Order] - # attr_writer :order - - # @!parse - # # @param after [String] - # # @param limit [Integer] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param model [String] - # # @param order [Symbol, OpenAI::Models::Chat::CompletionListParams::Order] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(after: nil, limit: nil, metadata: nil, model: nil, order: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(after: nil, limit: nil, metadata: nil, model: nil, order: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::CompletionListParams} for more details. + # + # @param after [String] Identifier for the last chat completion from the previous pagination request. # + # @param limit [Integer] Number of Chat Completions to retrieve. + # + # @param metadata [Hash{Symbol=>String}, nil] A list of metadata keys to filter the Chat Completions by. Example: + # + # @param model [String] The model used to generate the Chat Completions. + # + # @param order [Symbol, OpenAI::Models::Chat::CompletionListParams::Order] Sort order for Chat Completions by timestamp. Use `asc` for ascending order or ` + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or - # `desc` for descending order. Defaults to `asc`. - class Order < OpenAI::Enum + # `desc` for descending order. Defaults to `asc`. + module Order + extend OpenAI::Internal::Type::Enum + ASC = :asc DESC = :desc - finalize! 
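(Usage sketch, illustrative only: the list parameters above map onto `OpenAI::Resources::Chat::Completions#list`, per the `@see` tag. The metadata key is hypothetical, and the cursor page is assumed to expose the SDK's usual `auto_paging_each`.)

    require "openai"

    client = OpenAI::Client.new

    # Page through stored chat completions, newest first,
    # filtered by model and by a metadata key.
    page = client.chat.completions.list(
      limit: 20,
      order: :desc,
      model: "gpt-4o",
      metadata: {conversation_id: "conv_123"}
    )
    page.auto_paging_each { |completion| puts completion.id }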
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/chat/completion_retrieve_params.rb b/lib/openai/models/chat/completion_retrieve_params.rb index 248e8caf..50730021 100644 --- a/lib/openai/models/chat/completion_retrieve_params.rb +++ b/lib/openai/models/chat/completion_retrieve_params.rb @@ -3,17 +3,13 @@ module OpenAI module Models module Chat - class CompletionRetrieveParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Chat::Completions#retrieve + class CompletionRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/chat/completion_update_params.rb b/lib/openai/models/chat/completion_update_params.rb index bc642e24..c30b1a6b 100644 --- a/lib/openai/models/chat/completion_update_params.rb +++ b/lib/openai/models/chat/completion_update_params.rb @@ -3,29 +3,29 @@ module OpenAI module Models module Chat - class CompletionUpdateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Chat::Completions#update + class CompletionUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - required :metadata, OpenAI::HashOf[String], nil?: true + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true - # @!parse - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(metadata:, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(metadata:, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::CompletionUpdateParams} for more details. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. 
This can be + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/chat/completions/message_list_params.rb b/lib/openai/models/chat/completions/message_list_params.rb index 66af6ada..8f2c139c 100644 --- a/lib/openai/models/chat/completions/message_list_params.rb +++ b/lib/openai/models/chat/completions/message_list_params.rb @@ -4,61 +4,52 @@ module OpenAI module Models module Chat module Completions - class MessageListParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Chat::Completions::Messages#list + class MessageListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!attribute [r] after + # @!attribute after # Identifier for the last message from the previous pagination request. # # @return [String, nil] optional :after, String - # @!parse - # # @return [String] - # attr_writer :after - - # @!attribute [r] limit + # @!attribute limit # Number of messages to retrieve. # # @return [Integer, nil] optional :limit, Integer - # @!parse - # # @return [Integer] - # attr_writer :limit - - # @!attribute [r] order + # @!attribute order # Sort order for messages by timestamp. Use `asc` for ascending order or `desc` - # for descending order. Defaults to `asc`. + # for descending order. Defaults to `asc`. # # @return [Symbol, OpenAI::Models::Chat::Completions::MessageListParams::Order, nil] - optional :order, enum: -> { OpenAI::Models::Chat::Completions::MessageListParams::Order } - - # @!parse - # # @return [Symbol, OpenAI::Models::Chat::Completions::MessageListParams::Order] - # attr_writer :order - - # @!parse - # # @param after [String] - # # @param limit [Integer] - # # @param order [Symbol, OpenAI::Models::Chat::Completions::MessageListParams::Order] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(after: nil, limit: nil, order: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + optional :order, enum: -> { OpenAI::Chat::Completions::MessageListParams::Order } - # @abstract + # @!method initialize(after: nil, limit: nil, order: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::Completions::MessageListParams} for more details. # + # @param after [String] Identifier for the last message from the previous pagination request. + # + # @param limit [Integer] Number of messages to retrieve. + # + # @param order [Symbol, OpenAI::Models::Chat::Completions::MessageListParams::Order] Sort order for messages by timestamp. Use `asc` for ascending order or `desc` fo + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # Sort order for messages by timestamp. Use `asc` for ascending order or `desc` - # for descending order. Defaults to `asc`. - class Order < OpenAI::Enum + # for descending order. Defaults to `asc`. + module Order + extend OpenAI::Internal::Type::Enum + ASC = :asc DESC = :desc - finalize! 
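(Usage sketch, illustrative only: the companion call for `OpenAI::Resources::Chat::Completions::Messages#list`. That the completion ID is passed positionally ahead of the keyword params is an assumption based on the SDK's resource conventions; the ID itself is a placeholder.)

    require "openai"

    client = OpenAI::Client.new

    # Fetch the messages of a stored chat completion, oldest first.
    messages = client.chat.completions.messages.list(
      "chatcmpl_abc123",
      limit: 50,
      order: :asc
    )
    messages.auto_paging_each { |message| puts message.content }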
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/chat_model.rb b/lib/openai/models/chat_model.rb index 7b512490..adeb1665 100644 --- a/lib/openai/models/chat_model.rb +++ b/lib/openai/models/chat_model.rb @@ -2,9 +2,26 @@ module OpenAI module Models - # @abstract - # - class ChatModel < OpenAI::Enum + module ChatModel + extend OpenAI::Internal::Type::Enum + + GPT_5 = :"gpt-5" + GPT_5_MINI = :"gpt-5-mini" + GPT_5_NANO = :"gpt-5-nano" + GPT_5_2025_08_07 = :"gpt-5-2025-08-07" + GPT_5_MINI_2025_08_07 = :"gpt-5-mini-2025-08-07" + GPT_5_NANO_2025_08_07 = :"gpt-5-nano-2025-08-07" + GPT_5_CHAT_LATEST = :"gpt-5-chat-latest" + GPT_4_1 = :"gpt-4.1" + GPT_4_1_MINI = :"gpt-4.1-mini" + GPT_4_1_NANO = :"gpt-4.1-nano" + GPT_4_1_2025_04_14 = :"gpt-4.1-2025-04-14" + GPT_4_1_MINI_2025_04_14 = :"gpt-4.1-mini-2025-04-14" + GPT_4_1_NANO_2025_04_14 = :"gpt-4.1-nano-2025-04-14" + O4_MINI = :"o4-mini" + O4_MINI_2025_04_16 = :"o4-mini-2025-04-16" + O3 = :o3 + O3_2025_04_16 = :"o3-2025-04-16" O3_MINI = :"o3-mini" O3_MINI_2025_01_31 = :"o3-mini-2025-01-31" O1 = :o1 @@ -13,11 +30,6 @@ class ChatModel < OpenAI::Enum O1_PREVIEW_2024_09_12 = :"o1-preview-2024-09-12" O1_MINI = :"o1-mini" O1_MINI_2024_09_12 = :"o1-mini-2024-09-12" - COMPUTER_USE_PREVIEW = :"computer-use-preview" - COMPUTER_USE_PREVIEW_2025_02_04 = :"computer-use-preview-2025-02-04" - COMPUTER_USE_PREVIEW_2025_03_11 = :"computer-use-preview-2025-03-11" - GPT_4_5_PREVIEW = :"gpt-4.5-preview" - GPT_4_5_PREVIEW_2025_02_27 = :"gpt-4.5-preview-2025-02-27" GPT_4O = :"gpt-4o" GPT_4O_2024_11_20 = :"gpt-4o-2024-11-20" GPT_4O_2024_08_06 = :"gpt-4o-2024-08-06" @@ -25,9 +37,15 @@ class ChatModel < OpenAI::Enum GPT_4O_AUDIO_PREVIEW = :"gpt-4o-audio-preview" GPT_4O_AUDIO_PREVIEW_2024_10_01 = :"gpt-4o-audio-preview-2024-10-01" GPT_4O_AUDIO_PREVIEW_2024_12_17 = :"gpt-4o-audio-preview-2024-12-17" + GPT_4O_AUDIO_PREVIEW_2025_06_03 = :"gpt-4o-audio-preview-2025-06-03" GPT_4O_MINI_AUDIO_PREVIEW = :"gpt-4o-mini-audio-preview" GPT_4O_MINI_AUDIO_PREVIEW_2024_12_17 = :"gpt-4o-mini-audio-preview-2024-12-17" + GPT_4O_SEARCH_PREVIEW = :"gpt-4o-search-preview" + GPT_4O_MINI_SEARCH_PREVIEW = :"gpt-4o-mini-search-preview" + GPT_4O_SEARCH_PREVIEW_2025_03_11 = :"gpt-4o-search-preview-2025-03-11" + GPT_4O_MINI_SEARCH_PREVIEW_2025_03_11 = :"gpt-4o-mini-search-preview-2025-03-11" CHATGPT_4O_LATEST = :"chatgpt-4o-latest" + CODEX_MINI_LATEST = :"codex-mini-latest" GPT_4O_MINI = :"gpt-4o-mini" GPT_4O_MINI_2024_07_18 = :"gpt-4o-mini-2024-07-18" GPT_4_TURBO = :"gpt-4-turbo" @@ -50,7 +68,8 @@ class ChatModel < OpenAI::Enum GPT_3_5_TURBO_0125 = :"gpt-3.5-turbo-0125" GPT_3_5_TURBO_16K_0613 = :"gpt-3.5-turbo-16k-0613" - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/comparison_filter.rb b/lib/openai/models/comparison_filter.rb index 5e74b3b4..a8a32298 100644 --- a/lib/openai/models/comparison_filter.rb +++ b/lib/openai/models/comparison_filter.rb @@ -2,7 +2,7 @@ module OpenAI module Models - class ComparisonFilter < OpenAI::BaseModel + class ComparisonFilter < OpenAI::Internal::Type::BaseModel # @!attribute key # The key to compare against the value. # @@ -12,46 +12,49 @@ class ComparisonFilter < OpenAI::BaseModel # @!attribute type # Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. 
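(Usage sketch, illustrative only: `ChatModel` members are plain Ruby symbols, so the constants and symbol literals are interchangeable, and the generated `self.values` enumerates every member.)

    require "openai"

    OpenAI::Models::ChatModel::GPT_5 # => :"gpt-5"

    # Enumerate every known model symbol.
    OpenAI::Models::ChatModel.values.each { |m| puts m }

    client = OpenAI::Client.new
    chat = client.chat.completions.create(
      model: OpenAI::Models::ChatModel::GPT_5_MINI, # same as :"gpt-5-mini"
      messages: [{role: "user", content: "ping"}]
    )
    puts chat.choices.first.message.content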
# - # - `eq`: equals - # - `ne`: not equal - # - `gt`: greater than - # - `gte`: greater than or equal - # - `lt`: less than - # - `lte`: less than or equal + # - `eq`: equals + # - `ne`: not equal + # - `gt`: greater than + # - `gte`: greater than or equal + # - `lt`: less than + # - `lte`: less than or equal # # @return [Symbol, OpenAI::Models::ComparisonFilter::Type] - required :type, enum: -> { OpenAI::Models::ComparisonFilter::Type } + required :type, enum: -> { OpenAI::ComparisonFilter::Type } # @!attribute value # The value to compare against the attribute key; supports string, number, or - # boolean types. + # boolean types. # # @return [String, Float, Boolean] - required :value, union: -> { OpenAI::Models::ComparisonFilter::Value } + required :value, union: -> { OpenAI::ComparisonFilter::Value } - # @!parse - # # A filter used to compare a specified attribute key to a given value using a - # # defined comparison operation. - # # - # # @param key [String] - # # @param type [Symbol, OpenAI::Models::ComparisonFilter::Type] - # # @param value [String, Float, Boolean] - # # - # def initialize(key:, type:, value:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(key:, type:, value:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ComparisonFilter} for more details. + # + # A filter used to compare a specified attribute key to a given value using a + # defined comparison operation. # + # @param key [String] The key to compare against the value. + # + # @param type [Symbol, OpenAI::Models::ComparisonFilter::Type] Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + # + # @param value [String, Float, Boolean] The value to compare against the attribute key; supports string, number, or bool + # Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. # - # - `eq`: equals - # - `ne`: not equal - # - `gt`: greater than - # - `gte`: greater than or equal - # - `lt`: less than - # - `lte`: less than or equal - class Type < OpenAI::Enum + # - `eq`: equals + # - `ne`: not equal + # - `gt`: greater than + # - `gte`: greater than or equal + # - `lt`: less than + # - `lte`: less than or equal + # + # @see OpenAI::Models::ComparisonFilter#type + module Type + extend OpenAI::Internal::Type::Enum + EQ = :eq NE = :ne GT = :gt @@ -59,19 +62,25 @@ class Type < OpenAI::Enum LT = :lt LTE = :lte - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # # The value to compare against the attribute key; supports string, number, or - # boolean types. - class Value < OpenAI::Union + # boolean types. + # + # @see OpenAI::Models::ComparisonFilter#value + module Value + extend OpenAI::Internal::Type::Union + variant String variant Float - variant OpenAI::BooleanModel + variant OpenAI::Internal::Type::Boolean + + # @!method self.variants + # @return [Array(String, Float, Boolean)] end end end diff --git a/lib/openai/models/completion.rb b/lib/openai/models/completion.rb index 751e0dc8..e6a702d0 100644 --- a/lib/openai/models/completion.rb +++ b/lib/openai/models/completion.rb @@ -2,7 +2,10 @@ module OpenAI module Models - class Completion < OpenAI::BaseModel + # @see OpenAI::Resources::Completions#create + # + # @see OpenAI::Resources::Completions#create_streaming + class Completion < OpenAI::Internal::Type::BaseModel # @!attribute id # A unique identifier for the completion. 
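(Usage sketch, illustrative only: `ComparisonFilter` is an ordinary `BaseModel`, so it can be built directly. Which endpoints accept it, e.g. vector-store search filters, is outside this hunk, so treat the usage context as an assumption; the key and value are placeholders.)

    require "openai"

    # `value` accepts a String, Float, or Boolean.
    filter = OpenAI::Models::ComparisonFilter.new(
      key: "region",
      type: :eq, # :eq, :ne, :gt, :gte, :lt, or :lte
      value: "us-east"
    )

    filter.type  # => :eq
    filter.value # => "us-east"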
# @@ -13,7 +16,7 @@ class Completion < OpenAI::BaseModel # The list of completion choices the model generated for the input prompt. # # @return [Array] - required :choices, -> { OpenAI::ArrayOf[OpenAI::Models::CompletionChoice] } + required :choices, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::CompletionChoice] } # @!attribute created # The Unix timestamp (in seconds) of when the completion was created. @@ -33,44 +36,41 @@ class Completion < OpenAI::BaseModel # @return [Symbol, :text_completion] required :object, const: :text_completion - # @!attribute [r] system_fingerprint + # @!attribute system_fingerprint # This fingerprint represents the backend configuration that the model runs with. # - # Can be used in conjunction with the `seed` request parameter to understand when - # backend changes have been made that might impact determinism. + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism. # # @return [String, nil] optional :system_fingerprint, String - # @!parse - # # @return [String] - # attr_writer :system_fingerprint - - # @!attribute [r] usage + # @!attribute usage # Usage statistics for the completion request. # # @return [OpenAI::Models::CompletionUsage, nil] - optional :usage, -> { OpenAI::Models::CompletionUsage } - - # @!parse - # # @return [OpenAI::Models::CompletionUsage] - # attr_writer :usage - - # @!parse - # # Represents a completion response from the API. Note: both the streamed and - # # non-streamed response objects share the same shape (unlike the chat endpoint). - # # - # # @param id [String] - # # @param choices [Array] - # # @param created [Integer] - # # @param model [String] - # # @param system_fingerprint [String] - # # @param usage [OpenAI::Models::CompletionUsage] - # # @param object [Symbol, :text_completion] - # # - # def initialize(id:, choices:, created:, model:, system_fingerprint: nil, usage: nil, object: :text_completion, **) = super + optional :usage, -> { OpenAI::CompletionUsage } - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, choices:, created:, model:, system_fingerprint: nil, usage: nil, object: :text_completion) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Completion} for more details. + # + # Represents a completion response from the API. Note: both the streamed and + # non-streamed response objects share the same shape (unlike the chat endpoint). + # + # @param id [String] A unique identifier for the completion. + # + # @param choices [Array] The list of completion choices the model generated for the input prompt. + # + # @param created [Integer] The Unix timestamp (in seconds) of when the completion was created. + # + # @param model [String] The model used for completion. + # + # @param system_fingerprint [String] This fingerprint represents the backend configuration that the model runs with. + # + # @param usage [OpenAI::Models::CompletionUsage] Usage statistics for the completion request. 
+ # + # @param object [Symbol, :text_completion] The object type, which is always "text_completion" end end end diff --git a/lib/openai/models/completion_choice.rb b/lib/openai/models/completion_choice.rb index 6237b56e..07f6b428 100644 --- a/lib/openai/models/completion_choice.rb +++ b/lib/openai/models/completion_choice.rb @@ -2,15 +2,15 @@ module OpenAI module Models - class CompletionChoice < OpenAI::BaseModel + class CompletionChoice < OpenAI::Internal::Type::BaseModel # @!attribute finish_reason # The reason the model stopped generating tokens. This will be `stop` if the model - # hit a natural stop point or a provided stop sequence, `length` if the maximum - # number of tokens specified in the request was reached, or `content_filter` if - # content was omitted due to a flag from our content filters. + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, or `content_filter` if + # content was omitted due to a flag from our content filters. # # @return [Symbol, OpenAI::Models::CompletionChoice::FinishReason] - required :finish_reason, enum: -> { OpenAI::Models::CompletionChoice::FinishReason } + required :finish_reason, enum: -> { OpenAI::CompletionChoice::FinishReason } # @!attribute index # @@ -20,83 +20,69 @@ class CompletionChoice < OpenAI::BaseModel # @!attribute logprobs # # @return [OpenAI::Models::CompletionChoice::Logprobs, nil] - required :logprobs, -> { OpenAI::Models::CompletionChoice::Logprobs }, nil?: true + required :logprobs, -> { OpenAI::CompletionChoice::Logprobs }, nil?: true # @!attribute text # # @return [String] required :text, String - # @!parse - # # @param finish_reason [Symbol, OpenAI::Models::CompletionChoice::FinishReason] - # # @param index [Integer] - # # @param logprobs [OpenAI::Models::CompletionChoice::Logprobs, nil] - # # @param text [String] - # # - # def initialize(finish_reason:, index:, logprobs:, text:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(finish_reason:, index:, logprobs:, text:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::CompletionChoice} for more details. # + # @param finish_reason [Symbol, OpenAI::Models::CompletionChoice::FinishReason] The reason the model stopped generating tokens. This will be `stop` if the model + # + # @param index [Integer] + # + # @param logprobs [OpenAI::Models::CompletionChoice::Logprobs, nil] + # + # @param text [String] + # The reason the model stopped generating tokens. This will be `stop` if the model - # hit a natural stop point or a provided stop sequence, `length` if the maximum - # number of tokens specified in the request was reached, or `content_filter` if - # content was omitted due to a flag from our content filters. - class FinishReason < OpenAI::Enum + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, or `content_filter` if + # content was omitted due to a flag from our content filters. + # + # @see OpenAI::Models::CompletionChoice#finish_reason + module FinishReason + extend OpenAI::Internal::Type::Enum + STOP = :stop LENGTH = :length CONTENT_FILTER = :content_filter - finalize! 
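(Usage sketch, illustrative only: branching on `CompletionChoice#finish_reason`, whose members this hunk defines. The prompt and token budget are arbitrary.)

    require "openai"

    client = OpenAI::Client.new
    completion = client.completions.create(
      model: "gpt-3.5-turbo-instruct",
      prompt: "Write a haiku about code review.",
      max_tokens: 16
    )

    choice = completion.choices.first
    case choice.finish_reason
    when :stop           then puts choice.text
    when :length         then warn "truncated at max_tokens"
    when :content_filter then warn "output was filtered"
    end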
+ # @!method self.values + # @return [Array] end - class Logprobs < OpenAI::BaseModel - # @!attribute [r] text_offset + # @see OpenAI::Models::CompletionChoice#logprobs + class Logprobs < OpenAI::Internal::Type::BaseModel + # @!attribute text_offset # # @return [Array, nil] - optional :text_offset, OpenAI::ArrayOf[Integer] - - # @!parse - # # @return [Array] - # attr_writer :text_offset + optional :text_offset, OpenAI::Internal::Type::ArrayOf[Integer] - # @!attribute [r] token_logprobs + # @!attribute token_logprobs # # @return [Array, nil] - optional :token_logprobs, OpenAI::ArrayOf[Float] + optional :token_logprobs, OpenAI::Internal::Type::ArrayOf[Float] - # @!parse - # # @return [Array] - # attr_writer :token_logprobs - - # @!attribute [r] tokens + # @!attribute tokens # # @return [Array, nil] - optional :tokens, OpenAI::ArrayOf[String] - - # @!parse - # # @return [Array] - # attr_writer :tokens + optional :tokens, OpenAI::Internal::Type::ArrayOf[String] - # @!attribute [r] top_logprobs + # @!attribute top_logprobs # # @return [ArrayFloat}>, nil] - optional :top_logprobs, OpenAI::ArrayOf[OpenAI::HashOf[Float]] - - # @!parse - # # @return [ArrayFloat}>] - # attr_writer :top_logprobs - - # @!parse - # # @param text_offset [Array] - # # @param token_logprobs [Array] - # # @param tokens [Array] - # # @param top_logprobs [ArrayFloat}>] - # # - # def initialize(text_offset: nil, token_logprobs: nil, tokens: nil, top_logprobs: nil, **) = super + optional :top_logprobs, OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[Float]] - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(text_offset: nil, token_logprobs: nil, tokens: nil, top_logprobs: nil) + # @param text_offset [Array] + # @param token_logprobs [Array] + # @param tokens [Array] + # @param top_logprobs [ArrayFloat}>] end end end diff --git a/lib/openai/models/completion_create_params.rb b/lib/openai/models/completion_create_params.rb index b2835980..4da42de4 100644 --- a/lib/openai/models/completion_create_params.rb +++ b/lib/openai/models/completion_create_params.rb @@ -2,42 +2,44 @@ module OpenAI module Models - class CompletionCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Completions#create + # + # @see OpenAI::Resources::Completions#create_streaming + class CompletionCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute model # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. 
# - # @return [String, Symbol, OpenAI::Models::CompletionCreateParams::Model::Preset] - required :model, union: -> { OpenAI::Models::CompletionCreateParams::Model } + # @return [String, Symbol, OpenAI::Models::CompletionCreateParams::Model] + required :model, union: -> { OpenAI::CompletionCreateParams::Model } # @!attribute prompt # The prompt(s) to generate completions for, encoded as a string, array of - # strings, array of tokens, or array of token arrays. + # strings, array of tokens, or array of token arrays. # - # Note that <|endoftext|> is the document separator that the model sees during - # training, so if a prompt is not specified the model will generate as if from the - # beginning of a new document. + # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. # # @return [String, Array, Array, Array>, nil] - required :prompt, union: -> { OpenAI::Models::CompletionCreateParams::Prompt }, nil?: true + required :prompt, union: -> { OpenAI::CompletionCreateParams::Prompt }, nil?: true # @!attribute best_of # Generates `best_of` completions server-side and returns the "best" (the one with - # the highest log probability per token). Results cannot be streamed. + # the highest log probability per token). Results cannot be streamed. # - # When used with `n`, `best_of` controls the number of candidate completions and - # `n` specifies how many to return – `best_of` must be greater than `n`. + # When used with `n`, `best_of` controls the number of candidate completions and + # `n` specifies how many to return – `best_of` must be greater than `n`. # - # **Note:** Because this parameter generates many completions, it can quickly - # consume your token quota. Use carefully and ensure that you have reasonable - # settings for `max_tokens` and `stop`. + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. # # @return [Integer, nil] optional :best_of, Integer, nil?: true @@ -46,14 +48,14 @@ class CompletionCreateParams < OpenAI::BaseModel # Echo back the prompt in addition to the completion # # @return [Boolean, nil] - optional :echo, OpenAI::BooleanModel, nil?: true + optional :echo, OpenAI::Internal::Type::Boolean, nil?: true # @!attribute frequency_penalty # Number between -2.0 and 2.0. Positive values penalize new tokens based on their - # existing frequency in the text so far, decreasing the model's likelihood to - # repeat the same line verbatim. + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. # - # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) # # @return [Float, nil] optional :frequency_penalty, Float, nil?: true @@ -61,39 +63,39 @@ class CompletionCreateParams < OpenAI::BaseModel # @!attribute logit_bias # Modify the likelihood of specified tokens appearing in the completion. # - # Accepts a JSON object that maps tokens (specified by their token ID in the GPT - # tokenizer) to an associated bias value from -100 to 100. You can use this - # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. 
- # Mathematically, the bias is added to the logits generated by the model prior to - # sampling. The exact effect will vary per model, but values between -1 and 1 - # should decrease or increase likelihood of selection; values like -100 or 100 - # should result in a ban or exclusive selection of the relevant token. + # Accepts a JSON object that maps tokens (specified by their token ID in the GPT + # tokenizer) to an associated bias value from -100 to 100. You can use this + # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. + # Mathematically, the bias is added to the logits generated by the model prior to + # sampling. The exact effect will vary per model, but values between -1 and 1 + # should decrease or increase likelihood of selection; values like -100 or 100 + # should result in a ban or exclusive selection of the relevant token. # - # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token - # from being generated. + # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token + # from being generated. # # @return [Hash{Symbol=>Integer}, nil] - optional :logit_bias, OpenAI::HashOf[Integer], nil?: true + optional :logit_bias, OpenAI::Internal::Type::HashOf[Integer], nil?: true # @!attribute logprobs # Include the log probabilities on the `logprobs` most likely output tokens, as - # well the chosen tokens. For example, if `logprobs` is 5, the API will return a - # list of the 5 most likely tokens. The API will always return the `logprob` of - # the sampled token, so there may be up to `logprobs+1` elements in the response. + # well the chosen tokens. For example, if `logprobs` is 5, the API will return a + # list of the 5 most likely tokens. The API will always return the `logprob` of + # the sampled token, so there may be up to `logprobs+1` elements in the response. # - # The maximum value for `logprobs` is 5. + # The maximum value for `logprobs` is 5. # # @return [Integer, nil] optional :logprobs, Integer, nil?: true # @!attribute max_tokens # The maximum number of [tokens](/tokenizer) that can be generated in the - # completion. + # completion. # - # The token count of your prompt plus `max_tokens` cannot exceed the model's - # context length. - # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - # for counting tokens. + # The token count of your prompt plus `max_tokens` cannot exceed the model's + # context length. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. # # @return [Integer, nil] optional :max_tokens, Integer, nil?: true @@ -101,196 +103,203 @@ class CompletionCreateParams < OpenAI::BaseModel # @!attribute n # How many completions to generate for each prompt. # - # **Note:** Because this parameter generates many completions, it can quickly - # consume your token quota. Use carefully and ensure that you have reasonable - # settings for `max_tokens` and `stop`. + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. # # @return [Integer, nil] optional :n, Integer, nil?: true # @!attribute presence_penalty # Number between -2.0 and 2.0. Positive values penalize new tokens based on - # whether they appear in the text so far, increasing the model's likelihood to - # talk about new topics. 
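(Usage sketch, illustrative only: the `logit_bias` documentation above translates literally into Ruby, since `HashOf[Integer]` keys are symbols. Combined here with `n` and `logprobs`.)

    require "openai"

    client = OpenAI::Client.new

    completion = client.completions.create(
      model: "gpt-3.5-turbo-instruct",
      prompt: "Once upon a time",
      max_tokens: 32,
      n: 2,                        # two candidate completions
      logprobs: 3,                 # top-3 logprobs per token (max is 5)
      logit_bias: {"50256": -100}  # ban the <|endoftext|> token
    )
    completion.choices.each { |choice| puts choice.text }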
+ # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. # - # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) # # @return [Float, nil] optional :presence_penalty, Float, nil?: true # @!attribute seed # If specified, our system will make a best effort to sample deterministically, - # such that repeated requests with the same `seed` and parameters should return - # the same result. + # such that repeated requests with the same `seed` and parameters should return + # the same result. # - # Determinism is not guaranteed, and you should refer to the `system_fingerprint` - # response parameter to monitor changes in the backend. + # Determinism is not guaranteed, and you should refer to the `system_fingerprint` + # response parameter to monitor changes in the backend. # # @return [Integer, nil] optional :seed, Integer, nil?: true # @!attribute stop + # Not supported with latest reasoning models `o3` and `o4-mini`. + # # Up to 4 sequences where the API will stop generating further tokens. The - # returned text will not contain the stop sequence. + # returned text will not contain the stop sequence. # # @return [String, Array, nil] - optional :stop, union: -> { OpenAI::Models::CompletionCreateParams::Stop }, nil?: true + optional :stop, union: -> { OpenAI::CompletionCreateParams::Stop }, nil?: true # @!attribute stream_options # Options for streaming response. Only set this when you set `stream: true`. # # @return [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] - optional :stream_options, -> { OpenAI::Models::Chat::ChatCompletionStreamOptions }, nil?: true + optional :stream_options, -> { OpenAI::Chat::ChatCompletionStreamOptions }, nil?: true # @!attribute suffix # The suffix that comes after a completion of inserted text. # - # This parameter is only supported for `gpt-3.5-turbo-instruct`. + # This parameter is only supported for `gpt-3.5-turbo-instruct`. # # @return [String, nil] optional :suffix, String, nil?: true # @!attribute temperature # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. # - # We generally recommend altering this or `top_p` but not both. + # We generally recommend altering this or `top_p` but not both. # # @return [Float, nil] optional :temperature, Float, nil?: true # @!attribute top_p # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or `temperature` but not both. + # We generally recommend altering this or `temperature` but not both. # # @return [Float, nil] optional :top_p, Float, nil?: true - # @!attribute [r] user + # @!attribute user # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. 
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). # # @return [String, nil] optional :user, String - # @!parse - # # @return [String] - # attr_writer :user - - # @!parse - # # @param model [String, Symbol, OpenAI::Models::CompletionCreateParams::Model::Preset] - # # @param prompt [String, Array, Array, Array>, nil] - # # @param best_of [Integer, nil] - # # @param echo [Boolean, nil] - # # @param frequency_penalty [Float, nil] - # # @param logit_bias [Hash{Symbol=>Integer}, nil] - # # @param logprobs [Integer, nil] - # # @param max_tokens [Integer, nil] - # # @param n [Integer, nil] - # # @param presence_penalty [Float, nil] - # # @param seed [Integer, nil] - # # @param stop [String, Array, nil] - # # @param stream_options [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] - # # @param suffix [String, nil] - # # @param temperature [Float, nil] - # # @param top_p [Float, nil] - # # @param user [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize( - # model:, - # prompt:, - # best_of: nil, - # echo: nil, - # frequency_penalty: nil, - # logit_bias: nil, - # logprobs: nil, - # max_tokens: nil, - # n: nil, - # presence_penalty: nil, - # seed: nil, - # stop: nil, - # stream_options: nil, - # suffix: nil, - # temperature: nil, - # top_p: nil, - # user: nil, - # request_options: {}, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(model:, prompt:, best_of: nil, echo: nil, frequency_penalty: nil, logit_bias: nil, logprobs: nil, max_tokens: nil, n: nil, presence_penalty: nil, seed: nil, stop: nil, stream_options: nil, suffix: nil, temperature: nil, top_p: nil, user: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::CompletionCreateParams} for more details. + # + # @param model [String, Symbol, OpenAI::Models::CompletionCreateParams::Model] ID of the model to use. You can use the [List models](https://platform.openai.co + # + # @param prompt [String, Array, Array, Array>, nil] The prompt(s) to generate completions for, encoded as a string, array of strings + # + # @param best_of [Integer, nil] Generates `best_of` completions server-side and returns the "best" (the one with + # + # @param echo [Boolean, nil] Echo back the prompt in addition to the completion + # + # @param frequency_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # + # @param logit_bias [Hash{Symbol=>Integer}, nil] Modify the likelihood of specified tokens appearing in the completion. + # + # @param logprobs [Integer, nil] Include the log probabilities on the `logprobs` most likely output tokens, as we + # + # @param max_tokens [Integer, nil] The maximum number of [tokens](/tokenizer) that can be generated in the completi + # + # @param n [Integer, nil] How many completions to generate for each prompt. + # + # @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on whethe + # + # @param seed [Integer, nil] If specified, our system will make a best effort to sample deterministically, su + # + # @param stop [String, Array, nil] Not supported with latest reasoning models `o3` and `o4-mini`. 
+ # + # @param stream_options [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] Options for streaming response. Only set this when you set `stream: true`. + # + # @param suffix [String, nil] The suffix that comes after a completion of inserted text. + # + # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m + # + # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the # + # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. - class Model < OpenAI::Union + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + module Model + extend OpenAI::Internal::Type::Union + variant String - # ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) for descriptions of them. - variant enum: -> { OpenAI::Models::CompletionCreateParams::Model::Preset } - - # @abstract - # - # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. - class Preset < OpenAI::Enum - GPT_3_5_TURBO_INSTRUCT = :"gpt-3.5-turbo-instruct" - DAVINCI_002 = :"davinci-002" - BABBAGE_002 = :"babbage-002" - - finalize! + variant const: -> { OpenAI::Models::CompletionCreateParams::Model::GPT_3_5_TURBO_INSTRUCT } + + variant const: -> { OpenAI::Models::CompletionCreateParams::Model::DAVINCI_002 } + + variant const: -> { OpenAI::Models::CompletionCreateParams::Model::BABBAGE_002 } + + # @!method self.variants + # @return [Array(String, Symbol)] + + define_sorbet_constant!(:Variants) do + T.type_alias { T.any(String, OpenAI::CompletionCreateParams::Model::TaggedSymbol) } end + + # @!group + + GPT_3_5_TURBO_INSTRUCT = :"gpt-3.5-turbo-instruct" + DAVINCI_002 = :"davinci-002" + BABBAGE_002 = :"babbage-002" + + # @!endgroup end - # @abstract - # # The prompt(s) to generate completions for, encoded as a string, array of - # strings, array of tokens, or array of token arrays. + # strings, array of tokens, or array of token arrays. # - # Note that <|endoftext|> is the document separator that the model sees during - # training, so if a prompt is not specified the model will generate as if from the - # beginning of a new document. - class Prompt < OpenAI::Union - StringArray = OpenAI::ArrayOf[String] + # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. 
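The `Model` union above accepts either a free-form model ID string or one of the preset symbols; a minimal usage sketch, assuming the `OpenAI::Client` constructor and the `completions` resource from the rest of this SDK (neither appears in this hunk):

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # Either form satisfies the union: a raw model ID string...
    client.completions.create(model: "gpt-3.5-turbo-instruct", prompt: "Hello")

    # ...or one of the preset constants from the enum group above.
    client.completions.create(
      model: OpenAI::Models::CompletionCreateParams::Model::GPT_3_5_TURBO_INSTRUCT,
      prompt: "Hello"
    )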
+ module Prompt + extend OpenAI::Internal::Type::Union - IntegerArray = OpenAI::ArrayOf[Integer] + variant String - ArrayOfToken2DArray = OpenAI::ArrayOf[OpenAI::ArrayOf[Integer]] + variant -> { OpenAI::Models::CompletionCreateParams::Prompt::StringArray } - variant String + variant -> { OpenAI::Models::CompletionCreateParams::Prompt::IntegerArray } + + variant -> { OpenAI::Models::CompletionCreateParams::Prompt::ArrayOfToken2DArray } + + # @!method self.variants + # @return [Array(String, Array, Array, Array>)] - variant OpenAI::Models::CompletionCreateParams::Prompt::StringArray + # @type [OpenAI::Internal::Type::Converter] + StringArray = OpenAI::Internal::Type::ArrayOf[String] - variant OpenAI::Models::CompletionCreateParams::Prompt::IntegerArray + # @type [OpenAI::Internal::Type::Converter] + IntegerArray = OpenAI::Internal::Type::ArrayOf[Integer] - variant OpenAI::Models::CompletionCreateParams::Prompt::ArrayOfToken2DArray + # @type [OpenAI::Internal::Type::Converter] + ArrayOfToken2DArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::ArrayOf[Integer]] end - # @abstract + # Not supported with latest reasoning models `o3` and `o4-mini`. # # Up to 4 sequences where the API will stop generating further tokens. The - # returned text will not contain the stop sequence. - class Stop < OpenAI::Union - StringArray = OpenAI::ArrayOf[String] + # returned text will not contain the stop sequence. + module Stop + extend OpenAI::Internal::Type::Union variant String - variant OpenAI::Models::CompletionCreateParams::Stop::StringArray + variant -> { OpenAI::Models::CompletionCreateParams::Stop::StringArray } + + # @!method self.variants + # @return [Array(String, Array)] + + # @type [OpenAI::Internal::Type::Converter] + StringArray = OpenAI::Internal::Type::ArrayOf[String] end end end diff --git a/lib/openai/models/completion_usage.rb b/lib/openai/models/completion_usage.rb index 5eebf968..8ca37f94 100644 --- a/lib/openai/models/completion_usage.rb +++ b/lib/openai/models/completion_usage.rb @@ -2,7 +2,7 @@ module OpenAI module Models - class CompletionUsage < OpenAI::BaseModel + class CompletionUsage < OpenAI::Internal::Type::BaseModel # @!attribute completion_tokens # Number of tokens in the generated completion. # @@ -21,144 +21,96 @@ class CompletionUsage < OpenAI::BaseModel # @return [Integer] required :total_tokens, Integer - # @!attribute [r] completion_tokens_details + # @!attribute completion_tokens_details # Breakdown of tokens used in a completion. # # @return [OpenAI::Models::CompletionUsage::CompletionTokensDetails, nil] - optional :completion_tokens_details, -> { OpenAI::Models::CompletionUsage::CompletionTokensDetails } + optional :completion_tokens_details, -> { OpenAI::CompletionUsage::CompletionTokensDetails } - # @!parse - # # @return [OpenAI::Models::CompletionUsage::CompletionTokensDetails] - # attr_writer :completion_tokens_details - - # @!attribute [r] prompt_tokens_details + # @!attribute prompt_tokens_details # Breakdown of tokens used in the prompt. # # @return [OpenAI::Models::CompletionUsage::PromptTokensDetails, nil] - optional :prompt_tokens_details, -> { OpenAI::Models::CompletionUsage::PromptTokensDetails } - - # @!parse - # # @return [OpenAI::Models::CompletionUsage::PromptTokensDetails] - # attr_writer :prompt_tokens_details - - # @!parse - # # Usage statistics for the completion request. 
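At the call site the `Prompt` and `Stop` unions are plain Ruby values; a sketch of the four accepted prompt shapes (token IDs are illustrative):

    prompt_as_string  = "Once upon a time"                 # String
    prompt_as_strings = ["First prompt", "Second prompt"]  # StringArray
    prompt_as_tokens  = [9906, 1917]                       # IntegerArray
    prompt_as_batches = [[9906, 1917], [12488, 2940]]      # ArrayOfToken2DArray

    stop = ["\n", "END"]  # Stop also accepts a single String; at most 4 sequences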
- # # - # # @param completion_tokens [Integer] - # # @param prompt_tokens [Integer] - # # @param total_tokens [Integer] - # # @param completion_tokens_details [OpenAI::Models::CompletionUsage::CompletionTokensDetails] - # # @param prompt_tokens_details [OpenAI::Models::CompletionUsage::PromptTokensDetails] - # # - # def initialize( - # completion_tokens:, - # prompt_tokens:, - # total_tokens:, - # completion_tokens_details: nil, - # prompt_tokens_details: nil, - # ** - # ) - # super - # end + optional :prompt_tokens_details, -> { OpenAI::CompletionUsage::PromptTokensDetails } - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(completion_tokens:, prompt_tokens:, total_tokens:, completion_tokens_details: nil, prompt_tokens_details: nil) + # Usage statistics for the completion request. + # + # @param completion_tokens [Integer] Number of tokens in the generated completion. + # + # @param prompt_tokens [Integer] Number of tokens in the prompt. + # + # @param total_tokens [Integer] Total number of tokens used in the request (prompt + completion). + # + # @param completion_tokens_details [OpenAI::Models::CompletionUsage::CompletionTokensDetails] Breakdown of tokens used in a completion. + # + # @param prompt_tokens_details [OpenAI::Models::CompletionUsage::PromptTokensDetails] Breakdown of tokens used in the prompt. - class CompletionTokensDetails < OpenAI::BaseModel - # @!attribute [r] accepted_prediction_tokens + # @see OpenAI::Models::CompletionUsage#completion_tokens_details + class CompletionTokensDetails < OpenAI::Internal::Type::BaseModel + # @!attribute accepted_prediction_tokens # When using Predicted Outputs, the number of tokens in the prediction that - # appeared in the completion. + # appeared in the completion. # # @return [Integer, nil] optional :accepted_prediction_tokens, Integer - # @!parse - # # @return [Integer] - # attr_writer :accepted_prediction_tokens - - # @!attribute [r] audio_tokens + # @!attribute audio_tokens # Audio input tokens generated by the model. # # @return [Integer, nil] optional :audio_tokens, Integer - # @!parse - # # @return [Integer] - # attr_writer :audio_tokens - - # @!attribute [r] reasoning_tokens + # @!attribute reasoning_tokens # Tokens generated by the model for reasoning. # # @return [Integer, nil] optional :reasoning_tokens, Integer - # @!parse - # # @return [Integer] - # attr_writer :reasoning_tokens - - # @!attribute [r] rejected_prediction_tokens + # @!attribute rejected_prediction_tokens # When using Predicted Outputs, the number of tokens in the prediction that did - # not appear in the completion. However, like reasoning tokens, these tokens are - # still counted in the total completion tokens for purposes of billing, output, - # and context window limits. + # not appear in the completion. However, like reasoning tokens, these tokens are + # still counted in the total completion tokens for purposes of billing, output, + # and context window limits. # # @return [Integer, nil] optional :rejected_prediction_tokens, Integer - # @!parse - # # @return [Integer] - # attr_writer :rejected_prediction_tokens - - # @!parse - # # Breakdown of tokens used in a completion. 
- # # - # # @param accepted_prediction_tokens [Integer] - # # @param audio_tokens [Integer] - # # @param reasoning_tokens [Integer] - # # @param rejected_prediction_tokens [Integer] - # # - # def initialize( - # accepted_prediction_tokens: nil, - # audio_tokens: nil, - # reasoning_tokens: nil, - # rejected_prediction_tokens: nil, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(accepted_prediction_tokens: nil, audio_tokens: nil, reasoning_tokens: nil, rejected_prediction_tokens: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::CompletionUsage::CompletionTokensDetails} for more details. + # + # Breakdown of tokens used in a completion. + # + # @param accepted_prediction_tokens [Integer] When using Predicted Outputs, the number of tokens in the + # + # @param audio_tokens [Integer] Audio input tokens generated by the model. + # + # @param reasoning_tokens [Integer] Tokens generated by the model for reasoning. + # + # @param rejected_prediction_tokens [Integer] When using Predicted Outputs, the number of tokens in the end - class PromptTokensDetails < OpenAI::BaseModel - # @!attribute [r] audio_tokens + # @see OpenAI::Models::CompletionUsage#prompt_tokens_details + class PromptTokensDetails < OpenAI::Internal::Type::BaseModel + # @!attribute audio_tokens # Audio input tokens present in the prompt. # # @return [Integer, nil] optional :audio_tokens, Integer - # @!parse - # # @return [Integer] - # attr_writer :audio_tokens - - # @!attribute [r] cached_tokens + # @!attribute cached_tokens # Cached tokens present in the prompt. # # @return [Integer, nil] optional :cached_tokens, Integer - # @!parse - # # @return [Integer] - # attr_writer :cached_tokens - - # @!parse - # # Breakdown of tokens used in the prompt. - # # - # # @param audio_tokens [Integer] - # # @param cached_tokens [Integer] - # # - # def initialize(audio_tokens: nil, cached_tokens: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(audio_tokens: nil, cached_tokens: nil) + # Breakdown of tokens used in the prompt. + # + # @param audio_tokens [Integer] Audio input tokens present in the prompt. + # + # @param cached_tokens [Integer] Cached tokens present in the prompt. end end end diff --git a/lib/openai/models/compound_filter.rb b/lib/openai/models/compound_filter.rb index 68d4e0a2..e4e7fdcf 100644 --- a/lib/openai/models/compound_filter.rb +++ b/lib/openai/models/compound_filter.rb @@ -2,49 +2,55 @@ module OpenAI module Models - class CompoundFilter < OpenAI::BaseModel + class CompoundFilter < OpenAI::Internal::Type::BaseModel # @!attribute filters # Array of filters to combine. Items can be `ComparisonFilter` or - # `CompoundFilter`. + # `CompoundFilter`. # # @return [Array] - required :filters, -> { OpenAI::ArrayOf[union: OpenAI::Models::CompoundFilter::Filter] } + required :filters, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::CompoundFilter::Filter] } # @!attribute type # Type of operation: `and` or `or`. # # @return [Symbol, OpenAI::Models::CompoundFilter::Type] - required :type, enum: -> { OpenAI::Models::CompoundFilter::Type } + required :type, enum: -> { OpenAI::CompoundFilter::Type } - # @!parse - # # Combine multiple filters using `and` or `or`. 
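Because every field on the two token-detail models is optional, reads should be nil-safe; a small sketch, assuming `usage` is an `OpenAI::Models::CompletionUsage` taken from a completion response (responses are outside this hunk):

    def summarize_usage(usage)
      # `&.` guards against detail objects the server omitted.
      reasoning = usage.completion_tokens_details&.reasoning_tokens || 0
      cached    = usage.prompt_tokens_details&.cached_tokens || 0
      "total=#{usage.total_tokens} reasoning=#{reasoning} cached_prompt=#{cached}"
    end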
- # # - # # @param filters [Array] - # # @param type [Symbol, OpenAI::Models::CompoundFilter::Type] - # # - # def initialize(filters:, type:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(filters:, type:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::CompoundFilter} for more details. + # + # Combine multiple filters using `and` or `or`. # + # @param filters [Array] Array of filters to combine. Items can be `ComparisonFilter` or `CompoundFilter` + # + # @param type [Symbol, OpenAI::Models::CompoundFilter::Type] Type of operation: `and` or `or`. + # A filter used to compare a specified attribute key to a given value using a - # defined comparison operation. - class Filter < OpenAI::Union + # defined comparison operation. + module Filter + extend OpenAI::Internal::Type::Union + # A filter used to compare a specified attribute key to a given value using a defined comparison operation. - variant -> { OpenAI::Models::ComparisonFilter } + variant -> { OpenAI::ComparisonFilter } + + variant OpenAI::Internal::Type::Unknown - variant OpenAI::Unknown + # @!method self.variants + # @return [Array(OpenAI::Models::ComparisonFilter, Object)] end - # @abstract - # # Type of operation: `and` or `or`. - class Type < OpenAI::Enum + # + # @see OpenAI::Models::CompoundFilter#type + module Type + extend OpenAI::Internal::Type::Enum + AND = :and OR = :or - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/container_create_params.rb b/lib/openai/models/container_create_params.rb new file mode 100644 index 00000000..00a41b82 --- /dev/null +++ b/lib/openai/models/container_create_params.rb @@ -0,0 +1,75 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Containers#create + class ContainerCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute name + # Name of the container to create. + # + # @return [String] + required :name, String + + # @!attribute expires_after + # Container expiration time in seconds relative to the 'anchor' time. + # + # @return [OpenAI::Models::ContainerCreateParams::ExpiresAfter, nil] + optional :expires_after, -> { OpenAI::ContainerCreateParams::ExpiresAfter } + + # @!attribute file_ids + # IDs of files to copy to the container. + # + # @return [Array, nil] + optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] + + # @!method initialize(name:, expires_after: nil, file_ids: nil, request_options: {}) + # @param name [String] Name of the container to create. + # + # @param expires_after [OpenAI::Models::ContainerCreateParams::ExpiresAfter] Container expiration time in seconds relative to the 'anchor' time. + # + # @param file_ids [Array] IDs of files to copy to the container. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + # @!attribute anchor + # Time anchor for the expiration time. Currently only 'last_active_at' is + # supported. 
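`OpenAI::Models::CompoundFilter` composes `ComparisonFilter`s under an `:and`/`:or` operation; a hand-built sketch, assuming `ComparisonFilter` takes `key:`, `type:`, and `value:` (its definition is not part of this hunk):

    filter = OpenAI::Models::CompoundFilter.new(
      type: :and,
      filters: [
        OpenAI::Models::ComparisonFilter.new(key: "author", type: :eq, value: "jane"),
        OpenAI::Models::ComparisonFilter.new(key: "year", type: :gte, value: 2020)
      ]
    )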
+ # + # @return [Symbol, OpenAI::Models::ContainerCreateParams::ExpiresAfter::Anchor] + required :anchor, enum: -> { OpenAI::ContainerCreateParams::ExpiresAfter::Anchor } + + # @!attribute minutes + # + # @return [Integer] + required :minutes, Integer + + # @!method initialize(anchor:, minutes:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ContainerCreateParams::ExpiresAfter} for more details. + # + # Container expiration time in seconds relative to the 'anchor' time. + # + # @param anchor [Symbol, OpenAI::Models::ContainerCreateParams::ExpiresAfter::Anchor] Time anchor for the expiration time. Currently only 'last_active_at' is supporte + # + # @param minutes [Integer] + + # Time anchor for the expiration time. Currently only 'last_active_at' is + # supported. + # + # @see OpenAI::Models::ContainerCreateParams::ExpiresAfter#anchor + module Anchor + extend OpenAI::Internal::Type::Enum + + LAST_ACTIVE_AT = :last_active_at + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/container_create_response.rb b/lib/openai/models/container_create_response.rb new file mode 100644 index 00000000..3db15ec9 --- /dev/null +++ b/lib/openai/models/container_create_response.rb @@ -0,0 +1,98 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Containers#create + class ContainerCreateResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # Unique identifier for the container. + # + # @return [String] + required :id, String + + # @!attribute created_at + # Unix timestamp (in seconds) when the container was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute name + # Name of the container. + # + # @return [String] + required :name, String + + # @!attribute object + # The type of this object. + # + # @return [String] + required :object, String + + # @!attribute status + # Status of the container (e.g., active, deleted). + # + # @return [String] + required :status, String + + # @!attribute expires_after + # The container will expire after this time period. The anchor is the reference + # point for the expiration. The minutes is the number of minutes after the anchor + # before the container expires. + # + # @return [OpenAI::Models::ContainerCreateResponse::ExpiresAfter, nil] + optional :expires_after, -> { OpenAI::Models::ContainerCreateResponse::ExpiresAfter } + + # @!method initialize(id:, created_at:, name:, object:, status:, expires_after: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ContainerCreateResponse} for more details. + # + # @param id [String] Unique identifier for the container. + # + # @param created_at [Integer] Unix timestamp (in seconds) when the container was created. + # + # @param name [String] Name of the container. + # + # @param object [String] The type of this object. + # + # @param status [String] Status of the container (e.g., active, deleted). + # + # @param expires_after [OpenAI::Models::ContainerCreateResponse::ExpiresAfter] The container will expire after this time period. + + # @see OpenAI::Models::ContainerCreateResponse#expires_after + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + # @!attribute anchor + # The reference point for the expiration. 
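`ContainerCreateParams` in use, as a sketch; the `client.containers.create` shape follows the `@see OpenAI::Resources::Containers#create` tag above, and the file ID is hypothetical:

    container = client.containers.create(
      name: "scratch-space",
      expires_after: {anchor: :last_active_at, minutes: 20}, # only :last_active_at is supported
      file_ids: ["file-abc123"]                              # hypothetical file ID
    )
    puts container.id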
+ # + # @return [Symbol, OpenAI::Models::ContainerCreateResponse::ExpiresAfter::Anchor, nil] + optional :anchor, enum: -> { OpenAI::Models::ContainerCreateResponse::ExpiresAfter::Anchor } + + # @!attribute minutes + # The number of minutes after the anchor before the container expires. + # + # @return [Integer, nil] + optional :minutes, Integer + + # @!method initialize(anchor: nil, minutes: nil) + # The container will expire after this time period. The anchor is the reference + # point for the expiration. The minutes is the number of minutes after the anchor + # before the container expires. + # + # @param anchor [Symbol, OpenAI::Models::ContainerCreateResponse::ExpiresAfter::Anchor] The reference point for the expiration. + # + # @param minutes [Integer] The number of minutes after the anchor before the container expires. + + # The reference point for the expiration. + # + # @see OpenAI::Models::ContainerCreateResponse::ExpiresAfter#anchor + module Anchor + extend OpenAI::Internal::Type::Enum + + LAST_ACTIVE_AT = :last_active_at + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/container_delete_params.rb b/lib/openai/models/container_delete_params.rb new file mode 100644 index 00000000..becd24c9 --- /dev/null +++ b/lib/openai/models/container_delete_params.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Containers#delete + class ContainerDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end +end diff --git a/lib/openai/models/container_list_params.rb b/lib/openai/models/container_list_params.rb new file mode 100644 index 00000000..017dc9cf --- /dev/null +++ b/lib/openai/models/container_list_params.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Containers#list + class ContainerListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute after + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + # + # @return [String, nil] + optional :after, String + + # @!attribute limit + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + # + # @return [Integer, nil] + optional :limit, Integer + + # @!attribute order + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + # + # @return [Symbol, OpenAI::Models::ContainerListParams::Order, nil] + optional :order, enum: -> { OpenAI::ContainerListParams::Order } + + # @!method initialize(after: nil, limit: nil, order: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ContainerListParams} for more details. + # + # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place + # + # @param limit [Integer] A limit on the number of objects to be returned. 
Limit can range between 1 and 1 + # + # @param order [Symbol, OpenAI::Models::ContainerListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + module Order + extend OpenAI::Internal::Type::Enum + + ASC = :asc + DESC = :desc + + # @!method self.values + # @return [Array] + end + end + end +end diff --git a/lib/openai/models/container_list_response.rb b/lib/openai/models/container_list_response.rb new file mode 100644 index 00000000..6d12abc7 --- /dev/null +++ b/lib/openai/models/container_list_response.rb @@ -0,0 +1,98 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Containers#list + class ContainerListResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # Unique identifier for the container. + # + # @return [String] + required :id, String + + # @!attribute created_at + # Unix timestamp (in seconds) when the container was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute name + # Name of the container. + # + # @return [String] + required :name, String + + # @!attribute object + # The type of this object. + # + # @return [String] + required :object, String + + # @!attribute status + # Status of the container (e.g., active, deleted). + # + # @return [String] + required :status, String + + # @!attribute expires_after + # The container will expire after this time period. The anchor is the reference + # point for the expiration. The minutes is the number of minutes after the anchor + # before the container expires. + # + # @return [OpenAI::Models::ContainerListResponse::ExpiresAfter, nil] + optional :expires_after, -> { OpenAI::Models::ContainerListResponse::ExpiresAfter } + + # @!method initialize(id:, created_at:, name:, object:, status:, expires_after: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ContainerListResponse} for more details. + # + # @param id [String] Unique identifier for the container. + # + # @param created_at [Integer] Unix timestamp (in seconds) when the container was created. + # + # @param name [String] Name of the container. + # + # @param object [String] The type of this object. + # + # @param status [String] Status of the container (e.g., active, deleted). + # + # @param expires_after [OpenAI::Models::ContainerListResponse::ExpiresAfter] The container will expire after this time period. + + # @see OpenAI::Models::ContainerListResponse#expires_after + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + # @!attribute anchor + # The reference point for the expiration. + # + # @return [Symbol, OpenAI::Models::ContainerListResponse::ExpiresAfter::Anchor, nil] + optional :anchor, enum: -> { OpenAI::Models::ContainerListResponse::ExpiresAfter::Anchor } + + # @!attribute minutes + # The number of minutes after the anchor before the container expires. + # + # @return [Integer, nil] + optional :minutes, Integer + + # @!method initialize(anchor: nil, minutes: nil) + # The container will expire after this time period. The anchor is the reference + # point for the expiration. The minutes is the number of minutes after the anchor + # before the container expires. + # + # @param anchor [Symbol, OpenAI::Models::ContainerListResponse::ExpiresAfter::Anchor] The reference point for the expiration. 
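The `after`/`limit`/`order` trio above is this SDK's standard cursor pagination; a manual paging sketch (the page object's `data` accessor is an assumption here, and generated SDKs like this one typically also expose an auto-paginating enumerator):

    after = nil
    loop do
      page = client.containers.list(after: after, limit: 100, order: :desc)
      page.data.each { |container| puts "#{container.id} #{container.status}" }
      break if page.data.empty?
      after = page.data.last.id # feed the last ID back in as the next cursor
    end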
+ # + # @param minutes [Integer] The number of minutes after the anchor before the container expires. + + # The reference point for the expiration. + # + # @see OpenAI::Models::ContainerListResponse::ExpiresAfter#anchor + module Anchor + extend OpenAI::Internal::Type::Enum + + LAST_ACTIVE_AT = :last_active_at + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/container_retrieve_params.rb b/lib/openai/models/container_retrieve_params.rb new file mode 100644 index 00000000..821d0549 --- /dev/null +++ b/lib/openai/models/container_retrieve_params.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Containers#retrieve + class ContainerRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end +end diff --git a/lib/openai/models/container_retrieve_response.rb b/lib/openai/models/container_retrieve_response.rb new file mode 100644 index 00000000..19520ab0 --- /dev/null +++ b/lib/openai/models/container_retrieve_response.rb @@ -0,0 +1,98 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Containers#retrieve + class ContainerRetrieveResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # Unique identifier for the container. + # + # @return [String] + required :id, String + + # @!attribute created_at + # Unix timestamp (in seconds) when the container was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute name + # Name of the container. + # + # @return [String] + required :name, String + + # @!attribute object + # The type of this object. + # + # @return [String] + required :object, String + + # @!attribute status + # Status of the container (e.g., active, deleted). + # + # @return [String] + required :status, String + + # @!attribute expires_after + # The container will expire after this time period. The anchor is the reference + # point for the expiration. The minutes is the number of minutes after the anchor + # before the container expires. + # + # @return [OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter, nil] + optional :expires_after, -> { OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter } + + # @!method initialize(id:, created_at:, name:, object:, status:, expires_after: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ContainerRetrieveResponse} for more details. + # + # @param id [String] Unique identifier for the container. + # + # @param created_at [Integer] Unix timestamp (in seconds) when the container was created. + # + # @param name [String] Name of the container. + # + # @param object [String] The type of this object. + # + # @param status [String] Status of the container (e.g., active, deleted). + # + # @param expires_after [OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter] The container will expire after this time period. + + # @see OpenAI::Models::ContainerRetrieveResponse#expires_after + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + # @!attribute anchor + # The reference point for the expiration. 
+ # + # @return [Symbol, OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::Anchor, nil] + optional :anchor, enum: -> { OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::Anchor } + + # @!attribute minutes + # The number of minutes after the anchor before the container expires. + # + # @return [Integer, nil] + optional :minutes, Integer + + # @!method initialize(anchor: nil, minutes: nil) + # The container will expire after this time period. The anchor is the reference + # point for the expiration. The minutes is the number of minutes after the anchor + # before the container expires. + # + # @param anchor [Symbol, OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::Anchor] The reference point for the expiration. + # + # @param minutes [Integer] The number of minutes after the anchor before the container expires. + + # The reference point for the expiration. + # + # @see OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter#anchor + module Anchor + extend OpenAI::Internal::Type::Enum + + LAST_ACTIVE_AT = :last_active_at + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/containers/file_create_params.rb b/lib/openai/models/containers/file_create_params.rb new file mode 100644 index 00000000..07528c8e --- /dev/null +++ b/lib/openai/models/containers/file_create_params.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Containers + # @see OpenAI::Resources::Containers::Files#create + class FileCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute file + # The File object (not file name) to be uploaded. + # + # @return [Pathname, StringIO, IO, String, OpenAI::FilePart, nil] + optional :file, OpenAI::Internal::Type::FileInput + + # @!attribute file_id + # Name of the file to create. + # + # @return [String, nil] + optional :file_id, String + + # @!method initialize(file: nil, file_id: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Containers::FileCreateParams} for more details. + # + # @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The File object (not file name) to be uploaded. + # + # @param file_id [String] Name of the file to create. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/models/containers/file_create_response.rb b/lib/openai/models/containers/file_create_response.rb new file mode 100644 index 00000000..408ac8c7 --- /dev/null +++ b/lib/openai/models/containers/file_create_response.rb @@ -0,0 +1,67 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Containers + # @see OpenAI::Resources::Containers::Files#create + class FileCreateResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # Unique identifier for the file. + # + # @return [String] + required :id, String + + # @!attribute bytes + # Size of the file in bytes. + # + # @return [Integer] + required :bytes, Integer + + # @!attribute container_id + # The container this file belongs to. + # + # @return [String] + required :container_id, String + + # @!attribute created_at + # Unix timestamp (in seconds) when the file was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute object + # The type of this object (`container.file`). 
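`Containers::FileCreateParams` accepts either a direct upload or an existing file ID (one or the other in practice); a sketch, with the positional `container_id` assumed from the nested-resource shape and both IDs hypothetical:

    require "pathname"

    # Upload a local file into the container...
    client.containers.files.create("cntr_abc123", file: Pathname("data.csv"))

    # ...or copy an already-uploaded file by ID.
    client.containers.files.create("cntr_abc123", file_id: "file-abc123")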
+ # + # @return [Symbol, :"container.file"] + required :object, const: :"container.file" + + # @!attribute path + # Path of the file in the container. + # + # @return [String] + required :path, String + + # @!attribute source + # Source of the file (e.g., `user`, `assistant`). + # + # @return [String] + required :source, String + + # @!method initialize(id:, bytes:, container_id:, created_at:, path:, source:, object: :"container.file") + # @param id [String] Unique identifier for the file. + # + # @param bytes [Integer] Size of the file in bytes. + # + # @param container_id [String] The container this file belongs to. + # + # @param created_at [Integer] Unix timestamp (in seconds) when the file was created. + # + # @param path [String] Path of the file in the container. + # + # @param source [String] Source of the file (e.g., `user`, `assistant`). + # + # @param object [Symbol, :"container.file"] The type of this object (`container.file`). + end + end + end +end diff --git a/lib/openai/models/containers/file_delete_params.rb b/lib/openai/models/containers/file_delete_params.rb new file mode 100644 index 00000000..b9865119 --- /dev/null +++ b/lib/openai/models/containers/file_delete_params.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Containers + # @see OpenAI::Resources::Containers::Files#delete + class FileDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute container_id + # + # @return [String] + required :container_id, String + + # @!method initialize(container_id:, request_options: {}) + # @param container_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/models/containers/file_list_params.rb b/lib/openai/models/containers/file_list_params.rb new file mode 100644 index 00000000..a7b2de9b --- /dev/null +++ b/lib/openai/models/containers/file_list_params.rb @@ -0,0 +1,60 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Containers + # @see OpenAI::Resources::Containers::Files#list + class FileListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute after + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + # + # @return [String, nil] + optional :after, String + + # @!attribute limit + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + # + # @return [Integer, nil] + optional :limit, Integer + + # @!attribute order + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + # + # @return [Symbol, OpenAI::Models::Containers::FileListParams::Order, nil] + optional :order, enum: -> { OpenAI::Containers::FileListParams::Order } + + # @!method initialize(after: nil, limit: nil, order: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Containers::FileListParams} for more details. + # + # @param after [String] A cursor for use in pagination. 
`after` is an object ID that defines your place + # + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1 + # + # @param order [Symbol, OpenAI::Models::Containers::FileListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + module Order + extend OpenAI::Internal::Type::Enum + + ASC = :asc + DESC = :desc + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/containers/file_list_response.rb b/lib/openai/models/containers/file_list_response.rb new file mode 100644 index 00000000..55433ce5 --- /dev/null +++ b/lib/openai/models/containers/file_list_response.rb @@ -0,0 +1,67 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Containers + # @see OpenAI::Resources::Containers::Files#list + class FileListResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # Unique identifier for the file. + # + # @return [String] + required :id, String + + # @!attribute bytes + # Size of the file in bytes. + # + # @return [Integer] + required :bytes, Integer + + # @!attribute container_id + # The container this file belongs to. + # + # @return [String] + required :container_id, String + + # @!attribute created_at + # Unix timestamp (in seconds) when the file was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute object + # The type of this object (`container.file`). + # + # @return [Symbol, :"container.file"] + required :object, const: :"container.file" + + # @!attribute path + # Path of the file in the container. + # + # @return [String] + required :path, String + + # @!attribute source + # Source of the file (e.g., `user`, `assistant`). + # + # @return [String] + required :source, String + + # @!method initialize(id:, bytes:, container_id:, created_at:, path:, source:, object: :"container.file") + # @param id [String] Unique identifier for the file. + # + # @param bytes [Integer] Size of the file in bytes. + # + # @param container_id [String] The container this file belongs to. + # + # @param created_at [Integer] Unix timestamp (in seconds) when the file was created. + # + # @param path [String] Path of the file in the container. + # + # @param source [String] Source of the file (e.g., `user`, `assistant`). + # + # @param object [Symbol, :"container.file"] The type of this object (`container.file`). 
+ end + end + end +end diff --git a/lib/openai/models/containers/file_retrieve_params.rb b/lib/openai/models/containers/file_retrieve_params.rb new file mode 100644 index 00000000..781e8f38 --- /dev/null +++ b/lib/openai/models/containers/file_retrieve_params.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Containers + # @see OpenAI::Resources::Containers::Files#retrieve + class FileRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute container_id + # + # @return [String] + required :container_id, String + + # @!method initialize(container_id:, request_options: {}) + # @param container_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/models/containers/file_retrieve_response.rb b/lib/openai/models/containers/file_retrieve_response.rb new file mode 100644 index 00000000..4c905b59 --- /dev/null +++ b/lib/openai/models/containers/file_retrieve_response.rb @@ -0,0 +1,67 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Containers + # @see OpenAI::Resources::Containers::Files#retrieve + class FileRetrieveResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # Unique identifier for the file. + # + # @return [String] + required :id, String + + # @!attribute bytes + # Size of the file in bytes. + # + # @return [Integer] + required :bytes, Integer + + # @!attribute container_id + # The container this file belongs to. + # + # @return [String] + required :container_id, String + + # @!attribute created_at + # Unix timestamp (in seconds) when the file was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute object + # The type of this object (`container.file`). + # + # @return [Symbol, :"container.file"] + required :object, const: :"container.file" + + # @!attribute path + # Path of the file in the container. + # + # @return [String] + required :path, String + + # @!attribute source + # Source of the file (e.g., `user`, `assistant`). + # + # @return [String] + required :source, String + + # @!method initialize(id:, bytes:, container_id:, created_at:, path:, source:, object: :"container.file") + # @param id [String] Unique identifier for the file. + # + # @param bytes [Integer] Size of the file in bytes. + # + # @param container_id [String] The container this file belongs to. + # + # @param created_at [Integer] Unix timestamp (in seconds) when the file was created. + # + # @param path [String] Path of the file in the container. + # + # @param source [String] Source of the file (e.g., `user`, `assistant`). + # + # @param object [Symbol, :"container.file"] The type of this object (`container.file`). 
+ end + end + end +end diff --git a/lib/openai/models/containers/files/content_retrieve_params.rb b/lib/openai/models/containers/files/content_retrieve_params.rb new file mode 100644 index 00000000..b765ee47 --- /dev/null +++ b/lib/openai/models/containers/files/content_retrieve_params.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Containers + module Files + # @see OpenAI::Resources::Containers::Files::Content#retrieve + class ContentRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute container_id + # + # @return [String] + required :container_id, String + + # @!method initialize(container_id:, request_options: {}) + # @param container_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end + end +end diff --git a/lib/openai/models/conversations/computer_screenshot_content.rb b/lib/openai/models/conversations/computer_screenshot_content.rb new file mode 100644 index 00000000..1b030a1c --- /dev/null +++ b/lib/openai/models/conversations/computer_screenshot_content.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + class ComputerScreenshotContent < OpenAI::Internal::Type::BaseModel + # @!attribute file_id + # The identifier of an uploaded file that contains the screenshot. + # + # @return [String, nil] + required :file_id, String, nil?: true + + # @!attribute image_url + # The URL of the screenshot image. + # + # @return [String, nil] + required :image_url, String, nil?: true + + # @!attribute type + # Specifies the event type. For a computer screenshot, this property is always set + # to `computer_screenshot`. + # + # @return [Symbol, :computer_screenshot] + required :type, const: :computer_screenshot + + # @!method initialize(file_id:, image_url:, type: :computer_screenshot) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ComputerScreenshotContent} for more details. + # + # @param file_id [String, nil] The identifier of an uploaded file that contains the screenshot. + # + # @param image_url [String, nil] The URL of the screenshot image. + # + # @param type [Symbol, :computer_screenshot] Specifies the event type. For a computer screenshot, this property is always set + end + end + end +end diff --git a/lib/openai/models/conversations/container_file_citation_body.rb b/lib/openai/models/conversations/container_file_citation_body.rb new file mode 100644 index 00000000..4c373465 --- /dev/null +++ b/lib/openai/models/conversations/container_file_citation_body.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + class ContainerFileCitationBody < OpenAI::Internal::Type::BaseModel + # @!attribute container_id + # The ID of the container file. + # + # @return [String] + required :container_id, String + + # @!attribute end_index + # The index of the last character of the container file citation in the message. + # + # @return [Integer] + required :end_index, Integer + + # @!attribute file_id + # The ID of the file. + # + # @return [String] + required :file_id, String + + # @!attribute filename + # The filename of the container file cited. + # + # @return [String] + required :filename, String + + # @!attribute start_index + # The index of the first character of the container file citation in the message. 
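`Files::ContentRetrieveParams` above carries only the parent `container_id`, mirroring the nested route; a retrieval sketch (method shape assumed from the `@see` tag, IDs hypothetical, and the return type is not shown in this hunk):

    content = client.containers.files.content.retrieve(
      "file-abc123",              # file ID (path parameter)
      container_id: "cntr_abc123" # required parent container, per the params above
    )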
+ # + # @return [Integer] + required :start_index, Integer + + # @!attribute type + # The type of the container file citation. Always `container_file_citation`. + # + # @return [Symbol, :container_file_citation] + required :type, const: :container_file_citation + + # @!method initialize(container_id:, end_index:, file_id:, filename:, start_index:, type: :container_file_citation) + # @param container_id [String] The ID of the container file. + # + # @param end_index [Integer] The index of the last character of the container file citation in the message. + # + # @param file_id [String] The ID of the file. + # + # @param filename [String] The filename of the container file cited. + # + # @param start_index [Integer] The index of the first character of the container file citation in the message. + # + # @param type [Symbol, :container_file_citation] The type of the container file citation. Always `container_file_citation`. + end + end + end +end diff --git a/lib/openai/models/conversations/conversation.rb b/lib/openai/models/conversations/conversation.rb new file mode 100644 index 00000000..8e39df4c --- /dev/null +++ b/lib/openai/models/conversations/conversation.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + # @see OpenAI::Resources::Conversations#create + class Conversation < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the conversation. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The time at which the conversation was created, measured in seconds since the + # Unix epoch. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters. + # + # @return [Object] + required :metadata, OpenAI::Internal::Type::Unknown + + # @!attribute object + # The object type, which is always `conversation`. + # + # @return [Symbol, :conversation] + required :object, const: :conversation + + # @!method initialize(id:, created_at:, metadata:, object: :conversation) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::Conversation} for more details. + # + # @param id [String] The unique ID of the conversation. + # + # @param created_at [Integer] The time at which the conversation was created, measured in seconds since the Un + # + # @param metadata [Object] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param object [Symbol, :conversation] The object type, which is always `conversation`. 
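The `Conversation` model above (and the `ConversationCreateParams` that follows) round-trip like so; a sketch, with the message-item hash shape assumed from `ResponseInputItem`, which lives outside this hunk:

    conversation = client.conversations.create(
      items: [{type: :message, role: :user, content: "Hello!"}], # at most 20 items per call
      metadata: {project: "demo"}                                # up to 16 string key/value pairs
    )
    puts conversation.id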
+ end + end + end +end diff --git a/lib/openai/models/conversations/conversation_create_params.rb b/lib/openai/models/conversations/conversation_create_params.rb new file mode 100644 index 00000000..58cc4ba9 --- /dev/null +++ b/lib/openai/models/conversations/conversation_create_params.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + # @see OpenAI::Resources::Conversations#create + class ConversationCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute items + # Initial items to include in the conversation context. You may add up to 20 items + # at a time. + # + # @return [Array, nil] + optional :items, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputItem] }, + nil?: true + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. Useful for storing + # additional information about the object in a structured format. + # + # @return [Hash{Symbol=>String}, nil] + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!method initialize(items: nil, metadata: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ConversationCreateParams} for more details. + # + # @param items [Array, nil] Initial items to include in the conversation context. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. Useful for + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/models/conversations/conversation_delete_params.rb b/lib/openai/models/conversations/conversation_delete_params.rb new file mode 100644 index 00000000..88897fec --- /dev/null +++ b/lib/openai/models/conversations/conversation_delete_params.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + # @see OpenAI::Resources::Conversations#delete + class ConversationDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/models/conversations/conversation_deleted.rb b/lib/openai/models/conversations/conversation_deleted.rb new file mode 100644 index 00000000..3d599ebe --- /dev/null +++ b/lib/openai/models/conversations/conversation_deleted.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + class ConversationDeleted < OpenAI::Internal::Type::BaseModel + # @!attribute id + # + # @return [String] + required :id, String + + # @!attribute deleted + # + # @return [Boolean] + required :deleted, OpenAI::Internal::Type::Boolean + + # @!attribute object + # + # @return [Symbol, :"conversation.deleted"] + required :object, const: :"conversation.deleted" + + # @!method initialize(id:, deleted:, object: :"conversation.deleted") + # @param id [String] + # @param deleted [Boolean] + # @param object [Symbol, :"conversation.deleted"] + end + end + end +end diff --git a/lib/openai/models/conversations/conversation_deleted_resource.rb b/lib/openai/models/conversations/conversation_deleted_resource.rb new file mode 100644 index 
00000000..08baf435 --- /dev/null +++ b/lib/openai/models/conversations/conversation_deleted_resource.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + # @see OpenAI::Resources::Conversations#delete + class ConversationDeletedResource < OpenAI::Internal::Type::BaseModel + # @!attribute id + # + # @return [String] + required :id, String + + # @!attribute deleted + # + # @return [Boolean] + required :deleted, OpenAI::Internal::Type::Boolean + + # @!attribute object + # + # @return [Symbol, :"conversation.deleted"] + required :object, const: :"conversation.deleted" + + # @!method initialize(id:, deleted:, object: :"conversation.deleted") + # @param id [String] + # @param deleted [Boolean] + # @param object [Symbol, :"conversation.deleted"] + end + end + end +end diff --git a/lib/openai/models/conversations/conversation_item.rb b/lib/openai/models/conversations/conversation_item.rb new file mode 100644 index 00000000..3ca95f12 --- /dev/null +++ b/lib/openai/models/conversations/conversation_item.rb @@ -0,0 +1,568 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + # A single item within a conversation. The set of possible types are the same as + # the `output` type of a + # [Response object](https://platform.openai.com/docs/api-reference/responses/object#responses/object-output). + # + # @see OpenAI::Resources::Conversations::Items#retrieve + module ConversationItem + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant :message, -> { OpenAI::Conversations::Message } + + # A tool call to run a function. See the + # [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information. + variant :function_call, -> { OpenAI::Responses::ResponseFunctionToolCallItem } + + variant :function_call_output, -> { OpenAI::Responses::ResponseFunctionToolCallOutputItem } + + # The results of a file search tool call. See the + # [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information. + variant :file_search_call, -> { OpenAI::Responses::ResponseFileSearchToolCall } + + # The results of a web search tool call. See the + # [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information. + variant :web_search_call, -> { OpenAI::Responses::ResponseFunctionWebSearch } + + # An image generation request made by the model. + variant :image_generation_call, -> { OpenAI::Conversations::ConversationItem::ImageGenerationCall } + + # A tool call to a computer use tool. See the + # [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information. + variant :computer_call, -> { OpenAI::Responses::ResponseComputerToolCall } + + variant :computer_call_output, -> { OpenAI::Responses::ResponseComputerToolCallOutputItem } + + # A description of the chain of thought used by a reasoning model while generating + # a response. Be sure to include these items in your `input` to the Responses API + # for subsequent turns of a conversation if you are manually + # [managing context](https://platform.openai.com/docs/guides/conversation-state). + variant :reasoning, -> { OpenAI::Responses::ResponseReasoningItem } + + # A tool call to run code. + variant :code_interpreter_call, -> { OpenAI::Responses::ResponseCodeInterpreterToolCall } + + # A tool call to run a command on the local shell. 
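Worth pausing on the union mechanics here: `discriminator :type` means the type framework reads each wire item's `type` field and coerces the payload into the matching variant class. A minimal, hypothetical sketch of branching on the result — `item` is assumed to be one element of a conversation items listing:

```ruby
require "openai"

# `item` is an assumed local: one coerced element of a conversation's items.
case item
in OpenAI::Conversations::Message => msg
  puts "#{msg.role} message with #{msg.content.length} content part(s)"
in OpenAI::Responses::ResponseReasoningItem
  puts "reasoning item"
else
  puts "unhandled item type: #{item.class}"
end
```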
+ variant :local_shell_call, -> { OpenAI::Conversations::ConversationItem::LocalShellCall } + + # The output of a local shell tool call. + variant :local_shell_call_output, -> { OpenAI::Conversations::ConversationItem::LocalShellCallOutput } + + # A list of tools available on an MCP server. + variant :mcp_list_tools, -> { OpenAI::Conversations::ConversationItem::McpListTools } + + # A request for human approval of a tool invocation. + variant :mcp_approval_request, -> { OpenAI::Conversations::ConversationItem::McpApprovalRequest } + + # A response to an MCP approval request. + variant :mcp_approval_response, -> { OpenAI::Conversations::ConversationItem::McpApprovalResponse } + + # An invocation of a tool on an MCP server. + variant :mcp_call, -> { OpenAI::Conversations::ConversationItem::McpCall } + + # A call to a custom tool created by the model. + variant :custom_tool_call, -> { OpenAI::Responses::ResponseCustomToolCall } + + # The output of a custom tool call from your code, being sent back to the model. + variant :custom_tool_call_output, -> { OpenAI::Responses::ResponseCustomToolCallOutput } + + class ImageGenerationCall < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the image generation call. + # + # @return [String] + required :id, String + + # @!attribute result + # The generated image encoded in base64. + # + # @return [String, nil] + required :result, String, nil?: true + + # @!attribute status + # The status of the image generation call. + # + # @return [Symbol, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall::Status] + required :status, enum: -> { OpenAI::Conversations::ConversationItem::ImageGenerationCall::Status } + + # @!attribute type + # The type of the image generation call. Always `image_generation_call`. + # + # @return [Symbol, :image_generation_call] + required :type, const: :image_generation_call + + # @!method initialize(id:, result:, status:, type: :image_generation_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall} for more + # details. + # + # An image generation request made by the model. + # + # @param id [String] The unique ID of the image generation call. + # + # @param result [String, nil] The generated image encoded in base64. + # + # @param status [Symbol, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall::Status] The status of the image generation call. + # + # @param type [Symbol, :image_generation_call] The type of the image generation call. Always `image_generation_call`. + + # The status of the image generation call. + # + # @see OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + GENERATING = :generating + FAILED = :failed + + # @!method self.values + # @return [Array] + end + end + + class LocalShellCall < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the local shell call. + # + # @return [String] + required :id, String + + # @!attribute action + # Execute a shell command on the server. + # + # @return [OpenAI::Models::Conversations::ConversationItem::LocalShellCall::Action] + required :action, -> { OpenAI::Conversations::ConversationItem::LocalShellCall::Action } + + # @!attribute call_id + # The unique ID of the local shell tool call generated by the model. 
+ # + # @return [String] + required :call_id, String + + # @!attribute status + # The status of the local shell call. + # + # @return [Symbol, OpenAI::Models::Conversations::ConversationItem::LocalShellCall::Status] + required :status, enum: -> { OpenAI::Conversations::ConversationItem::LocalShellCall::Status } + + # @!attribute type + # The type of the local shell call. Always `local_shell_call`. + # + # @return [Symbol, :local_shell_call] + required :type, const: :local_shell_call + + # @!method initialize(id:, action:, call_id:, status:, type: :local_shell_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ConversationItem::LocalShellCall} for more + # details. + # + # A tool call to run a command on the local shell. + # + # @param id [String] The unique ID of the local shell call. + # + # @param action [OpenAI::Models::Conversations::ConversationItem::LocalShellCall::Action] Execute a shell command on the server. + # + # @param call_id [String] The unique ID of the local shell tool call generated by the model. + # + # @param status [Symbol, OpenAI::Models::Conversations::ConversationItem::LocalShellCall::Status] The status of the local shell call. + # + # @param type [Symbol, :local_shell_call] The type of the local shell call. Always `local_shell_call`. + + # @see OpenAI::Models::Conversations::ConversationItem::LocalShellCall#action + class Action < OpenAI::Internal::Type::BaseModel + # @!attribute command + # The command to run. + # + # @return [Array] + required :command, OpenAI::Internal::Type::ArrayOf[String] + + # @!attribute env + # Environment variables to set for the command. + # + # @return [Hash{Symbol=>String}] + required :env, OpenAI::Internal::Type::HashOf[String] + + # @!attribute type + # The type of the local shell action. Always `exec`. + # + # @return [Symbol, :exec] + required :type, const: :exec + + # @!attribute timeout_ms + # Optional timeout in milliseconds for the command. + # + # @return [Integer, nil] + optional :timeout_ms, Integer, nil?: true + + # @!attribute user + # Optional user to run the command as. + # + # @return [String, nil] + optional :user, String, nil?: true + + # @!attribute working_directory + # Optional working directory to run the command in. + # + # @return [String, nil] + optional :working_directory, String, nil?: true + + # @!method initialize(command:, env:, timeout_ms: nil, user: nil, working_directory: nil, type: :exec) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ConversationItem::LocalShellCall::Action} for + # more details. + # + # Execute a shell command on the server. + # + # @param command [Array] The command to run. + # + # @param env [Hash{Symbol=>String}] Environment variables to set for the command. + # + # @param timeout_ms [Integer, nil] Optional timeout in milliseconds for the command. + # + # @param user [String, nil] Optional user to run the command as. + # + # @param working_directory [String, nil] Optional working directory to run the command in. + # + # @param type [Symbol, :exec] The type of the local shell action. Always `exec`. + end + + # The status of the local shell call. 
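Since the `Action` model just defined is plain data, it can also be built by hand — useful when stubbing conversation items in tests. A sketch with illustrative values, relying on the keyword initializer documented above (the `const:` field supplies `type: :exec` automatically):

```ruby
require "openai"

action = OpenAI::Conversations::ConversationItem::LocalShellCall::Action.new(
  command: ["ls", "-la"],       # illustrative command
  env: {PATH: "/usr/bin:/bin"}, # symbol keys per Hash{Symbol=>String}
  timeout_ms: 5_000
)
action.type # => :exec, filled in by the const default
```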
+ # + # @see OpenAI::Models::Conversations::ConversationItem::LocalShellCall#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + INCOMPLETE = :incomplete + + # @!method self.values + # @return [Array] + end + end + + class LocalShellCallOutput < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the local shell tool call generated by the model. + # + # @return [String] + required :id, String + + # @!attribute output + # A JSON string of the output of the local shell tool call. + # + # @return [String] + required :output, String + + # @!attribute type + # The type of the local shell tool call output. Always `local_shell_call_output`. + # + # @return [Symbol, :local_shell_call_output] + required :type, const: :local_shell_call_output + + # @!attribute status + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # + # @return [Symbol, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput::Status, nil] + optional :status, + enum: -> { OpenAI::Conversations::ConversationItem::LocalShellCallOutput::Status }, + nil?: true + + # @!method initialize(id:, output:, status: nil, type: :local_shell_call_output) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput} for more + # details. + # + # The output of a local shell tool call. + # + # @param id [String] The unique ID of the local shell tool call generated by the model. + # + # @param output [String] A JSON string of the output of the local shell tool call. + # + # @param status [Symbol, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput::Status, nil] The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # + # @param type [Symbol, :local_shell_call_output] The type of the local shell tool call output. Always `local_shell_call_output`. + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # + # @see OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + INCOMPLETE = :incomplete + + # @!method self.values + # @return [Array] + end + end + + class McpListTools < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the list. + # + # @return [String] + required :id, String + + # @!attribute server_label + # The label of the MCP server. + # + # @return [String] + required :server_label, String + + # @!attribute tools + # The tools available on the server. + # + # @return [Array] + required :tools, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Conversations::ConversationItem::McpListTools::Tool] } + + # @!attribute type + # The type of the item. Always `mcp_list_tools`. + # + # @return [Symbol, :mcp_list_tools] + required :type, const: :mcp_list_tools + + # @!attribute error + # Error message if the server could not list tools. + # + # @return [String, nil] + optional :error, String, nil?: true + + # @!method initialize(id:, server_label:, tools:, error: nil, type: :mcp_list_tools) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ConversationItem::McpListTools} for more + # details. + # + # A list of tools available on an MCP server. + # + # @param id [String] The unique ID of the list. + # + # @param server_label [String] The label of the MCP server. 
+ # + # @param tools [Array] The tools available on the server. + # + # @param error [String, nil] Error message if the server could not list tools. + # + # @param type [Symbol, :mcp_list_tools] The type of the item. Always `mcp_list_tools`. + + class Tool < OpenAI::Internal::Type::BaseModel + # @!attribute input_schema + # The JSON schema describing the tool's input. + # + # @return [Object] + required :input_schema, OpenAI::Internal::Type::Unknown + + # @!attribute name + # The name of the tool. + # + # @return [String] + required :name, String + + # @!attribute annotations + # Additional annotations about the tool. + # + # @return [Object, nil] + optional :annotations, OpenAI::Internal::Type::Unknown, nil?: true + + # @!attribute description + # The description of the tool. + # + # @return [String, nil] + optional :description, String, nil?: true + + # @!method initialize(input_schema:, name:, annotations: nil, description: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ConversationItem::McpListTools::Tool} for more + # details. + # + # A tool available on an MCP server. + # + # @param input_schema [Object] The JSON schema describing the tool's input. + # + # @param name [String] The name of the tool. + # + # @param annotations [Object, nil] Additional annotations about the tool. + # + # @param description [String, nil] The description of the tool. + end + end + + class McpApprovalRequest < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the approval request. + # + # @return [String] + required :id, String + + # @!attribute arguments + # A JSON string of arguments for the tool. + # + # @return [String] + required :arguments, String + + # @!attribute name + # The name of the tool to run. + # + # @return [String] + required :name, String + + # @!attribute server_label + # The label of the MCP server making the request. + # + # @return [String] + required :server_label, String + + # @!attribute type + # The type of the item. Always `mcp_approval_request`. + # + # @return [Symbol, :mcp_approval_request] + required :type, const: :mcp_approval_request + + # @!method initialize(id:, arguments:, name:, server_label:, type: :mcp_approval_request) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest} for more + # details. + # + # A request for human approval of a tool invocation. + # + # @param id [String] The unique ID of the approval request. + # + # @param arguments [String] A JSON string of arguments for the tool. + # + # @param name [String] The name of the tool to run. + # + # @param server_label [String] The label of the MCP server making the request. + # + # @param type [Symbol, :mcp_approval_request] The type of the item. Always `mcp_approval_request`. + end + + class McpApprovalResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the approval response + # + # @return [String] + required :id, String + + # @!attribute approval_request_id + # The ID of the approval request being answered. + # + # @return [String] + required :approval_request_id, String + + # @!attribute approve + # Whether the request was approved. + # + # @return [Boolean] + required :approve, OpenAI::Internal::Type::Boolean + + # @!attribute type + # The type of the item. Always `mcp_approval_response`. 
+ # + # @return [Symbol, :mcp_approval_response] + required :type, const: :mcp_approval_response + + # @!attribute reason + # Optional reason for the decision. + # + # @return [String, nil] + optional :reason, String, nil?: true + + # @!method initialize(id:, approval_request_id:, approve:, reason: nil, type: :mcp_approval_response) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse} for more + # details. + # + # A response to an MCP approval request. + # + # @param id [String] The unique ID of the approval response + # + # @param approval_request_id [String] The ID of the approval request being answered. + # + # @param approve [Boolean] Whether the request was approved. + # + # @param reason [String, nil] Optional reason for the decision. + # + # @param type [Symbol, :mcp_approval_response] The type of the item. Always `mcp_approval_response`. + end + + class McpCall < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the tool call. + # + # @return [String] + required :id, String + + # @!attribute arguments + # A JSON string of the arguments passed to the tool. + # + # @return [String] + required :arguments, String + + # @!attribute name + # The name of the tool that was run. + # + # @return [String] + required :name, String + + # @!attribute server_label + # The label of the MCP server running the tool. + # + # @return [String] + required :server_label, String + + # @!attribute type + # The type of the item. Always `mcp_call`. + # + # @return [Symbol, :mcp_call] + required :type, const: :mcp_call + + # @!attribute error + # The error from the tool call, if any. + # + # @return [String, nil] + optional :error, String, nil?: true + + # @!attribute output + # The output from the tool call. + # + # @return [String, nil] + optional :output, String, nil?: true + + # @!method initialize(id:, arguments:, name:, server_label:, error: nil, output: nil, type: :mcp_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ConversationItem::McpCall} for more details. + # + # An invocation of a tool on an MCP server. + # + # @param id [String] The unique ID of the tool call. + # + # @param arguments [String] A JSON string of the arguments passed to the tool. + # + # @param name [String] The name of the tool that was run. + # + # @param server_label [String] The label of the MCP server running the tool. + # + # @param error [String, nil] The error from the tool call, if any. + # + # @param output [String, nil] The output from the tool call. + # + # @param type [Symbol, :mcp_call] The type of the item. Always `mcp_call`. 
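Taken together, `McpApprovalRequest` and `McpApprovalResponse` model a small handshake: the server surfaces a request item, and the caller answers it with a response item. A hedged sketch — `request` is an assumed `McpApprovalRequest` pulled from a conversation, and the ID is illustrative:

```ruby
require "openai"

answer = OpenAI::Conversations::ConversationItem::McpApprovalResponse.new(
  id: "mcpr_123",                  # illustrative ID
  approval_request_id: request.id, # `request` is an assumed local
  approve: true,
  reason: "tool is on the project allowlist"
)
```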
+ end + + # @!method self.variants + # @return [Array(OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput)] + end + end + + ConversationItem = Conversations::ConversationItem + end +end diff --git a/lib/openai/models/conversations/conversation_item_list.rb b/lib/openai/models/conversations/conversation_item_list.rb new file mode 100644 index 00000000..4c36f797 --- /dev/null +++ b/lib/openai/models/conversations/conversation_item_list.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + # @see OpenAI::Resources::Conversations::Items#create + class ConversationItemList < OpenAI::Internal::Type::BaseModel + # @!attribute data + # A list of conversation items. + # + # @return [Array] + required :data, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Conversations::ConversationItem] } + + # @!attribute first_id + # The ID of the first item in the list. + # + # @return [String] + required :first_id, String + + # @!attribute has_more + # Whether there are more items available. + # + # @return [Boolean] + required :has_more, OpenAI::Internal::Type::Boolean + + # @!attribute last_id + # The ID of the last item in the list. + # + # @return [String] + required :last_id, String + + # @!attribute object + # The type of object returned, must be `list`. + # + # @return [Symbol, :list] + required :object, const: :list + + # @!method initialize(data:, first_id:, has_more:, last_id:, object: :list) + # A list of Conversation items. + # + # @param data [Array] A list of conversation items. + # + # @param first_id [String] The ID of the first item in the list. + # + # @param has_more [Boolean] Whether there are more items available. + # + # @param last_id [String] The ID of the last item in the list. + # + # @param object [Symbol, :list] The type of object returned, must be `list`. 
+ end + end + + ConversationItemList = Conversations::ConversationItemList + end +end diff --git a/lib/openai/models/conversations/conversation_retrieve_params.rb b/lib/openai/models/conversations/conversation_retrieve_params.rb new file mode 100644 index 00000000..4683d715 --- /dev/null +++ b/lib/openai/models/conversations/conversation_retrieve_params.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + # @see OpenAI::Resources::Conversations#retrieve + class ConversationRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/models/conversations/conversation_update_params.rb b/lib/openai/models/conversations/conversation_update_params.rb new file mode 100644 index 00000000..d268166e --- /dev/null +++ b/lib/openai/models/conversations/conversation_update_params.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + # @see OpenAI::Resources::Conversations#update + class ConversationUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters. + # + # @return [Hash{Symbol=>String}] + required :metadata, OpenAI::Internal::Type::HashOf[String] + + # @!method initialize(metadata:, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ConversationUpdateParams} for more details. + # + # @param metadata [Hash{Symbol=>String}] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/models/conversations/file_citation_body.rb b/lib/openai/models/conversations/file_citation_body.rb new file mode 100644 index 00000000..93d84a93 --- /dev/null +++ b/lib/openai/models/conversations/file_citation_body.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + class FileCitationBody < OpenAI::Internal::Type::BaseModel + # @!attribute file_id + # The ID of the file. + # + # @return [String] + required :file_id, String + + # @!attribute filename + # The filename of the file cited. + # + # @return [String] + required :filename, String + + # @!attribute index + # The index of the file in the list of files. + # + # @return [Integer] + required :index, Integer + + # @!attribute type + # The type of the file citation. Always `file_citation`. + # + # @return [Symbol, :file_citation] + required :type, const: :file_citation + + # @!method initialize(file_id:, filename:, index:, type: :file_citation) + # @param file_id [String] The ID of the file. + # + # @param filename [String] The filename of the file cited. + # + # @param index [Integer] The index of the file in the list of files. 
+ # + # @param type [Symbol, :file_citation] The type of the file citation. Always `file_citation`. + end + end + end +end diff --git a/lib/openai/models/conversations/input_file_content.rb b/lib/openai/models/conversations/input_file_content.rb new file mode 100644 index 00000000..1cb5b5fa --- /dev/null +++ b/lib/openai/models/conversations/input_file_content.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + class InputFileContent < OpenAI::Internal::Type::BaseModel + # @!attribute file_id + # The ID of the file to be sent to the model. + # + # @return [String, nil] + required :file_id, String, nil?: true + + # @!attribute type + # The type of the input item. Always `input_file`. + # + # @return [Symbol, :input_file] + required :type, const: :input_file + + # @!attribute file_url + # The URL of the file to be sent to the model. + # + # @return [String, nil] + optional :file_url, String + + # @!attribute filename + # The name of the file to be sent to the model. + # + # @return [String, nil] + optional :filename, String + + # @!method initialize(file_id:, file_url: nil, filename: nil, type: :input_file) + # @param file_id [String, nil] The ID of the file to be sent to the model. + # + # @param file_url [String] The URL of the file to be sent to the model. + # + # @param filename [String] The name of the file to be sent to the model. + # + # @param type [Symbol, :input_file] The type of the input item. Always `input_file`. + end + end + end +end diff --git a/lib/openai/models/conversations/input_image_content.rb b/lib/openai/models/conversations/input_image_content.rb new file mode 100644 index 00000000..63d1ef58 --- /dev/null +++ b/lib/openai/models/conversations/input_image_content.rb @@ -0,0 +1,62 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + class InputImageContent < OpenAI::Internal::Type::BaseModel + # @!attribute detail + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + # + # @return [Symbol, OpenAI::Models::Conversations::InputImageContent::Detail] + required :detail, enum: -> { OpenAI::Conversations::InputImageContent::Detail } + + # @!attribute file_id + # The ID of the file to be sent to the model. + # + # @return [String, nil] + required :file_id, String, nil?: true + + # @!attribute image_url + # The URL of the image to be sent to the model. A fully qualified URL or base64 + # encoded image in a data URL. + # + # @return [String, nil] + required :image_url, String, nil?: true + + # @!attribute type + # The type of the input item. Always `input_image`. + # + # @return [Symbol, :input_image] + required :type, const: :input_image + + # @!method initialize(detail:, file_id:, image_url:, type: :input_image) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::InputImageContent} for more details. + # + # @param detail [Symbol, OpenAI::Models::Conversations::InputImageContent::Detail] The detail level of the image to be sent to the model. One of `high`, `low`, or + # + # @param file_id [String, nil] The ID of the file to be sent to the model. + # + # @param image_url [String, nil] The URL of the image to be sent to the model. A fully qualified URL or base64 en + # + # @param type [Symbol, :input_image] The type of the input item. Always `input_image`. + + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. 
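One note ahead of the `Detail` enum that follows: `InputImageContent` takes either a `file_id` or an `image_url` (a fully qualified URL or a base64 data URL), with the unused field passed explicitly as `nil` since both are required but nullable. A sketch with placeholder values:

```ruby
require "openai"

image = OpenAI::Conversations::InputImageContent.new(
  detail: :auto,
  file_id: nil,
  image_url: "data:image/png;base64,iVBORw0KGgo..." # truncated placeholder
)
```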
+ # + # @see OpenAI::Models::Conversations::InputImageContent#detail + module Detail + extend OpenAI::Internal::Type::Enum + + LOW = :low + HIGH = :high + AUTO = :auto + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/conversations/input_text_content.rb b/lib/openai/models/conversations/input_text_content.rb new file mode 100644 index 00000000..81c74843 --- /dev/null +++ b/lib/openai/models/conversations/input_text_content.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + class InputTextContent < OpenAI::Internal::Type::BaseModel + # @!attribute text + # The text input to the model. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the input item. Always `input_text`. + # + # @return [Symbol, :input_text] + required :type, const: :input_text + + # @!method initialize(text:, type: :input_text) + # @param text [String] The text input to the model. + # + # @param type [Symbol, :input_text] The type of the input item. Always `input_text`. + end + end + end +end diff --git a/lib/openai/models/conversations/item_create_params.rb b/lib/openai/models/conversations/item_create_params.rb new file mode 100644 index 00000000..bebc18e0 --- /dev/null +++ b/lib/openai/models/conversations/item_create_params.rb @@ -0,0 +1,37 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + # @see OpenAI::Resources::Conversations::Items#create + class ItemCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute items + # The items to add to the conversation. You may add up to 20 items at a time. + # + # @return [Array] + required :items, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputItem] } + + # @!attribute include + # Additional fields to include in the response. See the `include` parameter for + # [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + # for more information. + # + # @return [Array, nil] + optional :include, -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Responses::ResponseIncludable] } + + # @!method initialize(items:, include: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ItemCreateParams} for more details. + # + # @param items [Array] The items to add to the conversation. You may add up to 20 items at a time. + # + # @param include [Array] Additional fields to include in the response. 
See the `include` + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/models/conversations/item_delete_params.rb b/lib/openai/models/conversations/item_delete_params.rb new file mode 100644 index 00000000..6a2eb57f --- /dev/null +++ b/lib/openai/models/conversations/item_delete_params.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + # @see OpenAI::Resources::Conversations::Items#delete + class ItemDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute conversation_id + # + # @return [String] + required :conversation_id, String + + # @!method initialize(conversation_id:, request_options: {}) + # @param conversation_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/models/conversations/item_list_params.rb b/lib/openai/models/conversations/item_list_params.rb new file mode 100644 index 00000000..ffddb386 --- /dev/null +++ b/lib/openai/models/conversations/item_list_params.rb @@ -0,0 +1,84 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + # @see OpenAI::Resources::Conversations::Items#list + class ItemListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute after + # An item ID to list items after, used in pagination. + # + # @return [String, nil] + optional :after, String + + # @!attribute include + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `message.output_text.logprobs`: Include logprobs with assistant messages. + # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + # tokens in reasoning item outputs. This enables reasoning items to be used in + # multi-turn conversations when using the Responses API statelessly (like when + # the `store` parameter is set to `false`, or when an organization is enrolled + # in the zero data retention program). + # + # @return [Array, nil] + optional :include, -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Responses::ResponseIncludable] } + + # @!attribute limit + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + # + # @return [Integer, nil] + optional :limit, Integer + + # @!attribute order + # The order to return the input items in. Default is `desc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. 
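These params map one-to-one onto query arguments. A hypothetical listing call — the `conversations.items.list` method and its positional conversation ID follow the `@see` reference above but should be treated as an assumption, and the `include` value comes from the documented `ResponseIncludable` set:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Oldest-first, 50 per page, with logprobs attached to output text.
client.conversations.items.list(
  "conv_123", # illustrative conversation ID
  order: :asc,
  limit: 50,
  include: [:"message.output_text.logprobs"]
)
```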
+ # + # @return [Symbol, OpenAI::Models::Conversations::ItemListParams::Order, nil] + optional :order, enum: -> { OpenAI::Conversations::ItemListParams::Order } + + # @!method initialize(after: nil, include: nil, limit: nil, order: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ItemListParams} for more details. + # + # @param after [String] An item ID to list items after, used in pagination. + # + # @param include [Array] Specify additional output data to include in the model response. Currently + # + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between + # + # @param order [Symbol, OpenAI::Models::Conversations::ItemListParams::Order] The order to return the input items in. Default is `desc`. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + # The order to return the input items in. Default is `desc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. + module Order + extend OpenAI::Internal::Type::Enum + + ASC = :asc + DESC = :desc + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/conversations/item_retrieve_params.rb b/lib/openai/models/conversations/item_retrieve_params.rb new file mode 100644 index 00000000..b6ef4a2f --- /dev/null +++ b/lib/openai/models/conversations/item_retrieve_params.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + # @see OpenAI::Resources::Conversations::Items#retrieve + class ItemRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute conversation_id + # + # @return [String] + required :conversation_id, String + + # @!attribute include + # Additional fields to include in the response. See the `include` parameter for + # [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + # for more information. + # + # @return [Array, nil] + optional :include, -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Responses::ResponseIncludable] } + + # @!method initialize(conversation_id:, include: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ItemRetrieveParams} for more details. + # + # @param conversation_id [String] + # + # @param include [Array] Additional fields to include in the response. 
See the `include` + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/models/conversations/lob_prob.rb b/lib/openai/models/conversations/lob_prob.rb new file mode 100644 index 00000000..60d72ed2 --- /dev/null +++ b/lib/openai/models/conversations/lob_prob.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + class LobProb < OpenAI::Internal::Type::BaseModel + # @!attribute token + # + # @return [String] + required :token, String + + # @!attribute bytes + # + # @return [Array] + required :bytes, OpenAI::Internal::Type::ArrayOf[Integer] + + # @!attribute logprob + # + # @return [Float] + required :logprob, Float + + # @!attribute top_logprobs + # + # @return [Array] + required :top_logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Conversations::TopLogProb] } + + # @!method initialize(token:, bytes:, logprob:, top_logprobs:) + # @param token [String] + # @param bytes [Array] + # @param logprob [Float] + # @param top_logprobs [Array] + end + end + end +end diff --git a/lib/openai/models/conversations/message.rb b/lib/openai/models/conversations/message.rb new file mode 100644 index 00000000..5b620a90 --- /dev/null +++ b/lib/openai/models/conversations/message.rb @@ -0,0 +1,115 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + class Message < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the message. + # + # @return [String] + required :id, String + + # @!attribute content + # The content of the message. + # + # @return [Array] + required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Conversations::Message::Content] } + + # @!attribute role + # The role of the message. One of `unknown`, `user`, `assistant`, `system`, + # `critic`, `discriminator`, `developer`, or `tool`. + # + # @return [Symbol, OpenAI::Models::Conversations::Message::Role] + required :role, enum: -> { OpenAI::Conversations::Message::Role } + + # @!attribute status + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + # + # @return [Symbol, OpenAI::Models::Conversations::Message::Status] + required :status, enum: -> { OpenAI::Conversations::Message::Status } + + # @!attribute type + # The type of the message. Always set to `message`. + # + # @return [Symbol, :message] + required :type, const: :message + + # @!method initialize(id:, content:, role:, status:, type: :message) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Conversations::Message} for more details. + # + # @param id [String] The unique ID of the message. + # + # @param content [Array] The content of the message. + # + # @param role [Symbol, OpenAI::Models::Conversations::Message::Role] The role of the message. One of `unknown`, `user`, `assistant`, `system`, `criti + # + # @param status [Symbol, OpenAI::Models::Conversations::Message::Status] The status of the item. One of `in_progress`, `completed`, or `incomplete`. Populate + # + # @param type [Symbol, :message] The type of the message. Always set to `message`. 
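A message's `content` array mixes the eight variant classes enumerated by the nested `Content` union that follows, so pulling out plain text is a matter of filtering on variant class. A sketch, where `message` is an assumed, already-retrieved `Conversations::Message`:

```ruby
text = message.content.filter_map do |part|
  case part
  in OpenAI::Conversations::InputTextContent | OpenAI::Conversations::OutputTextContent
    part.text
  else
    nil
  end
end.join("\n")
```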
+ + module Content + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant :input_text, -> { OpenAI::Conversations::InputTextContent } + + variant :output_text, -> { OpenAI::Conversations::OutputTextContent } + + variant :text, -> { OpenAI::Conversations::TextContent } + + variant :summary_text, -> { OpenAI::Conversations::SummaryTextContent } + + variant :refusal, -> { OpenAI::Conversations::RefusalContent } + + variant :input_image, -> { OpenAI::Conversations::InputImageContent } + + variant :computer_screenshot, -> { OpenAI::Conversations::ComputerScreenshotContent } + + variant :input_file, -> { OpenAI::Conversations::InputFileContent } + + # @!method self.variants + # @return [Array(OpenAI::Models::Conversations::InputTextContent, OpenAI::Models::Conversations::OutputTextContent, OpenAI::Models::Conversations::TextContent, OpenAI::Models::Conversations::SummaryTextContent, OpenAI::Models::Conversations::RefusalContent, OpenAI::Models::Conversations::InputImageContent, OpenAI::Models::Conversations::ComputerScreenshotContent, OpenAI::Models::Conversations::InputFileContent)] + end + + # The role of the message. One of `unknown`, `user`, `assistant`, `system`, + # `critic`, `discriminator`, `developer`, or `tool`. + # + # @see OpenAI::Models::Conversations::Message#role + module Role + extend OpenAI::Internal::Type::Enum + + UNKNOWN = :unknown + USER = :user + ASSISTANT = :assistant + SYSTEM = :system + CRITIC = :critic + DISCRIMINATOR = :discriminator + DEVELOPER = :developer + TOOL = :tool + + # @!method self.values + # @return [Array] + end + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + # + # @see OpenAI::Models::Conversations::Message#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + INCOMPLETE = :incomplete + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/conversations/output_text_content.rb b/lib/openai/models/conversations/output_text_content.rb new file mode 100644 index 00000000..23791d5b --- /dev/null +++ b/lib/openai/models/conversations/output_text_content.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + class OutputTextContent < OpenAI::Internal::Type::BaseModel + # @!attribute annotations + # The annotations of the text output. + # + # @return [Array] + required :annotations, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Conversations::OutputTextContent::Annotation] } + + # @!attribute text + # The text output from the model. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the output text. Always `output_text`. + # + # @return [Symbol, :output_text] + required :type, const: :output_text + + # @!attribute logprobs + # + # @return [Array, nil] + optional :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Conversations::LobProb] } + + # @!method initialize(annotations:, text:, logprobs: nil, type: :output_text) + # @param annotations [Array] The annotations of the text output. + # + # @param text [String] The text output from the model. + # + # @param logprobs [Array] + # + # @param type [Symbol, :output_text] The type of the output text. Always `output_text`. 
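The `Annotation` union defined next reuses the citation bodies declared elsewhere in this diff; a short sketch of collecting cited filenames from one `output_text` part (`part` is an assumed `OutputTextContent`):

```ruby
cited = part.annotations.filter_map do |a|
  a.filename if a.is_a?(OpenAI::Conversations::FileCitationBody)
end
```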
+ + module Annotation + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant :file_citation, -> { OpenAI::Conversations::FileCitationBody } + + variant :url_citation, -> { OpenAI::Conversations::URLCitationBody } + + variant :container_file_citation, -> { OpenAI::Conversations::ContainerFileCitationBody } + + # @!method self.variants + # @return [Array(OpenAI::Models::Conversations::FileCitationBody, OpenAI::Models::Conversations::URLCitationBody, OpenAI::Models::Conversations::ContainerFileCitationBody)] + end + end + end + end +end diff --git a/lib/openai/models/conversations/refusal_content.rb b/lib/openai/models/conversations/refusal_content.rb new file mode 100644 index 00000000..2b1cdce3 --- /dev/null +++ b/lib/openai/models/conversations/refusal_content.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + class RefusalContent < OpenAI::Internal::Type::BaseModel + # @!attribute refusal + # The refusal explanation from the model. + # + # @return [String] + required :refusal, String + + # @!attribute type + # The type of the refusal. Always `refusal`. + # + # @return [Symbol, :refusal] + required :type, const: :refusal + + # @!method initialize(refusal:, type: :refusal) + # @param refusal [String] The refusal explanation from the model. + # + # @param type [Symbol, :refusal] The type of the refusal. Always `refusal`. + end + end + end +end diff --git a/lib/openai/models/conversations/summary_text_content.rb b/lib/openai/models/conversations/summary_text_content.rb new file mode 100644 index 00000000..e3768df9 --- /dev/null +++ b/lib/openai/models/conversations/summary_text_content.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + class SummaryTextContent < OpenAI::Internal::Type::BaseModel + # @!attribute text + # + # @return [String] + required :text, String + + # @!attribute type + # + # @return [Symbol, :summary_text] + required :type, const: :summary_text + + # @!method initialize(text:, type: :summary_text) + # @param text [String] + # @param type [Symbol, :summary_text] + end + end + end +end diff --git a/lib/openai/models/conversations/text_content.rb b/lib/openai/models/conversations/text_content.rb new file mode 100644 index 00000000..3fa27b08 --- /dev/null +++ b/lib/openai/models/conversations/text_content.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + class TextContent < OpenAI::Internal::Type::BaseModel + # @!attribute text + # + # @return [String] + required :text, String + + # @!attribute type + # + # @return [Symbol, :text] + required :type, const: :text + + # @!method initialize(text:, type: :text) + # @param text [String] + # @param type [Symbol, :text] + end + end + end +end diff --git a/lib/openai/models/conversations/top_log_prob.rb b/lib/openai/models/conversations/top_log_prob.rb new file mode 100644 index 00000000..4677b3bf --- /dev/null +++ b/lib/openai/models/conversations/top_log_prob.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + class TopLogProb < OpenAI::Internal::Type::BaseModel + # @!attribute token + # + # @return [String] + required :token, String + + # @!attribute bytes + # + # @return [Array] + required :bytes, OpenAI::Internal::Type::ArrayOf[Integer] + + # @!attribute logprob + # + # @return [Float] + required :logprob, Float + + # @!method initialize(token:, bytes:, logprob:) + # @param token 
[String] + # @param bytes [Array] + # @param logprob [Float] + end + end + end +end diff --git a/lib/openai/models/conversations/url_citation_body.rb b/lib/openai/models/conversations/url_citation_body.rb new file mode 100644 index 00000000..bab5ffdf --- /dev/null +++ b/lib/openai/models/conversations/url_citation_body.rb @@ -0,0 +1,50 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Conversations + class URLCitationBody < OpenAI::Internal::Type::BaseModel + # @!attribute end_index + # The index of the last character of the URL citation in the message. + # + # @return [Integer] + required :end_index, Integer + + # @!attribute start_index + # The index of the first character of the URL citation in the message. + # + # @return [Integer] + required :start_index, Integer + + # @!attribute title + # The title of the web resource. + # + # @return [String] + required :title, String + + # @!attribute type + # The type of the URL citation. Always `url_citation`. + # + # @return [Symbol, :url_citation] + required :type, const: :url_citation + + # @!attribute url + # The URL of the web resource. + # + # @return [String] + required :url, String + + # @!method initialize(end_index:, start_index:, title:, url:, type: :url_citation) + # @param end_index [Integer] The index of the last character of the URL citation in the message. + # + # @param start_index [Integer] The index of the first character of the URL citation in the message. + # + # @param title [String] The title of the web resource. + # + # @param url [String] The URL of the web resource. + # + # @param type [Symbol, :url_citation] The type of the URL citation. Always `url_citation`. + end + end + end +end diff --git a/lib/openai/models/create_embedding_response.rb b/lib/openai/models/create_embedding_response.rb index a47238ef..a7bcbff1 100644 --- a/lib/openai/models/create_embedding_response.rb +++ b/lib/openai/models/create_embedding_response.rb @@ -2,12 +2,13 @@ module OpenAI module Models - class CreateEmbeddingResponse < OpenAI::BaseModel + # @see OpenAI::Resources::Embeddings#create + class CreateEmbeddingResponse < OpenAI::Internal::Type::BaseModel # @!attribute data # The list of embeddings generated by the model. # # @return [Array] - required :data, -> { OpenAI::ArrayOf[OpenAI::Models::Embedding] } + required :data, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Embedding] } # @!attribute model # The name of the model used to generate the embedding. @@ -25,19 +26,19 @@ class CreateEmbeddingResponse < OpenAI::BaseModel # The usage information for the request. # # @return [OpenAI::Models::CreateEmbeddingResponse::Usage] - required :usage, -> { OpenAI::Models::CreateEmbeddingResponse::Usage } + required :usage, -> { OpenAI::CreateEmbeddingResponse::Usage } - # @!parse - # # @param data [Array] - # # @param model [String] - # # @param usage [OpenAI::Models::CreateEmbeddingResponse::Usage] - # # @param object [Symbol, :list] - # # - # def initialize(data:, model:, usage:, object: :list, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, model:, usage:, object: :list) + # @param data [Array] The list of embeddings generated by the model. + # + # @param model [String] The name of the model used to generate the embedding. + # + # @param usage [OpenAI::Models::CreateEmbeddingResponse::Usage] The usage information for the request. + # + # @param object [Symbol, :list] The object type, which is always "list". 
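For the response model being migrated here, typical read access looks like the sketch below — the `client.embeddings.create` call matches the new `@see` tag, though the model name is illustrative:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

res = client.embeddings.create(model: "text-embedding-3-small", input: "hello world")
res.data.first.embedding # => Array of Floats
res.usage.prompt_tokens  # usage accounting from the Usage model below
```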
- class Usage < OpenAI::BaseModel + # @see OpenAI::Models::CreateEmbeddingResponse#usage + class Usage < OpenAI::Internal::Type::BaseModel # @!attribute prompt_tokens # The number of tokens used by the prompt. # @@ -50,15 +51,12 @@ class Usage < OpenAI::BaseModel # @return [Integer] required :total_tokens, Integer - # @!parse - # # The usage information for the request. - # # - # # @param prompt_tokens [Integer] - # # @param total_tokens [Integer] - # # - # def initialize(prompt_tokens:, total_tokens:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(prompt_tokens:, total_tokens:) + # The usage information for the request. + # + # @param prompt_tokens [Integer] The number of tokens used by the prompt. + # + # @param total_tokens [Integer] The total number of tokens used by the request. end end end diff --git a/lib/openai/models/custom_tool_input_format.rb b/lib/openai/models/custom_tool_input_format.rb new file mode 100644 index 00000000..e716a67d --- /dev/null +++ b/lib/openai/models/custom_tool_input_format.rb @@ -0,0 +1,76 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # The input format for the custom tool. Default is unconstrained text. + module CustomToolInputFormat + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Unconstrained free-form text. + variant :text, -> { OpenAI::CustomToolInputFormat::Text } + + # A grammar defined by the user. + variant :grammar, -> { OpenAI::CustomToolInputFormat::Grammar } + + class Text < OpenAI::Internal::Type::BaseModel + # @!attribute type + # Unconstrained text format. Always `text`. + # + # @return [Symbol, :text] + required :type, const: :text + + # @!method initialize(type: :text) + # Unconstrained free-form text. + # + # @param type [Symbol, :text] Unconstrained text format. Always `text`. + end + + class Grammar < OpenAI::Internal::Type::BaseModel + # @!attribute definition + # The grammar definition. + # + # @return [String] + required :definition, String + + # @!attribute syntax + # The syntax of the grammar definition. One of `lark` or `regex`. + # + # @return [Symbol, OpenAI::Models::CustomToolInputFormat::Grammar::Syntax] + required :syntax, enum: -> { OpenAI::CustomToolInputFormat::Grammar::Syntax } + + # @!attribute type + # Grammar format. Always `grammar`. + # + # @return [Symbol, :grammar] + required :type, const: :grammar + + # @!method initialize(definition:, syntax:, type: :grammar) + # A grammar defined by the user. + # + # @param definition [String] The grammar definition. + # + # @param syntax [Symbol, OpenAI::Models::CustomToolInputFormat::Grammar::Syntax] The syntax of the grammar definition. One of `lark` or `regex`. + # + # @param type [Symbol, :grammar] Grammar format. Always `grammar`. + + # The syntax of the grammar definition. One of `lark` or `regex`. 
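Ahead of the `Syntax` enum that follows, a sketch of building the grammar-constrained format by hand — the Lark rule is illustrative, and `type: :grammar` is supplied by the const default:

```ruby
require "openai"

format = OpenAI::CustomToolInputFormat::Grammar.new(
  definition: 'start: "yes" | "no"', # illustrative Lark grammar
  syntax: :lark
)
format.type # => :grammar
```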
+ # + # @see OpenAI::Models::CustomToolInputFormat::Grammar#syntax + module Syntax + extend OpenAI::Internal::Type::Enum + + LARK = :lark + REGEX = :regex + + # @!method self.values + # @return [Array] + end + end + + # @!method self.variants + # @return [Array(OpenAI::Models::CustomToolInputFormat::Text, OpenAI::Models::CustomToolInputFormat::Grammar)] + end + end +end diff --git a/lib/openai/models/embedding.rb b/lib/openai/models/embedding.rb index 5ac4c6c9..9a511def 100644 --- a/lib/openai/models/embedding.rb +++ b/lib/openai/models/embedding.rb @@ -2,14 +2,14 @@ module OpenAI module Models - class Embedding < OpenAI::BaseModel + class Embedding < OpenAI::Internal::Type::BaseModel # @!attribute embedding # The embedding vector, which is a list of floats. The length of vector depends on - # the model as listed in the - # [embedding guide](https://platform.openai.com/docs/guides/embeddings). + # the model as listed in the + # [embedding guide](https://platform.openai.com/docs/guides/embeddings). # # @return [Array] - required :embedding, OpenAI::ArrayOf[Float] + required :embedding, OpenAI::Internal::Type::ArrayOf[Float] # @!attribute index # The index of the embedding in the list of embeddings. @@ -23,16 +23,17 @@ class Embedding < OpenAI::BaseModel # @return [Symbol, :embedding] required :object, const: :embedding - # @!parse - # # Represents an embedding vector returned by embedding endpoint. - # # - # # @param embedding [Array] - # # @param index [Integer] - # # @param object [Symbol, :embedding] - # # - # def initialize(embedding:, index:, object: :embedding, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(embedding:, index:, object: :embedding) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Embedding} for more details. + # + # Represents an embedding vector returned by embedding endpoint. + # + # @param embedding [Array] The embedding vector, which is a list of floats. The length of vector depends on + # + # @param index [Integer] The index of the embedding in the list of embeddings. + # + # @param object [Symbol, :embedding] The object type, which is always "embedding". end end end diff --git a/lib/openai/models/embedding_create_params.rb b/lib/openai/models/embedding_create_params.rb index 7bfe8e35..fea9547e 100644 --- a/lib/openai/models/embedding_create_params.rb +++ b/lib/openai/models/embedding_create_params.rb @@ -2,133 +2,137 @@ module OpenAI module Models - class EmbeddingCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Embeddings#create + class EmbeddingCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute input # Input text to embed, encoded as a string or array of tokens. To embed multiple - # inputs in a single request, pass an array of strings or array of token arrays. - # The input must not exceed the max input tokens for the model (8192 tokens for - # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 - # dimensions or less. - # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - # for counting tokens. Some models may also impose a limit on total number of - # tokens summed across inputs. + # inputs in a single request, pass an array of strings or array of token arrays. 
+ # The input must not exceed the max input tokens for the model (8192 tokens for + # all embedding models), cannot be an empty string, and any array must be 2048 + # dimensions or less. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. In addition to the per-input token limit, all embedding + # models enforce a maximum of 300,000 tokens summed across all inputs in a single + # request. # # @return [String, Array<String>, Array<Integer>, Array<Array<Integer>>] - required :input, union: -> { OpenAI::Models::EmbeddingCreateParams::Input } + required :input, union: -> { OpenAI::EmbeddingCreateParams::Input } # @!attribute model # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. # # @return [String, Symbol, OpenAI::Models::EmbeddingModel] - required :model, union: -> { OpenAI::Models::EmbeddingCreateParams::Model } + required :model, union: -> { OpenAI::EmbeddingCreateParams::Model } - # @!attribute [r] dimensions + # @!attribute dimensions # The number of dimensions the resulting output embeddings should have. Only - # supported in `text-embedding-3` and later models. + # supported in `text-embedding-3` and later models. # # @return [Integer, nil] optional :dimensions, Integer - # @!parse - # # @return [Integer] - # attr_writer :dimensions - - # @!attribute [r] encoding_format + # @!attribute encoding_format # The format to return the embeddings in. Can be either `float` or - # [`base64`](https://pypi.org/project/pybase64/). + # [`base64`](https://pypi.org/project/pybase64/). # # @return [Symbol, OpenAI::Models::EmbeddingCreateParams::EncodingFormat, nil] - optional :encoding_format, enum: -> { OpenAI::Models::EmbeddingCreateParams::EncodingFormat } + optional :encoding_format, enum: -> { OpenAI::EmbeddingCreateParams::EncodingFormat } - # @!parse - # # @return [Symbol, OpenAI::Models::EmbeddingCreateParams::EncodingFormat] - # attr_writer :encoding_format - - # @!attribute [r] user + # @!attribute user # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
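+      #
+      #   @example Putting these params together in a create call (an illustrative
+      #     sketch; `client.embeddings.create` is assumed from the
+      #     `@see OpenAI::Resources::Embeddings#create` reference above, and the
+      #     input text and dimension count are hypothetical)
+      #
+      #       client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
+      #       client.embeddings.create(
+      #         input: "The quick brown fox jumped over the lazy dog",
+      #         model: :"text-embedding-3-small",
+      #         dimensions: 256,
+      #         encoding_format: :float
+      #       )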
# # @return [String, nil] optional :user, String - # @!parse - # # @return [String] - # attr_writer :user - - # @!parse - # # @param input [String, Array<String>, Array<Integer>, Array<Array<Integer>>] - # # @param model [String, Symbol, OpenAI::Models::EmbeddingModel] - # # @param dimensions [Integer] - # # @param encoding_format [Symbol, OpenAI::Models::EmbeddingCreateParams::EncodingFormat] - # # @param user [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(input:, model:, dimensions: nil, encoding_format: nil, user: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(input:, model:, dimensions: nil, encoding_format: nil, user: nil, request_options: {}) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::EmbeddingCreateParams} for more details. # - # Input text to embed, encoded as a string or array of tokens. To embed multiple - # inputs in a single request, pass an array of strings or array of token arrays. - # The input must not exceed the max input tokens for the model (8192 tokens for - # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 - # dimensions or less. - # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - # for counting tokens. Some models may also impose a limit on total number of - # tokens summed across inputs. - class Input < OpenAI::Union - StringArray = OpenAI::ArrayOf[String] - - IntegerArray = OpenAI::ArrayOf[Integer] + # @param input [String, Array<String>, Array<Integer>, Array<Array<Integer>>] Input text to embed, encoded as a string or array of tokens. To embed multiple i + # + # @param model [String, Symbol, OpenAI::Models::EmbeddingModel] ID of the model to use. You can use the [List models](https://platform.openai.co + # + # @param dimensions [Integer] The number of dimensions the resulting output embeddings should have. Only suppo + # + # @param encoding_format [Symbol, OpenAI::Models::EmbeddingCreateParams::EncodingFormat] The format to return the embeddings in. Can be either `float` or [`base64`](http + # + # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - ArrayOfToken2DArray = OpenAI::ArrayOf[OpenAI::ArrayOf[Integer]] + # Input text to embed, encoded as a string or array of tokens. To embed multiple + # inputs in a single request, pass an array of strings or array of token arrays. + # The input must not exceed the max input tokens for the model (8192 tokens for + # all embedding models), cannot be an empty string, and any array must be 2048 + # dimensions or less. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. In addition to the per-input token limit, all embedding + # models enforce a maximum of 300,000 tokens summed across all inputs in a single + # request. + module Input + extend OpenAI::Internal::Type::Union # The string that will be turned into an embedding. variant String # The array of strings that will be turned into an embedding. - variant OpenAI::Models::EmbeddingCreateParams::Input::StringArray + variant -> { OpenAI::Models::EmbeddingCreateParams::Input::StringArray } # The array of integers that will be turned into an embedding.
- variant OpenAI::Models::EmbeddingCreateParams::Input::IntegerArray + variant -> { OpenAI::Models::EmbeddingCreateParams::Input::IntegerArray } # The array of arrays containing integers that will be turned into an embedding. - variant OpenAI::Models::EmbeddingCreateParams::Input::ArrayOfToken2DArray + variant -> { OpenAI::Models::EmbeddingCreateParams::Input::ArrayOfToken2DArray } + + # @!method self.variants + # @return [Array(String, Array<String>, Array<Integer>, Array<Array<Integer>>)] + + # @type [OpenAI::Internal::Type::Converter] + StringArray = OpenAI::Internal::Type::ArrayOf[String] + + # @type [OpenAI::Internal::Type::Converter] + IntegerArray = OpenAI::Internal::Type::ArrayOf[Integer] + + # @type [OpenAI::Internal::Type::Converter] + ArrayOfToken2DArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::ArrayOf[Integer]] end - # @abstract - # # ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. - class Model < OpenAI::Union + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + module Model + extend OpenAI::Internal::Type::Union + variant String # ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) for descriptions of them. - variant enum: -> { OpenAI::Models::EmbeddingModel } + variant enum: -> { OpenAI::EmbeddingModel } + + # @!method self.variants + # @return [Array(String, Symbol, OpenAI::Models::EmbeddingModel)] end - # @abstract - # # The format to return the embeddings in. Can be either `float` or - # [`base64`](https://pypi.org/project/pybase64/). - class EncodingFormat < OpenAI::Enum + # [`base64`](https://pypi.org/project/pybase64/). + module EncodingFormat + extend OpenAI::Internal::Type::Enum + FLOAT = :float BASE64 = :base64 - finalize! + # @!method self.values + # @return [Array<Symbol>] end end end diff --git a/lib/openai/models/embedding_model.rb b/lib/openai/models/embedding_model.rb index ae14fe32..0692fbb1 100644 --- a/lib/openai/models/embedding_model.rb +++ b/lib/openai/models/embedding_model.rb @@ -2,14 +2,15 @@ module OpenAI module Models - # @abstract - # - class EmbeddingModel < OpenAI::Enum + module EmbeddingModel + extend OpenAI::Internal::Type::Enum + TEXT_EMBEDDING_ADA_002 = :"text-embedding-ada-002" TEXT_EMBEDDING_3_SMALL = :"text-embedding-3-small" TEXT_EMBEDDING_3_LARGE = :"text-embedding-3-large" - finalize!
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/error_object.rb b/lib/openai/models/error_object.rb index c6aaddc6..b33d574a 100644 --- a/lib/openai/models/error_object.rb +++ b/lib/openai/models/error_object.rb @@ -2,7 +2,7 @@ module OpenAI module Models - class ErrorObject < OpenAI::BaseModel + class ErrorObject < OpenAI::Internal::Type::BaseModel # @!attribute code # # @return [String, nil] @@ -23,15 +23,11 @@ class ErrorObject < OpenAI::BaseModel # @return [String] required :type, String - # @!parse - # # @param code [String, nil] - # # @param message [String] - # # @param param [String, nil] - # # @param type [String] - # # - # def initialize(code:, message:, param:, type:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(code:, message:, param:, type:) + # @param code [String, nil] + # @param message [String] + # @param param [String, nil] + # @param type [String] end end end diff --git a/lib/openai/models/eval_create_params.rb b/lib/openai/models/eval_create_params.rb new file mode 100644 index 00000000..10f1ef02 --- /dev/null +++ b/lib/openai/models/eval_create_params.rb @@ -0,0 +1,482 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Evals#create + class EvalCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute data_source_config + # The configuration for the data source used for the evaluation runs. Dictates the + # schema of the data used in the evaluation. + # + # @return [OpenAI::Models::EvalCreateParams::DataSourceConfig::Custom, OpenAI::Models::EvalCreateParams::DataSourceConfig::Logs, OpenAI::Models::EvalCreateParams::DataSourceConfig::StoredCompletions] + required :data_source_config, union: -> { OpenAI::EvalCreateParams::DataSourceConfig } + + # @!attribute testing_criteria + # A list of graders for all eval runs in this group. Graders can reference + # variables in the data source using double curly braces notation, like + # `{{item.variable_name}}`. To reference the model's output, use the `sample` + # namespace (ie, `{{sample.output_text}}`). + # + # @return [Array] + required :testing_criteria, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::EvalCreateParams::TestingCriterion] } + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!attribute name + # The name of the evaluation. + # + # @return [String, nil] + optional :name, String + + # @!method initialize(data_source_config:, testing_criteria:, metadata: nil, name: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalCreateParams} for more details. + # + # @param data_source_config [OpenAI::Models::EvalCreateParams::DataSourceConfig::Custom, OpenAI::Models::EvalCreateParams::DataSourceConfig::Logs, OpenAI::Models::EvalCreateParams::DataSourceConfig::StoredCompletions] The configuration for the data source used for the evaluation runs. 
Dictates the + # + # @param testing_criteria [Array] A list of graders for all eval runs in this group. Graders can reference variabl + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param name [String] The name of the evaluation. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + # The configuration for the data source used for the evaluation runs. Dictates the + # schema of the data used in the evaluation. + module DataSourceConfig + extend OpenAI::Internal::Type::Union + + discriminator :type + + # A CustomDataSourceConfig object that defines the schema for the data source used for the evaluation runs. + # This schema is used to define the shape of the data that will be: + # - Used to define your testing criteria and + # - What data is required when creating a run + variant :custom, -> { OpenAI::EvalCreateParams::DataSourceConfig::Custom } + + # A data source config which specifies the metadata property of your logs query. + # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + variant :logs, -> { OpenAI::EvalCreateParams::DataSourceConfig::Logs } + + # Deprecated in favor of LogsDataSourceConfig. + variant :stored_completions, -> { OpenAI::EvalCreateParams::DataSourceConfig::StoredCompletions } + + class Custom < OpenAI::Internal::Type::BaseModel + # @!attribute item_schema + # The json schema for each row in the data source. + # + # @return [Hash{Symbol=>Object}] + required :item_schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute type + # The type of data source. Always `custom`. + # + # @return [Symbol, :custom] + required :type, const: :custom + + # @!attribute include_sample_schema + # Whether the eval should expect you to populate the sample namespace (ie, by + # generating responses off of your data source) + # + # @return [Boolean, nil] + optional :include_sample_schema, OpenAI::Internal::Type::Boolean + + # @!method initialize(item_schema:, include_sample_schema: nil, type: :custom) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalCreateParams::DataSourceConfig::Custom} for more details. + # + # A CustomDataSourceConfig object that defines the schema for the data source used + # for the evaluation runs. This schema is used to define the shape of the data + # that will be: + # + # - Used to define your testing criteria and + # - What data is required when creating a run + # + # @param item_schema [Hash{Symbol=>Object}] The json schema for each row in the data source. + # + # @param include_sample_schema [Boolean] Whether the eval should expect you to populate the sample namespace (ie, by gene + # + # @param type [Symbol, :custom] The type of data source. Always `custom`. + end + + class Logs < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The type of data source. Always `logs`. + # + # @return [Symbol, :logs] + required :type, const: :logs + + # @!attribute metadata + # Metadata filters for the logs data source. + # + # @return [Hash{Symbol=>Object}, nil] + optional :metadata, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!method initialize(metadata: nil, type: :logs) + # A data source config which specifies the metadata property of your logs query. + # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + # + # @param metadata [Hash{Symbol=>Object}] Metadata filters for the logs data source. 
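+          #
+          #   @example A logs data source config as a plain hash (an illustrative
+          #     sketch; the metadata filter mirrors the `usecase=chatbot` example
+          #     mentioned in the comment above)
+          #
+          #       {type: :logs, metadata: {usecase: "chatbot"}}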
+ # + # @param type [Symbol, :logs] The type of data source. Always `logs`. + end + + # @deprecated + class StoredCompletions < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The type of data source. Always `stored_completions`. + # + # @return [Symbol, :stored_completions] + required :type, const: :stored_completions + + # @!attribute metadata + # Metadata filters for the stored completions data source. + # + # @return [Hash{Symbol=>Object}, nil] + optional :metadata, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!method initialize(metadata: nil, type: :stored_completions) + # Deprecated in favor of LogsDataSourceConfig. + # + # @param metadata [Hash{Symbol=>Object}] Metadata filters for the stored completions data source. + # + # @param type [Symbol, :stored_completions] The type of data source. Always `stored_completions`. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::EvalCreateParams::DataSourceConfig::Custom, OpenAI::Models::EvalCreateParams::DataSourceConfig::Logs, OpenAI::Models::EvalCreateParams::DataSourceConfig::StoredCompletions)] + end + + # A LabelModelGrader object which uses a model to assign labels to each item in + # the evaluation. + module TestingCriterion + extend OpenAI::Internal::Type::Union + + discriminator :type + + # A LabelModelGrader object which uses a model to assign labels to each item + # in the evaluation. + variant :label_model, -> { OpenAI::EvalCreateParams::TestingCriterion::LabelModel } + + # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation. + variant :string_check, -> { OpenAI::Graders::StringCheckGrader } + + # A TextSimilarityGrader object which grades text based on similarity metrics. + variant :text_similarity, -> { OpenAI::EvalCreateParams::TestingCriterion::TextSimilarity } + + # A PythonGrader object that runs a python script on the input. + variant :python, -> { OpenAI::EvalCreateParams::TestingCriterion::Python } + + # A ScoreModelGrader object that uses a model to assign a score to the input. + variant :score_model, -> { OpenAI::EvalCreateParams::TestingCriterion::ScoreModel } + + class LabelModel < OpenAI::Internal::Type::BaseModel + # @!attribute input + # A list of chat messages forming the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. + # + # @return [Array] + required :input, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input] } + + # @!attribute labels + # The labels to classify to each item in the evaluation. + # + # @return [Array] + required :labels, OpenAI::Internal::Type::ArrayOf[String] + + # @!attribute model + # The model to use for the evaluation. Must support structured outputs. + # + # @return [String] + required :model, String + + # @!attribute name + # The name of the grader. + # + # @return [String] + required :name, String + + # @!attribute passing_labels + # The labels that indicate a passing result. Must be a subset of labels. + # + # @return [Array] + required :passing_labels, OpenAI::Internal::Type::ArrayOf[String] + + # @!attribute type + # The object type, which is always `label_model`. 
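+          #
+          #   @example A complete label_model criterion for reference (an
+          #     illustrative sketch; the labels and model name are hypothetical,
+          #     and the template string follows the `{{item.*}}` notation above)
+          #
+          #       {
+          #         type: :label_model,
+          #         name: "sentiment",
+          #         model: "gpt-4o-2024-08-06",
+          #         input: [{role: "user", content: "Classify: {{item.text}}"}],
+          #         labels: ["positive", "neutral", "negative"],
+          #         passing_labels: ["positive"]
+          #       }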
+ # + # @return [Symbol, :label_model] + required :type, const: :label_model + + # @!method initialize(input:, labels:, model:, name:, passing_labels:, type: :label_model) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel} for more + # details. + # + # A LabelModelGrader object which uses a model to assign labels to each item in + # the evaluation. + # + # @param input [Array] A list of chat messages forming the prompt or context. May include variable refe + # + # @param labels [Array] The labels to classify to each item in the evaluation. + # + # @param model [String] The model to use for the evaluation. Must support structured outputs. + # + # @param name [String] The name of the grader. + # + # @param passing_labels [Array] The labels that indicate a passing result. Must be a subset of labels. + # + # @param type [Symbol, :label_model] The object type, which is always `label_model`. + + # A chat message that makes up the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. + module Input + extend OpenAI::Internal::Type::Union + + variant -> { OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::SimpleInputMessage } + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + variant -> { OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem } + + class SimpleInputMessage < OpenAI::Internal::Type::BaseModel + # @!attribute content + # The content of the message. + # + # @return [String] + required :content, String + + # @!attribute role + # The role of the message (e.g. "system", "assistant", "user"). + # + # @return [String] + required :role, String + + # @!method initialize(content:, role:) + # @param content [String] The content of the message. + # + # @param role [String] The role of the message (e.g. "system", "assistant", "user"). + end + + class EvalItem < OpenAI::Internal::Type::BaseModel + # @!attribute content + # Inputs to the model - can contain template strings. + # + # @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage, Array] + required :content, + union: -> { OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content } + + # @!attribute role + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + # + # @return [Symbol, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Role] + required :role, enum: -> { OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Role } + + # @!attribute type + # The type of the message input. Always `message`. 
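+              #
+              #   @example An eval item message as a plain hash (an illustrative
+              #     sketch; the content template is hypothetical)
+              #
+              #       {role: :developer, content: "Grade this answer: {{item.answer}}", type: :message}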
+ # + # @return [Symbol, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Type, nil] + optional :type, enum: -> { OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Type } + + # @!method initialize(content:, role:, type: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem} + # for more details. + # + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + # + # @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage, Array] Inputs to the model - can contain template strings. + # + # @param role [Symbol, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Role] The role of the message input. One of `user`, `assistant`, `system`, or + # + # @param type [Symbol, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Type] The type of the message input. Always `message`. + + # Inputs to the model - can contain template strings. + # + # @see OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem#content + module Content + extend OpenAI::Internal::Type::Union + + # A text input to the model. + variant String + + # A text input to the model. + variant -> { OpenAI::Responses::ResponseInputText } + + # A text output from the model. + variant -> { OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText } + + # An image input to the model. + variant -> { OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage } + + # A list of inputs, each of which may be either an input text or input image object. + variant -> { OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::AnArrayOfInputTextAndInputImageArray } + + class OutputText < OpenAI::Internal::Type::BaseModel + # @!attribute text + # The text output from the model. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the output text. Always `output_text`. + # + # @return [Symbol, :output_text] + required :type, const: :output_text + + # @!method initialize(text:, type: :output_text) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText} + # for more details. + # + # A text output from the model. + # + # @param text [String] The text output from the model. + # + # @param type [Symbol, :output_text] The type of the output text. Always `output_text`. + end + + class InputImage < OpenAI::Internal::Type::BaseModel + # @!attribute image_url + # The URL of the image input. + # + # @return [String] + required :image_url, String + + # @!attribute type + # The type of the image input. Always `input_image`. + # + # @return [Symbol, :input_image] + required :type, const: :input_image + + # @!attribute detail + # The detail level of the image to be sent to the model. 
One of `high`, `low`, or + # `auto`. Defaults to `auto`. + # + # @return [String, nil] + optional :detail, String + + # @!method initialize(image_url:, detail: nil, type: :input_image) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage} + # for more details. + # + # An image input to the model. + # + # @param image_url [String] The URL of the image input. + # + # @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or + # + # @param type [Symbol, :input_image] The type of the image input. Always `input_image`. + end + + # @!method self.variants + # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage, Array)] + + # @type [OpenAI::Internal::Type::Converter] + AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown] + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + # + # @see OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem#role + module Role + extend OpenAI::Internal::Type::Enum + + USER = :user + ASSISTANT = :assistant + SYSTEM = :system + DEVELOPER = :developer + + # @!method self.values + # @return [Array] + end + + # The type of the message input. Always `message`. + # + # @see OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem#type + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE = :message + + # @!method self.values + # @return [Array] + end + end + + # @!method self.variants + # @return [Array(OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::SimpleInputMessage, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem)] + end + end + + class TextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader + # @!attribute pass_threshold + # The threshold for the score. + # + # @return [Float] + required :pass_threshold, Float + + # @!method initialize(pass_threshold:) + # A TextSimilarityGrader object which grades text based on similarity metrics. + # + # @param pass_threshold [Float] The threshold for the score. + end + + class Python < OpenAI::Models::Graders::PythonGrader + # @!attribute pass_threshold + # The threshold for the score. + # + # @return [Float, nil] + optional :pass_threshold, Float + + # @!method initialize(pass_threshold: nil) + # A PythonGrader object that runs a python script on the input. + # + # @param pass_threshold [Float] The threshold for the score. + end + + class ScoreModel < OpenAI::Models::Graders::ScoreModelGrader + # @!attribute pass_threshold + # The threshold for the score. + # + # @return [Float, nil] + optional :pass_threshold, Float + + # @!method initialize(pass_threshold: nil) + # A ScoreModelGrader object that uses a model to assign a score to the input. + # + # @param pass_threshold [Float] The threshold for the score. 
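+        #
+        #   @example Attaching a pass threshold to a score-model criterion (an
+        #     illustrative sketch; fields other than `pass_threshold` come from
+        #     the inherited ScoreModelGrader and the values are hypothetical, with
+        #     the `{{sample.output_text}}` template following the `sample`
+        #     namespace notation documented above)
+        #
+        #       {
+        #         type: :score_model,
+        #         name: "answer_quality",
+        #         model: "gpt-4o",
+        #         input: [{role: "user", content: "Rate: {{sample.output_text}}"}],
+        #         pass_threshold: 0.8
+        #       }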
+ end + + # @!method self.variants + # @return [Array(OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalCreateParams::TestingCriterion::TextSimilarity, OpenAI::Models::EvalCreateParams::TestingCriterion::Python, OpenAI::Models::EvalCreateParams::TestingCriterion::ScoreModel)] + end + end + end +end diff --git a/lib/openai/models/eval_create_response.rb b/lib/openai/models/eval_create_response.rb new file mode 100644 index 00000000..64d013fc --- /dev/null +++ b/lib/openai/models/eval_create_response.rb @@ -0,0 +1,214 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Evals#create + class EvalCreateResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # Unique identifier for the evaluation. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) for when the eval was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data_source_config + # Configuration of data sources used in runs of the evaluation. + # + # @return [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalCreateResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig] + required :data_source_config, union: -> { OpenAI::Models::EvalCreateResponse::DataSourceConfig } + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!attribute name + # The name of the evaluation. + # + # @return [String] + required :name, String + + # @!attribute object + # The object type. + # + # @return [Symbol, :eval] + required :object, const: :eval + + # @!attribute testing_criteria + # A list of testing criteria. + # + # @return [Array] + required :testing_criteria, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::EvalCreateResponse::TestingCriterion] } + + # @!method initialize(id:, created_at:, data_source_config:, metadata:, name:, testing_criteria:, object: :eval) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalCreateResponse} for more details. + # + # An Eval object with a data source config and testing criteria. An Eval + # represents a task to be done for your LLM integration. Like: + # + # - Improve the quality of my chatbot + # - See how well my chatbot handles customer support + # - Check if o4-mini is better at my usecase than gpt-4o + # + # @param id [String] Unique identifier for the evaluation. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the eval was created. + # + # @param data_source_config [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalCreateResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig] Configuration of data sources used in runs of the evaluation. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param name [String] The name of the evaluation. + # + # @param testing_criteria [Array] A list of testing criteria. 
+ # + # @param object [Symbol, :eval] The object type. + + # Configuration of data sources used in runs of the evaluation. + # + # @see OpenAI::Models::EvalCreateResponse#data_source_config + module DataSourceConfig + extend OpenAI::Internal::Type::Union + + discriminator :type + + # A CustomDataSourceConfig which specifies the schema of your `item` and optionally `sample` namespaces. + # The response schema defines the shape of the data that will be: + # - Used to define your testing criteria and + # - What data is required when creating a run + variant :custom, -> { OpenAI::EvalCustomDataSourceConfig } + + # A LogsDataSourceConfig which specifies the metadata property of your logs query. + # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + # The schema returned by this data source config is used to define what variables are available in your evals. + # `item` and `sample` are both defined when using this data source config. + variant :logs, -> { OpenAI::Models::EvalCreateResponse::DataSourceConfig::Logs } + + # Deprecated in favor of LogsDataSourceConfig. + variant :stored_completions, -> { OpenAI::EvalStoredCompletionsDataSourceConfig } + + class Logs < OpenAI::Internal::Type::BaseModel + # @!attribute schema + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + # + # @return [Hash{Symbol=>Object}] + required :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute type + # The type of data source. Always `logs`. + # + # @return [Symbol, :logs] + required :type, const: :logs + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!method initialize(schema:, metadata: nil, type: :logs) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::EvalCreateResponse::DataSourceConfig::Logs} for more details. + # + # A LogsDataSourceConfig which specifies the metadata property of your logs query. + # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + # schema returned by this data source config is used to define what variables are + # available in your evals. `item` and `sample` are both defined when using this + # data source config. + # + # @param schema [Hash{Symbol=>Object}] The json schema for the run data source items. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param type [Symbol, :logs] The type of data source. Always `logs`. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalCreateResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig)] + end + + # A LabelModelGrader object which uses a model to assign labels to each item in + # the evaluation. + module TestingCriterion + extend OpenAI::Internal::Type::Union + + # A LabelModelGrader object which uses a model to assign labels to each item + # in the evaluation.
+ variant -> { OpenAI::Graders::LabelModelGrader } + + # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation. + variant -> { OpenAI::Graders::StringCheckGrader } + + # A TextSimilarityGrader object which grades text based on similarity metrics. + variant -> { OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderTextSimilarity } + + # A PythonGrader object that runs a python script on the input. + variant -> { OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderPython } + + # A ScoreModelGrader object that uses a model to assign a score to the input. + variant -> { OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderScoreModel } + + class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader + # @!attribute pass_threshold + # The threshold for the score. + # + # @return [Float] + required :pass_threshold, Float + + # @!method initialize(pass_threshold:) + # A TextSimilarityGrader object which grades text based on similarity metrics. + # + # @param pass_threshold [Float] The threshold for the score. + end + + class EvalGraderPython < OpenAI::Models::Graders::PythonGrader + # @!attribute pass_threshold + # The threshold for the score. + # + # @return [Float, nil] + optional :pass_threshold, Float + + # @!method initialize(pass_threshold: nil) + # A PythonGrader object that runs a python script on the input. + # + # @param pass_threshold [Float] The threshold for the score. + end + + class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader + # @!attribute pass_threshold + # The threshold for the score. + # + # @return [Float, nil] + optional :pass_threshold, Float + + # @!method initialize(pass_threshold: nil) + # A ScoreModelGrader object that uses a model to assign a score to the input. + # + # @param pass_threshold [Float] The threshold for the score. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderScoreModel)] + end + end + end +end diff --git a/lib/openai/models/eval_custom_data_source_config.rb b/lib/openai/models/eval_custom_data_source_config.rb new file mode 100644 index 00000000..5ff8b5db --- /dev/null +++ b/lib/openai/models/eval_custom_data_source_config.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +module OpenAI + module Models + class EvalCustomDataSourceConfig < OpenAI::Internal::Type::BaseModel + # @!attribute schema + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + # + # @return [Hash{Symbol=>Object}] + required :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute type + # The type of data source. Always `custom`. + # + # @return [Symbol, :custom] + required :type, const: :custom + + # @!method initialize(schema:, type: :custom) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalCustomDataSourceConfig} for more details. + # + # A CustomDataSourceConfig which specifies the schema of your `item` and + # optionally `sample` namespaces. 
The response schema defines the shape of the + # data that will be: + # + # - Used to define your testing criteria and + # - What data is required when creating a run + # + # @param schema [Hash{Symbol=>Object}] The json schema for the run data source items. + # + # @param type [Symbol, :custom] The type of data source. Always `custom`. + end + end +end diff --git a/lib/openai/models/eval_delete_params.rb b/lib/openai/models/eval_delete_params.rb new file mode 100644 index 00000000..7e1938d2 --- /dev/null +++ b/lib/openai/models/eval_delete_params.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Evals#delete + class EvalDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end +end diff --git a/lib/openai/models/eval_delete_response.rb b/lib/openai/models/eval_delete_response.rb new file mode 100644 index 00000000..5495ca3d --- /dev/null +++ b/lib/openai/models/eval_delete_response.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Evals#delete + class EvalDeleteResponse < OpenAI::Internal::Type::BaseModel + # @!attribute deleted + # + # @return [Boolean] + required :deleted, OpenAI::Internal::Type::Boolean + + # @!attribute eval_id + # + # @return [String] + required :eval_id, String + + # @!attribute object + # + # @return [String] + required :object, String + + # @!method initialize(deleted:, eval_id:, object:) + # @param deleted [Boolean] + # @param eval_id [String] + # @param object [String] + end + end +end diff --git a/lib/openai/models/eval_list_params.rb b/lib/openai/models/eval_list_params.rb new file mode 100644 index 00000000..ff5f8d60 --- /dev/null +++ b/lib/openai/models/eval_list_params.rb @@ -0,0 +1,75 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Evals#list + class EvalListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute after + # Identifier for the last eval from the previous pagination request. + # + # @return [String, nil] + optional :after, String + + # @!attribute limit + # Number of evals to retrieve. + # + # @return [Integer, nil] + optional :limit, Integer + + # @!attribute order + # Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for + # descending order. + # + # @return [Symbol, OpenAI::Models::EvalListParams::Order, nil] + optional :order, enum: -> { OpenAI::EvalListParams::Order } + + # @!attribute order_by + # Evals can be ordered by creation time or last updated time. Use `created_at` for + # creation time or `updated_at` for last updated time. + # + # @return [Symbol, OpenAI::Models::EvalListParams::OrderBy, nil] + optional :order_by, enum: -> { OpenAI::EvalListParams::OrderBy } + + # @!method initialize(after: nil, limit: nil, order: nil, order_by: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalListParams} for more details. + # + # @param after [String] Identifier for the last eval from the previous pagination request. + # + # @param limit [Integer] Number of evals to retrieve. 
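+      #
+      #   @example Fetching a page of evals (an illustrative sketch;
+      #     `client.evals.list` is assumed from the `@see
+      #     OpenAI::Resources::Evals#list` reference above, and the cursor ID is
+      #     hypothetical)
+      #
+      #       client.evals.list(after: "eval_abc123", limit: 20, order: :desc, order_by: :created_at)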
+ # + # @param order [Symbol, OpenAI::Models::EvalListParams::Order] Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for d + # + # @param order_by [Symbol, OpenAI::Models::EvalListParams::OrderBy] Evals can be ordered by creation time or last updated time. Use + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + # Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for + # descending order. + module Order + extend OpenAI::Internal::Type::Enum + + ASC = :asc + DESC = :desc + + # @!method self.values + # @return [Array] + end + + # Evals can be ordered by creation time or last updated time. Use `created_at` for + # creation time or `updated_at` for last updated time. + module OrderBy + extend OpenAI::Internal::Type::Enum + + CREATED_AT = :created_at + UPDATED_AT = :updated_at + + # @!method self.values + # @return [Array] + end + end + end +end diff --git a/lib/openai/models/eval_list_response.rb b/lib/openai/models/eval_list_response.rb new file mode 100644 index 00000000..db2de0e9 --- /dev/null +++ b/lib/openai/models/eval_list_response.rb @@ -0,0 +1,214 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Evals#list + class EvalListResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # Unique identifier for the evaluation. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) for when the eval was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data_source_config + # Configuration of data sources used in runs of the evaluation. + # + # @return [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalListResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig] + required :data_source_config, union: -> { OpenAI::Models::EvalListResponse::DataSourceConfig } + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!attribute name + # The name of the evaluation. + # + # @return [String] + required :name, String + + # @!attribute object + # The object type. + # + # @return [Symbol, :eval] + required :object, const: :eval + + # @!attribute testing_criteria + # A list of testing criteria. + # + # @return [Array] + required :testing_criteria, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::EvalListResponse::TestingCriterion] } + + # @!method initialize(id:, created_at:, data_source_config:, metadata:, name:, testing_criteria:, object: :eval) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalListResponse} for more details. + # + # An Eval object with a data source config and testing criteria. An Eval + # represents a task to be done for your LLM integration. Like: + # + # - Improve the quality of my chatbot + # - See how well my chatbot handles customer support + # - Check if o4-mini is better at my usecase than gpt-4o + # + # @param id [String] Unique identifier for the evaluation. 
+ # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the eval was created. + # + # @param data_source_config [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalListResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig] Configuration of data sources used in runs of the evaluation. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param name [String] The name of the evaluation. + # + # @param testing_criteria [Array<OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel>] A list of testing criteria. + # + # @param object [Symbol, :eval] The object type. + + # Configuration of data sources used in runs of the evaluation. + # + # @see OpenAI::Models::EvalListResponse#data_source_config + module DataSourceConfig + extend OpenAI::Internal::Type::Union + + discriminator :type + + # A CustomDataSourceConfig which specifies the schema of your `item` and optionally `sample` namespaces. + # The response schema defines the shape of the data that will be: + # - Used to define your testing criteria and + # - What data is required when creating a run + variant :custom, -> { OpenAI::EvalCustomDataSourceConfig } + + # A LogsDataSourceConfig which specifies the metadata property of your logs query. + # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + # The schema returned by this data source config is used to define what variables are available in your evals. + # `item` and `sample` are both defined when using this data source config. + variant :logs, -> { OpenAI::Models::EvalListResponse::DataSourceConfig::Logs } + + # Deprecated in favor of LogsDataSourceConfig. + variant :stored_completions, -> { OpenAI::EvalStoredCompletionsDataSourceConfig } + + class Logs < OpenAI::Internal::Type::BaseModel + # @!attribute schema + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + # + # @return [Hash{Symbol=>Object}] + required :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute type + # The type of data source. Always `logs`. + # + # @return [Symbol, :logs] + required :type, const: :logs + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!method initialize(schema:, metadata: nil, type: :logs) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::EvalListResponse::DataSourceConfig::Logs} for more details. + # + # A LogsDataSourceConfig which specifies the metadata property of your logs query. + # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + # schema returned by this data source config is used to define what variables are + # available in your evals. `item` and `sample` are both defined when using this + # data source config. + # + # @param schema [Hash{Symbol=>Object}] The json schema for the run data source items. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object.
This can be + # + # @param type [Symbol, :logs] The type of data source. Always `logs`. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalListResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig)] + end + + # A LabelModelGrader object which uses a model to assign labels to each item in + # the evaluation. + module TestingCriterion + extend OpenAI::Internal::Type::Union + + # A LabelModelGrader object which uses a model to assign labels to each item + # in the evaluation. + variant -> { OpenAI::Graders::LabelModelGrader } + + # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation. + variant -> { OpenAI::Graders::StringCheckGrader } + + # A TextSimilarityGrader object which grades text based on similarity metrics. + variant -> { OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity } + + # A PythonGrader object that runs a python script on the input. + variant -> { OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython } + + # A ScoreModelGrader object that uses a model to assign a score to the input. + variant -> { OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel } + + class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader + # @!attribute pass_threshold + # The threshold for the score. + # + # @return [Float] + required :pass_threshold, Float + + # @!method initialize(pass_threshold:) + # A TextSimilarityGrader object which grades text based on similarity metrics. + # + # @param pass_threshold [Float] The threshold for the score. + end + + class EvalGraderPython < OpenAI::Models::Graders::PythonGrader + # @!attribute pass_threshold + # The threshold for the score. + # + # @return [Float, nil] + optional :pass_threshold, Float + + # @!method initialize(pass_threshold: nil) + # A PythonGrader object that runs a python script on the input. + # + # @param pass_threshold [Float] The threshold for the score. + end + + class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader + # @!attribute pass_threshold + # The threshold for the score. + # + # @return [Float, nil] + optional :pass_threshold, Float + + # @!method initialize(pass_threshold: nil) + # A ScoreModelGrader object that uses a model to assign a score to the input. + # + # @param pass_threshold [Float] The threshold for the score. 
+ end + + # @!method self.variants + # @return [Array(OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel)] + end + end + end +end diff --git a/lib/openai/models/eval_retrieve_params.rb b/lib/openai/models/eval_retrieve_params.rb new file mode 100644 index 00000000..2dcaa7bb --- /dev/null +++ b/lib/openai/models/eval_retrieve_params.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Evals#retrieve + class EvalRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end +end diff --git a/lib/openai/models/eval_retrieve_response.rb b/lib/openai/models/eval_retrieve_response.rb new file mode 100644 index 00000000..04a1e866 --- /dev/null +++ b/lib/openai/models/eval_retrieve_response.rb @@ -0,0 +1,214 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Evals#retrieve + class EvalRetrieveResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # Unique identifier for the evaluation. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) for when the eval was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data_source_config + # Configuration of data sources used in runs of the evaluation. + # + # @return [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig] + required :data_source_config, union: -> { OpenAI::Models::EvalRetrieveResponse::DataSourceConfig } + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!attribute name + # The name of the evaluation. + # + # @return [String] + required :name, String + + # @!attribute object + # The object type. + # + # @return [Symbol, :eval] + required :object, const: :eval + + # @!attribute testing_criteria + # A list of testing criteria. + # + # @return [Array] + required :testing_criteria, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::EvalRetrieveResponse::TestingCriterion] } + + # @!method initialize(id:, created_at:, data_source_config:, metadata:, name:, testing_criteria:, object: :eval) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalRetrieveResponse} for more details. + # + # An Eval object with a data source config and testing criteria. An Eval + # represents a task to be done for your LLM integration. 
Like: + # + # - Improve the quality of my chatbot + # - See how well my chatbot handles customer support + # - Check if o4-mini is better at my usecase than gpt-4o + # + # @param id [String] Unique identifier for the evaluation. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the eval was created. + # + # @param data_source_config [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig] Configuration of data sources used in runs of the evaluation. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param name [String] The name of the evaluation. + # + # @param testing_criteria [Array<OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel>] A list of testing criteria. + # + # @param object [Symbol, :eval] The object type. + + # Configuration of data sources used in runs of the evaluation. + # + # @see OpenAI::Models::EvalRetrieveResponse#data_source_config + module DataSourceConfig + extend OpenAI::Internal::Type::Union + + discriminator :type + + # A CustomDataSourceConfig which specifies the schema of your `item` and optionally `sample` namespaces. + # The response schema defines the shape of the data that will be: + # - Used to define your testing criteria and + # - What data is required when creating a run + variant :custom, -> { OpenAI::EvalCustomDataSourceConfig } + + # A LogsDataSourceConfig which specifies the metadata property of your logs query. + # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + # The schema returned by this data source config is used to define what variables are available in your evals. + # `item` and `sample` are both defined when using this data source config. + variant :logs, -> { OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Logs } + + # Deprecated in favor of LogsDataSourceConfig. + variant :stored_completions, -> { OpenAI::EvalStoredCompletionsDataSourceConfig } + + class Logs < OpenAI::Internal::Type::BaseModel + # @!attribute schema + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + # + # @return [Hash{Symbol=>Object}] + required :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute type + # The type of data source. Always `logs`. + # + # @return [Symbol, :logs] + required :type, const: :logs + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!method initialize(schema:, metadata: nil, type: :logs) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Logs} for more details. + # + # A LogsDataSourceConfig which specifies the metadata property of your logs query. + # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + # schema returned by this data source config is used to define what variables are + # available in your evals. `item` and `sample` are both defined when using this + # data source config.
+ # + # @param schema [Hash{Symbol=>Object}] The json schema for the run data source items. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param type [Symbol, :logs] The type of data source. Always `logs`. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig)] + end + + # A LabelModelGrader object which uses a model to assign labels to each item in + # the evaluation. + module TestingCriterion + extend OpenAI::Internal::Type::Union + + # A LabelModelGrader object which uses a model to assign labels to each item + # in the evaluation. + variant -> { OpenAI::Graders::LabelModelGrader } + + # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation. + variant -> { OpenAI::Graders::StringCheckGrader } + + # A TextSimilarityGrader object which grades text based on similarity metrics. + variant -> { OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity } + + # A PythonGrader object that runs a python script on the input. + variant -> { OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython } + + # A ScoreModelGrader object that uses a model to assign a score to the input. + variant -> { OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel } + + class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader + # @!attribute pass_threshold + # The threshold for the score. + # + # @return [Float] + required :pass_threshold, Float + + # @!method initialize(pass_threshold:) + # A TextSimilarityGrader object which grades text based on similarity metrics. + # + # @param pass_threshold [Float] The threshold for the score. + end + + class EvalGraderPython < OpenAI::Models::Graders::PythonGrader + # @!attribute pass_threshold + # The threshold for the score. + # + # @return [Float, nil] + optional :pass_threshold, Float + + # @!method initialize(pass_threshold: nil) + # A PythonGrader object that runs a python script on the input. + # + # @param pass_threshold [Float] The threshold for the score. + end + + class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader + # @!attribute pass_threshold + # The threshold for the score. + # + # @return [Float, nil] + optional :pass_threshold, Float + + # @!method initialize(pass_threshold: nil) + # A ScoreModelGrader object that uses a model to assign a score to the input. + # + # @param pass_threshold [Float] The threshold for the score. 
+ end + + # @!method self.variants + # @return [Array(OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel)] + end + end + end +end diff --git a/lib/openai/models/eval_stored_completions_data_source_config.rb b/lib/openai/models/eval_stored_completions_data_source_config.rb new file mode 100644 index 00000000..2a57fdfd --- /dev/null +++ b/lib/openai/models/eval_stored_completions_data_source_config.rb @@ -0,0 +1,44 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @deprecated + class EvalStoredCompletionsDataSourceConfig < OpenAI::Internal::Type::BaseModel + # @!attribute schema + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + # + # @return [Hash{Symbol=>Object}] + required :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute type + # The type of data source. Always `stored_completions`. + # + # @return [Symbol, :stored_completions] + required :type, const: :stored_completions + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!method initialize(schema:, metadata: nil, type: :stored_completions) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalStoredCompletionsDataSourceConfig} for more details. + # + # Deprecated in favor of LogsDataSourceConfig. + # + # @param schema [Hash{Symbol=>Object}] The json schema for the run data source items. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param type [Symbol, :stored_completions] The type of data source. Always `stored_completions`. + end + end +end diff --git a/lib/openai/models/eval_update_params.rb b/lib/openai/models/eval_update_params.rb new file mode 100644 index 00000000..2b7d40b3 --- /dev/null +++ b/lib/openai/models/eval_update_params.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Evals#update + class EvalUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!attribute name + # Rename the evaluation. 
+ # + # @return [String, nil] + optional :name, String + + # @!method initialize(metadata: nil, name: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalUpdateParams} for more details. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param name [String] Rename the evaluation. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end +end diff --git a/lib/openai/models/eval_update_response.rb b/lib/openai/models/eval_update_response.rb new file mode 100644 index 00000000..475374c9 --- /dev/null +++ b/lib/openai/models/eval_update_response.rb @@ -0,0 +1,214 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # @see OpenAI::Resources::Evals#update + class EvalUpdateResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # Unique identifier for the evaluation. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) for when the eval was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data_source_config + # Configuration of data sources used in runs of the evaluation. + # + # @return [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig] + required :data_source_config, union: -> { OpenAI::Models::EvalUpdateResponse::DataSourceConfig } + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!attribute name + # The name of the evaluation. + # + # @return [String] + required :name, String + + # @!attribute object + # The object type. + # + # @return [Symbol, :eval] + required :object, const: :eval + + # @!attribute testing_criteria + # A list of testing criteria. + # + # @return [Array] + required :testing_criteria, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::EvalUpdateResponse::TestingCriterion] } + + # @!method initialize(id:, created_at:, data_source_config:, metadata:, name:, testing_criteria:, object: :eval) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalUpdateResponse} for more details. + # + # An Eval object with a data source config and testing criteria. An Eval + # represents a task to be done for your LLM integration. Like: + # + # - Improve the quality of my chatbot + # - See how well my chatbot handles customer support + # - Check if o4-mini is better at my usecase than gpt-4o + # + # @param id [String] Unique identifier for the evaluation. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the eval was created. + # + # @param data_source_config [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig] Configuration of data sources used in runs of the evaluation. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. 
This can be + # + # @param name [String] The name of the evaluation. + # + # @param testing_criteria [Array] A list of testing criteria. + # + # @param object [Symbol, :eval] The object type. + + # Configuration of data sources used in runs of the evaluation. + # + # @see OpenAI::Models::EvalUpdateResponse#data_source_config + module DataSourceConfig + extend OpenAI::Internal::Type::Union + + discriminator :type + + # A CustomDataSourceConfig which specifies the schema of your `item` and optionally `sample` namespaces. + # The response schema defines the shape of the data that will be: + # - Used to define your testing criteria and + # - What data is required when creating a run + variant :custom, -> { OpenAI::EvalCustomDataSourceConfig } + + # A LogsDataSourceConfig which specifies the metadata property of your logs query. + # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + # The schema returned by this data source config is used to defined what variables are available in your evals. + # `item` and `sample` are both defined when using this data source config. + variant :logs, -> { OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Logs } + + # Deprecated in favor of LogsDataSourceConfig. + variant :stored_completions, -> { OpenAI::EvalStoredCompletionsDataSourceConfig } + + class Logs < OpenAI::Internal::Type::BaseModel + # @!attribute schema + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + # + # @return [Hash{Symbol=>Object}] + required :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute type + # The type of data source. Always `logs`. + # + # @return [Symbol, :logs] + required :type, const: :logs + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!method initialize(schema:, metadata: nil, type: :logs) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Logs} for more details. + # + # A LogsDataSourceConfig which specifies the metadata property of your logs query. + # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + # schema returned by this data source config is used to defined what variables are + # available in your evals. `item` and `sample` are both defined when using this + # data source config. + # + # @param schema [Hash{Symbol=>Object}] The json schema for the run data source items. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param type [Symbol, :logs] The type of data source. Always `logs`. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig)] + end + + # A LabelModelGrader object which uses a model to assign labels to each item in + # the evaluation. 
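To exercise `EvalUpdateParams` from earlier in this hunk: both fields are optional, and metadata follows the 16-pair / 64-character-key / 512-character-value limits documented above. A hedged sketch (call shape inferred from the `@see OpenAI::Resources::Evals#update` reference; IDs and values are hypothetical):

```ruby
updated = client.evals.update(
  "eval_abc123",                       # hypothetical eval ID
  name: "chatbot-quality-v2",          # renames the evaluation
  metadata: {project: "support-bot"}   # structured, queryable key-value pairs
)
puts updated.name # => "chatbot-quality-v2"
```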
+ module TestingCriterion + extend OpenAI::Internal::Type::Union + + # A LabelModelGrader object which uses a model to assign labels to each item + # in the evaluation. + variant -> { OpenAI::Graders::LabelModelGrader } + + # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation. + variant -> { OpenAI::Graders::StringCheckGrader } + + # A TextSimilarityGrader object which grades text based on similarity metrics. + variant -> { OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderTextSimilarity } + + # A PythonGrader object that runs a python script on the input. + variant -> { OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderPython } + + # A ScoreModelGrader object that uses a model to assign a score to the input. + variant -> { OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderScoreModel } + + class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader + # @!attribute pass_threshold + # The threshold for the score. + # + # @return [Float] + required :pass_threshold, Float + + # @!method initialize(pass_threshold:) + # A TextSimilarityGrader object which grades text based on similarity metrics. + # + # @param pass_threshold [Float] The threshold for the score. + end + + class EvalGraderPython < OpenAI::Models::Graders::PythonGrader + # @!attribute pass_threshold + # The threshold for the score. + # + # @return [Float, nil] + optional :pass_threshold, Float + + # @!method initialize(pass_threshold: nil) + # A PythonGrader object that runs a python script on the input. + # + # @param pass_threshold [Float] The threshold for the score. + end + + class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader + # @!attribute pass_threshold + # The threshold for the score. + # + # @return [Float, nil] + optional :pass_threshold, Float + + # @!method initialize(pass_threshold: nil) + # A ScoreModelGrader object that uses a model to assign a score to the input. + # + # @param pass_threshold [Float] The threshold for the score. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderScoreModel)] + end + end + end +end diff --git a/lib/openai/models/evals/create_eval_completions_run_data_source.rb b/lib/openai/models/evals/create_eval_completions_run_data_source.rb new file mode 100644 index 00000000..3dc158bd --- /dev/null +++ b/lib/openai/models/evals/create_eval_completions_run_data_source.rb @@ -0,0 +1,552 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Evals + class CreateEvalCompletionsRunDataSource < OpenAI::Internal::Type::BaseModel + # @!attribute source + # Determines what populates the `item` namespace in this run's data source. + # + # @return [OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::FileID, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions] + required :source, union: -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source } + + # @!attribute type + # The type of run data source. Always `completions`. 
+ # + # @return [Symbol, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Type] + required :type, enum: -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::Type } + + # @!attribute input_messages + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + # + # @return [OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::ItemReference, nil] + optional :input_messages, union: -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages } + + # @!attribute model + # The name of the model to use for generating completions (e.g. "o3-mini"). + # + # @return [String, nil] + optional :model, String + + # @!attribute sampling_params + # + # @return [OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams, nil] + optional :sampling_params, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams } + + # @!method initialize(source:, type:, input_messages: nil, model: nil, sampling_params: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource} for more details. + # + # A CompletionsRunDataSource object describing a model sampling configuration. + # + # @param source [OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::FileID, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions] Determines what populates the `item` namespace in this run's data source. + # + # @param type [Symbol, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Type] The type of run data source. Always `completions`. + # + # @param input_messages [OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::ItemReference] Used when sampling from a model. Dictates the structure of the messages passed i + # + # @param model [String] The name of the model to use for generating completions (e.g. "o3-mini"). + # + # @param sampling_params [OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams] + + # Determines what populates the `item` namespace in this run's data source. + # + # @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource#source + module Source + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant :file_content, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent } + + variant :file_id, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileID } + + # A StoredCompletionsRunDataSource configuration describing a set of filters + variant :stored_completions, + -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions } + + class FileContent < OpenAI::Internal::Type::BaseModel + # @!attribute content + # The content of the jsonl file. + # + # @return [Array] + required :content, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent::Content] } + + # @!attribute type + # The type of jsonl source. Always `file_content`. 
+ # + # @return [Symbol, :file_content] + required :type, const: :file_content + + # @!method initialize(content:, type: :file_content) + # @param content [Array] The content of the jsonl file. + # + # @param type [Symbol, :file_content] The type of jsonl source. Always `file_content`. + + class Content < OpenAI::Internal::Type::BaseModel + # @!attribute item + # + # @return [Hash{Symbol=>Object}] + required :item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute sample + # + # @return [Hash{Symbol=>Object}, nil] + optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!method initialize(item:, sample: nil) + # @param item [Hash{Symbol=>Object}] + # @param sample [Hash{Symbol=>Object}] + end + end + + class FileID < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The identifier of the file. + # + # @return [String] + required :id, String + + # @!attribute type + # The type of jsonl source. Always `file_id`. + # + # @return [Symbol, :file_id] + required :type, const: :file_id + + # @!method initialize(id:, type: :file_id) + # @param id [String] The identifier of the file. + # + # @param type [Symbol, :file_id] The type of jsonl source. Always `file_id`. + end + + class StoredCompletions < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The type of source. Always `stored_completions`. + # + # @return [Symbol, :stored_completions] + required :type, const: :stored_completions + + # @!attribute created_after + # An optional Unix timestamp to filter items created after this time. + # + # @return [Integer, nil] + optional :created_after, Integer, nil?: true + + # @!attribute created_before + # An optional Unix timestamp to filter items created before this time. + # + # @return [Integer, nil] + optional :created_before, Integer, nil?: true + + # @!attribute limit + # An optional maximum number of items to return. + # + # @return [Integer, nil] + optional :limit, Integer, nil?: true + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!attribute model + # An optional model to filter by (e.g., 'gpt-4o'). + # + # @return [String, nil] + optional :model, String, nil?: true + + # @!method initialize(created_after: nil, created_before: nil, limit: nil, metadata: nil, model: nil, type: :stored_completions) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions} + # for more details. + # + # A StoredCompletionsRunDataSource configuration describing a set of filters + # + # @param created_after [Integer, nil] An optional Unix timestamp to filter items created after this time. + # + # @param created_before [Integer, nil] An optional Unix timestamp to filter items created before this time. + # + # @param limit [Integer, nil] An optional maximum number of items to return. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param model [String, nil] An optional model to filter by (e.g., 'gpt-4o'). 
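The `StoredCompletions` source above is purely a filter set over previously stored completions. As a sketch, a completions run data source scoped to recent `gpt-4o` traffic might look like this (assuming, as elsewhere in this SDK, that model constructors coerce nested hashes; all values are hypothetical):

```ruby
data_source = OpenAI::Evals::CreateEvalCompletionsRunDataSource.new(
  type: :completions,
  source: {
    type: :stored_completions,
    model: "gpt-4o",                            # optional model filter
    created_after: Time.now.to_i - (7 * 86_400) # only the last week, Unix seconds
  }
)
```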
+ # + # @param type [Symbol, :stored_completions] The type of source. Always `stored_completions`. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::FileID, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions)] + end + + # The type of run data source. Always `completions`. + # + # @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource#type + module Type + extend OpenAI::Internal::Type::Enum + + COMPLETIONS = :completions + + # @!method self.values + # @return [Array] + end + + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + # + # @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource#input_messages + module InputMessages + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant :template, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template } + + variant :item_reference, + -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::ItemReference } + + class Template < OpenAI::Internal::Type::BaseModel + # @!attribute template + # A list of chat messages forming the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. + # + # @return [Array] + required :template, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template] } + + # @!attribute type + # The type of input messages. Always `template`. + # + # @return [Symbol, :template] + required :type, const: :template + + # @!method initialize(template:, type: :template) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template} + # for more details. + # + # @param template [Array] A list of chat messages forming the prompt or context. May include variable refe + # + # @param type [Symbol, :template] The type of input messages. Always `template`. + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + module Template + extend OpenAI::Internal::Type::Union + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + variant -> { OpenAI::Responses::EasyInputMessage } + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. 
+ variant -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem } + + class EvalItem < OpenAI::Internal::Type::BaseModel + # @!attribute content + # Inputs to the model - can contain template strings. + # + # @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, Array] + required :content, + union: -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content } + + # @!attribute role + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + # + # @return [Symbol, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Role] + required :role, + enum: -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Role } + + # @!attribute type + # The type of the message input. Always `message`. + # + # @return [Symbol, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Type, nil] + optional :type, + enum: -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Type } + + # @!method initialize(content:, role:, type: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem} + # for more details. + # + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + # + # @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, Array] Inputs to the model - can contain template strings. + # + # @param role [Symbol, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Role] The role of the message input. One of `user`, `assistant`, `system`, or + # + # @param type [Symbol, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Type] The type of the message input. Always `message`. + + # Inputs to the model - can contain template strings. + # + # @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem#content + module Content + extend OpenAI::Internal::Type::Union + + # A text input to the model. + variant String + + # A text input to the model. + variant -> { OpenAI::Responses::ResponseInputText } + + # A text output from the model. + variant -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText } + + # An image input to the model. 
+ variant -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage } + + # A list of inputs, each of which may be either an input text or input image object. + variant -> { OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::AnArrayOfInputTextAndInputImageArray } + + class OutputText < OpenAI::Internal::Type::BaseModel + # @!attribute text + # The text output from the model. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the output text. Always `output_text`. + # + # @return [Symbol, :output_text] + required :type, const: :output_text + + # @!method initialize(text:, type: :output_text) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText} + # for more details. + # + # A text output from the model. + # + # @param text [String] The text output from the model. + # + # @param type [Symbol, :output_text] The type of the output text. Always `output_text`. + end + + class InputImage < OpenAI::Internal::Type::BaseModel + # @!attribute image_url + # The URL of the image input. + # + # @return [String] + required :image_url, String + + # @!attribute type + # The type of the image input. Always `input_image`. + # + # @return [Symbol, :input_image] + required :type, const: :input_image + + # @!attribute detail + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + # + # @return [String, nil] + optional :detail, String + + # @!method initialize(image_url:, detail: nil, type: :input_image) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage} + # for more details. + # + # An image input to the model. + # + # @param image_url [String] The URL of the image input. + # + # @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or + # + # @param type [Symbol, :input_image] The type of the image input. Always `input_image`. + end + + # @!method self.variants + # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, Array)] + + # @type [OpenAI::Internal::Type::Converter] + AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown] + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + # + # @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem#role + module Role + extend OpenAI::Internal::Type::Enum + + USER = :user + ASSISTANT = :assistant + SYSTEM = :system + DEVELOPER = :developer + + # @!method self.values + # @return [Array] + end + + # The type of the message input. Always `message`. 
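Putting the template pieces together: each entry in a `template` array is either an `EasyInputMessage` or the `EvalItem` shape above, and string content may interpolate `{{item.*}}` variables from the data source. A hypothetical two-message template:

```ruby
input_messages = {
  type: :template,
  template: [
    {role: :system, content: "You are a helpful support agent."},
    {role: :user, content: "Answer this ticket: {{item.ticket_text}}"} # filled per item
  ]
}
```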
+ # + # @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem#type + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE = :message + + # @!method self.values + # @return [Array] + end + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem)] + end + end + + class ItemReference < OpenAI::Internal::Type::BaseModel + # @!attribute item_reference + # A reference to a variable in the `item` namespace. Ie, "item.input_trajectory" + # + # @return [String] + required :item_reference, String + + # @!attribute type + # The type of input messages. Always `item_reference`. + # + # @return [Symbol, :item_reference] + required :type, const: :item_reference + + # @!method initialize(item_reference:, type: :item_reference) + # @param item_reference [String] A reference to a variable in the `item` namespace. Ie, "item.input_trajectory" + # + # @param type [Symbol, :item_reference] The type of input messages. Always `item_reference`. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::ItemReference)] + end + + # @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource#sampling_params + class SamplingParams < OpenAI::Internal::Type::BaseModel + # @!attribute max_completion_tokens + # The maximum number of tokens in the generated output. + # + # @return [Integer, nil] + optional :max_completion_tokens, Integer + + # @!attribute response_format + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + # + # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject, nil] + optional :response_format, + union: -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::ResponseFormat } + + # @!attribute seed + # A seed value to initialize the randomness, during sampling. + # + # @return [Integer, nil] + optional :seed, Integer + + # @!attribute temperature + # A higher temperature increases randomness in the outputs. + # + # @return [Float, nil] + optional :temperature, Float + + # @!attribute tools + # A list of tools the model may call. Currently, only functions are supported as a + # tool. Use this to provide a list of functions the model may generate JSON inputs + # for. A max of 128 functions are supported. + # + # @return [Array, nil] + optional :tools, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionFunctionTool] } + + # @!attribute top_p + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. 
+ # + # @return [Float, nil] + optional :top_p, Float + + # @!method initialize(max_completion_tokens: nil, response_format: nil, seed: nil, temperature: nil, tools: nil, top_p: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams} for + # more details. + # + # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output. + # + # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. + # + # @param seed [Integer] A seed value to initialize the randomness, during sampling. + # + # @param temperature [Float] A higher temperature increases randomness in the outputs. + # + # @param tools [Array] A list of tools the model may call. Currently, only functions are supported as a + # + # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + # + # @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams#response_format + module ResponseFormat + extend OpenAI::Internal::Type::Union + + # Default response format. Used to generate text responses. + variant -> { OpenAI::ResponseFormatText } + + # JSON Schema response format. Used to generate structured JSON responses. + # Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + variant -> { OpenAI::ResponseFormatJSONSchema } + + # JSON object response format. An older method of generating JSON responses. + # Using `json_schema` is recommended for models that support it. Note that the + # model will not generate JSON without a system or user message instructing it + # to do so. + variant -> { OpenAI::ResponseFormatJSONObject } + + # @!method self.variants + # @return [Array(OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject)] + end + end + end + end + end +end diff --git a/lib/openai/models/evals/create_eval_jsonl_run_data_source.rb b/lib/openai/models/evals/create_eval_jsonl_run_data_source.rb new file mode 100644 index 00000000..3afc6154 --- /dev/null +++ b/lib/openai/models/evals/create_eval_jsonl_run_data_source.rb @@ -0,0 +1,100 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Evals + class CreateEvalJSONLRunDataSource < OpenAI::Internal::Type::BaseModel + # @!attribute source + # Determines what populates the `item` namespace in the data source. + # + # @return [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::Source::FileContent, OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::Source::FileID] + required :source, union: -> { OpenAI::Evals::CreateEvalJSONLRunDataSource::Source } + + # @!attribute type + # The type of data source. Always `jsonl`. 
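For the `SamplingParams` model closed just above (in create_eval_completions_run_data_source.rb), a structured-output configuration could be sketched as follows; the inner JSON schema is a hypothetical example, not part of this diff:

```ruby
sampling_params = {
  temperature: 0.2,
  seed: 42,                  # pins sampling randomness for reproducible runs
  max_completion_tokens: 256,
  response_format: {
    type: :json_schema,      # Structured Outputs, per the doc comment above
    json_schema: {
      name: "verdict",
      schema: {type: "object", properties: {pass: {type: "boolean"}}, required: ["pass"]}
    }
  }
}
```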
+ # + # @return [Symbol, :jsonl] + required :type, const: :jsonl + + # @!method initialize(source:, type: :jsonl) + # A JsonlRunDataSource object that specifies a JSONL file that matches the + # eval + # + # @param source [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::Source::FileContent, OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::Source::FileID] Determines what populates the `item` namespace in the data source. + # + # @param type [Symbol, :jsonl] The type of data source. Always `jsonl`. + + # Determines what populates the `item` namespace in the data source. + # + # @see OpenAI::Models::Evals::CreateEvalJSONLRunDataSource#source + module Source + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant :file_content, -> { OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent } + + variant :file_id, -> { OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileID } + + class FileContent < OpenAI::Internal::Type::BaseModel + # @!attribute content + # The content of the jsonl file. + # + # @return [Array] + required :content, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent::Content] } + + # @!attribute type + # The type of jsonl source. Always `file_content`. + # + # @return [Symbol, :file_content] + required :type, const: :file_content + + # @!method initialize(content:, type: :file_content) + # @param content [Array] The content of the jsonl file. + # + # @param type [Symbol, :file_content] The type of jsonl source. Always `file_content`. + + class Content < OpenAI::Internal::Type::BaseModel + # @!attribute item + # + # @return [Hash{Symbol=>Object}] + required :item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute sample + # + # @return [Hash{Symbol=>Object}, nil] + optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!method initialize(item:, sample: nil) + # @param item [Hash{Symbol=>Object}] + # @param sample [Hash{Symbol=>Object}] + end + end + + class FileID < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The identifier of the file. + # + # @return [String] + required :id, String + + # @!attribute type + # The type of jsonl source. Always `file_id`. + # + # @return [Symbol, :file_id] + required :type, const: :file_id + + # @!method initialize(id:, type: :file_id) + # @param id [String] The identifier of the file. + # + # @param type [Symbol, :file_id] The type of jsonl source. Always `file_id`. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::Source::FileContent, OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::Source::FileID)] + end + end + end +end diff --git a/lib/openai/models/evals/eval_api_error.rb b/lib/openai/models/evals/eval_api_error.rb new file mode 100644 index 00000000..87f11122 --- /dev/null +++ b/lib/openai/models/evals/eval_api_error.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Evals + class EvalAPIError < OpenAI::Internal::Type::BaseModel + # @!attribute code + # The error code. + # + # @return [String] + required :code, String + + # @!attribute message + # The error message. + # + # @return [String] + required :message, String + + # @!method initialize(code:, message:) + # An object representing an error response from the Eval API. + # + # @param code [String] The error code. + # + # @param message [String] The error message.
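A sketch of the two JSONL source variants just defined: `file_content` inlines rows, each carrying an `item` hash and optionally a `sample`, while `file_id` points at an uploaded file. Values are hypothetical:

```ruby
inline = OpenAI::Evals::CreateEvalJSONLRunDataSource.new(
  source: {
    type: :file_content,
    content: [
      {item: {question: "2 + 2?", answer: "4"}},
      {item: {question: "Capital of France?", answer: "Paris"}}
    ]
  }
)

by_file = OpenAI::Evals::CreateEvalJSONLRunDataSource.new(
  source: {type: :file_id, id: "file-abc123"} # hypothetical uploaded file ID
)
```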
+ end + end + + EvalAPIError = Evals::EvalAPIError + end +end diff --git a/lib/openai/models/evals/run_cancel_params.rb b/lib/openai/models/evals/run_cancel_params.rb new file mode 100644 index 00000000..6f84e423 --- /dev/null +++ b/lib/openai/models/evals/run_cancel_params.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Evals + # @see OpenAI::Resources::Evals::Runs#cancel + class RunCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute eval_id + # + # @return [String] + required :eval_id, String + + # @!method initialize(eval_id:, request_options: {}) + # @param eval_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/models/evals/run_cancel_response.rb b/lib/openai/models/evals/run_cancel_response.rb new file mode 100644 index 00000000..49fda248 --- /dev/null +++ b/lib/openai/models/evals/run_cancel_response.rb @@ -0,0 +1,877 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Evals + # @see OpenAI::Resources::Evals::Runs#cancel + class RunCancelResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # Unique identifier for the evaluation run. + # + # @return [String] + required :id, String + + # @!attribute created_at + # Unix timestamp (in seconds) when the evaluation run was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data_source + # Information about the run's data source. + # + # @return [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses] + required :data_source, union: -> { OpenAI::Models::Evals::RunCancelResponse::DataSource } + + # @!attribute error + # An object representing an error response from the Eval API. + # + # @return [OpenAI::Models::Evals::EvalAPIError] + required :error, -> { OpenAI::Evals::EvalAPIError } + + # @!attribute eval_id + # The identifier of the associated evaluation. + # + # @return [String] + required :eval_id, String + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!attribute model + # The model that is evaluated, if applicable. + # + # @return [String] + required :model, String + + # @!attribute name + # The name of the evaluation run. + # + # @return [String] + required :name, String + + # @!attribute object + # The type of the object. Always "eval.run". + # + # @return [Symbol, :"eval.run"] + required :object, const: :"eval.run" + + # @!attribute per_model_usage + # Usage statistics for each model during the evaluation run. + # + # @return [Array] + required :per_model_usage, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCancelResponse::PerModelUsage] } + + # @!attribute per_testing_criteria_results + # Results per testing criteria applied during the evaluation run. 
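`RunCancelParams` above shows that cancelling a run takes the parent `eval_id` alongside the run ID in the path. A hedged sketch of the call, and of reading the `error` field modeled by `EvalAPIError` (method shape inferred from the `@see OpenAI::Resources::Evals::Runs#cancel` reference; IDs are hypothetical):

```ruby
run = client.evals.runs.cancel("evalrun_xyz789", eval_id: "eval_abc123")

puts run.status     # the run's status string, e.g. "canceled"
error = run.error   # an EvalAPIError, per the RunCancelResponse model below
puts "#{error.code}: #{error.message}" if error
```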
+ # + # @return [Array] + required :per_testing_criteria_results, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCancelResponse::PerTestingCriteriaResult] } + + # @!attribute report_url + # The URL to the rendered evaluation run report on the UI dashboard. + # + # @return [String] + required :report_url, String + + # @!attribute result_counts + # Counters summarizing the outcomes of the evaluation run. + # + # @return [OpenAI::Models::Evals::RunCancelResponse::ResultCounts] + required :result_counts, -> { OpenAI::Models::Evals::RunCancelResponse::ResultCounts } + + # @!attribute status + # The status of the evaluation run. + # + # @return [String] + required :status, String + + # @!method initialize(id:, created_at:, data_source:, error:, eval_id:, metadata:, model:, name:, per_model_usage:, per_testing_criteria_results:, report_url:, result_counts:, status:, object: :"eval.run") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunCancelResponse} for more details. + # + # A schema representing an evaluation run. + # + # @param id [String] Unique identifier for the evaluation run. + # + # @param created_at [Integer] Unix timestamp (in seconds) when the evaluation run was created. + # + # @param data_source [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses] Information about the run's data source. + # + # @param error [OpenAI::Models::Evals::EvalAPIError] An object representing an error response from the Eval API. + # + # @param eval_id [String] The identifier of the associated evaluation. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param model [String] The model that is evaluated, if applicable. + # + # @param name [String] The name of the evaluation run. + # + # @param per_model_usage [Array] Usage statistics for each model during the evaluation run. + # + # @param per_testing_criteria_results [Array] Results per testing criteria applied during the evaluation run. + # + # @param report_url [String] The URL to the rendered evaluation run report on the UI dashboard. + # + # @param result_counts [OpenAI::Models::Evals::RunCancelResponse::ResultCounts] Counters summarizing the outcomes of the evaluation run. + # + # @param status [String] The status of the evaluation run. + # + # @param object [Symbol, :"eval.run"] The type of the object. Always "eval.run". + + # Information about the run's data source. + # + # @see OpenAI::Models::Evals::RunCancelResponse#data_source + module DataSource + extend OpenAI::Internal::Type::Union + + discriminator :type + + # A JsonlRunDataSource object with that specifies a JSONL file that matches the eval + variant :jsonl, -> { OpenAI::Evals::CreateEvalJSONLRunDataSource } + + # A CompletionsRunDataSource object describing a model sampling configuration. + variant :completions, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource } + + # A ResponsesRunDataSource object describing a model sampling configuration. + variant :responses, -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses } + + class Responses < OpenAI::Internal::Type::BaseModel + # @!attribute source + # Determines what populates the `item` namespace in this run's data source. 
+ # + # @return [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Responses] + required :source, union: -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source } + + # @!attribute type + # The type of run data source. Always `responses`. + # + # @return [Symbol, :responses] + required :type, const: :responses + + # @!attribute input_messages + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + # + # @return [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::ItemReference, nil] + optional :input_messages, + union: -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages } + + # @!attribute model + # The name of the model to use for generating completions (e.g. "o3-mini"). + # + # @return [String, nil] + optional :model, String + + # @!attribute sampling_params + # + # @return [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams, nil] + optional :sampling_params, + -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams } + + # @!method initialize(source:, input_messages: nil, model: nil, sampling_params: nil, type: :responses) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses} for more + # details. + # + # A ResponsesRunDataSource object describing a model sampling configuration. + # + # @param source [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Responses] Determines what populates the `item` namespace in this run's data source. + # + # @param input_messages [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::ItemReference] Used when sampling from a model. Dictates the structure of the messages passed i + # + # @param model [String] The name of the model to use for generating completions (e.g. "o3-mini"). + # + # @param sampling_params [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams] + # + # @param type [Symbol, :responses] The type of run data source. Always `responses`. + + # Determines what populates the `item` namespace in this run's data source. + # + # @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses#source + module Source + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant :file_content, + -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent } + + variant :file_id, -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileID } + + # An EvalResponsesSource object describing a run data source configuration.
+ variant :responses, + -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Responses } + + class FileContent < OpenAI::Internal::Type::BaseModel + # @!attribute content + # The content of the jsonl file. + # + # @return [Array] + required :content, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent::Content] } + + # @!attribute type + # The type of jsonl source. Always `file_content`. + # + # @return [Symbol, :file_content] + required :type, const: :file_content + + # @!method initialize(content:, type: :file_content) + # @param content [Array] The content of the jsonl file. + # + # @param type [Symbol, :file_content] The type of jsonl source. Always `file_content`. + + class Content < OpenAI::Internal::Type::BaseModel + # @!attribute item + # + # @return [Hash{Symbol=>Object}] + required :item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute sample + # + # @return [Hash{Symbol=>Object}, nil] + optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!method initialize(item:, sample: nil) + # @param item [Hash{Symbol=>Object}] + # @param sample [Hash{Symbol=>Object}] + end + end + + class FileID < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The identifier of the file. + # + # @return [String] + required :id, String + + # @!attribute type + # The type of jsonl source. Always `file_id`. + # + # @return [Symbol, :file_id] + required :type, const: :file_id + + # @!method initialize(id:, type: :file_id) + # @param id [String] The identifier of the file. + # + # @param type [Symbol, :file_id] The type of jsonl source. Always `file_id`. + end + + class Responses < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The type of run data source. Always `responses`. + # + # @return [Symbol, :responses] + required :type, const: :responses + + # @!attribute created_after + # Only include items created after this timestamp (inclusive). This is a query + # parameter used to select responses. + # + # @return [Integer, nil] + optional :created_after, Integer, nil?: true + + # @!attribute created_before + # Only include items created before this timestamp (inclusive). This is a query + # parameter used to select responses. + # + # @return [Integer, nil] + optional :created_before, Integer, nil?: true + + # @!attribute instructions_search + # Optional string to search the 'instructions' field. This is a query parameter + # used to select responses. + # + # @return [String, nil] + optional :instructions_search, String, nil?: true + + # @!attribute metadata + # Metadata filter for the responses. This is a query parameter used to select + # responses. + # + # @return [Object, nil] + optional :metadata, OpenAI::Internal::Type::Unknown, nil?: true + + # @!attribute model + # The name of the model to find responses for. This is a query parameter used to + # select responses. + # + # @return [String, nil] + optional :model, String, nil?: true + + # @!attribute reasoning_effort + # Optional reasoning effort parameter. This is a query parameter used to select + # responses. + # + # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] + optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true + + # @!attribute temperature + # Sampling temperature. This is a query parameter used to select responses. 
+ #
+ # @return [Float, nil]
+ optional :temperature, Float, nil?: true
+
+ # @!attribute tools
+ # List of tool names. This is a query parameter used to select responses.
+ #
+ # @return [Array, nil]
+ optional :tools, OpenAI::Internal::Type::ArrayOf[String], nil?: true
+
+ # @!attribute top_p
+ # Nucleus sampling parameter. This is a query parameter used to select responses.
+ #
+ # @return [Float, nil]
+ optional :top_p, Float, nil?: true
+
+ # @!attribute users
+ # List of user identifiers. This is a query parameter used to select responses.
+ #
+ # @return [Array, nil]
+ optional :users, OpenAI::Internal::Type::ArrayOf[String], nil?: true
+
+ # @!method initialize(created_after: nil, created_before: nil, instructions_search: nil, metadata: nil, model: nil, reasoning_effort: nil, temperature: nil, tools: nil, top_p: nil, users: nil, type: :responses)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Responses}
+ # for more details.
+ #
+ # An EvalResponsesSource object describing a run data source configuration.
+ #
+ # @param created_after [Integer, nil] Only include items created after this timestamp (inclusive). This is a query par
+ #
+ # @param created_before [Integer, nil] Only include items created before this timestamp (inclusive). This is a query pa
+ #
+ # @param instructions_search [String, nil] Optional string to search the 'instructions' field. This is a query parameter us
+ #
+ # @param metadata [Object, nil] Metadata filter for the responses. This is a query parameter used to select resp
+ #
+ # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s
+ #
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re
+ #
+ # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses.
+ #
+ # @param tools [Array, nil] List of tool names. This is a query parameter used to select responses.
+ #
+ # @param top_p [Float, nil] Nucleus sampling parameter. This is a query parameter used to select responses.
+ #
+ # @param users [Array, nil] List of user identifiers. This is a query parameter used to select responses.
+ #
+ # @param type [Symbol, :responses] The type of run data source. Always `responses`.
+ end
+
+ # @!method self.variants
+ # @return [Array(OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Responses)]
+ end
+
+ # Used when sampling from a model. Dictates the structure of the messages passed
+ # into the model. Can either be a reference to a prebuilt trajectory (i.e.,
+ # `item.input_trajectory`), or a template with variable references to the `item`
+ # namespace.
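+ #
+ # As an illustrative sketch only (it assumes the SDK's usual coercion of plain
+ # hashes through this union, keyed on the `type` discriminator; the field names
+ # are the ones documented on the two variants below), the two shapes look like:
+ #
+ #   {type: :item_reference, item_reference: "item.input_trajectory"}
+ #
+ #   {type: :template, template: [{role: "user", content: "Say hello to {{item.name}}."}]}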
+ #
+ # @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses#input_messages
+ module InputMessages
+ extend OpenAI::Internal::Type::Union
+
+ discriminator :type
+
+ variant :template,
+ -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template }
+
+ variant :item_reference,
+ -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::ItemReference }
+
+ class Template < OpenAI::Internal::Type::BaseModel
+ # @!attribute template
+ # A list of chat messages forming the prompt or context. May include variable
+ # references to the `item` namespace, i.e. {{item.name}}.
+ #
+ # @return [Array]
+ required :template,
+ -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template] }
+
+ # @!attribute type
+ # The type of input messages. Always `template`.
+ #
+ # @return [Symbol, :template]
+ required :type, const: :template
+
+ # @!method initialize(template:, type: :template)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template}
+ # for more details.
+ #
+ # @param template [Array] A list of chat messages forming the prompt or context. May include variable refe
+ #
+ # @param type [Symbol, :template] The type of input messages. Always `template`.
+
+ # A message input to the model with a role indicating instruction following
+ # hierarchy. Instructions given with the `developer` or `system` role take
+ # precedence over instructions given with the `user` role. Messages with the
+ # `assistant` role are presumed to have been generated by the model in previous
+ # interactions.
+ module Template
+ extend OpenAI::Internal::Type::Union
+
+ variant -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage }
+
+ # A message input to the model with a role indicating instruction following
+ # hierarchy. Instructions given with the `developer` or `system` role take
+ # precedence over instructions given with the `user` role. Messages with the
+ # `assistant` role are presumed to have been generated by the model in previous
+ # interactions.
+ variant -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem }
+
+ class ChatMessage < OpenAI::Internal::Type::BaseModel
+ # @!attribute content
+ # The content of the message.
+ #
+ # @return [String]
+ required :content, String
+
+ # @!attribute role
+ # The role of the message (e.g. "system", "assistant", "user").
+ #
+ # @return [String]
+ required :role, String
+
+ # @!method initialize(content:, role:)
+ # @param content [String] The content of the message.
+ #
+ # @param role [String] The role of the message (e.g. "system", "assistant", "user").
+ end
+
+ class EvalItem < OpenAI::Internal::Type::BaseModel
+ # @!attribute content
+ # Inputs to the model - can contain template strings.
+ #
+ # @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array]
+ required :content,
+ union: -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content }
+
+ # @!attribute role
+ # The role of the message input. One of `user`, `assistant`, `system`, or
+ # `developer`.
+ #
+ # @return [Symbol, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role]
+ required :role,
+ enum: -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role }
+
+ # @!attribute type
+ # The type of the message input. Always `message`.
+ #
+ # @return [Symbol, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type, nil]
+ optional :type,
+ enum: -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type }
+
+ # @!method initialize(content:, role:, type: nil)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem}
+ # for more details.
+ #
+ # A message input to the model with a role indicating instruction following
+ # hierarchy. Instructions given with the `developer` or `system` role take
+ # precedence over instructions given with the `user` role. Messages with the
+ # `assistant` role are presumed to have been generated by the model in previous
+ # interactions.
+ #
+ # @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array] Inputs to the model - can contain template strings.
+ #
+ # @param role [Symbol, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role] The role of the message input. One of `user`, `assistant`, `system`, or
+ #
+ # @param type [Symbol, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type] The type of the message input. Always `message`.
+
+ # Inputs to the model - can contain template strings.
+ #
+ # @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#content
+ module Content
+ extend OpenAI::Internal::Type::Union
+
+ # A text input to the model.
+ variant String
+
+ # A text input to the model.
+ variant -> { OpenAI::Responses::ResponseInputText }
+
+ # A text output from the model.
+ variant -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText }
+
+ # An image input to the model.
+ variant -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage }
+
+ # A list of inputs, each of which may be either an input text or input image object.
+ variant -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::AnArrayOfInputTextAndInputImageArray }
+
+ class OutputText < OpenAI::Internal::Type::BaseModel
+ # @!attribute text
+ # The text output from the model.
+ #
+ # @return [String]
+ required :text, String
+
+ # @!attribute type
+ # The type of the output text. Always `output_text`.
+ #
+ # @return [Symbol, :output_text]
+ required :type, const: :output_text
+
+ # @!method initialize(text:, type: :output_text)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText}
+ # for more details.
+ #
+ # A text output from the model.
+ #
+ # @param text [String] The text output from the model.
+ #
+ # @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
+ end
+
+ class InputImage < OpenAI::Internal::Type::BaseModel
+ # @!attribute image_url
+ # The URL of the image input.
+ #
+ # @return [String]
+ required :image_url, String
+
+ # @!attribute type
+ # The type of the image input. Always `input_image`.
+ #
+ # @return [Symbol, :input_image]
+ required :type, const: :input_image
+
+ # @!attribute detail
+ # The detail level of the image to be sent to the model. One of `high`, `low`, or
+ # `auto`. Defaults to `auto`.
+ #
+ # @return [String, nil]
+ optional :detail, String
+
+ # @!method initialize(image_url:, detail: nil, type: :input_image)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage}
+ # for more details.
+ #
+ # An image input to the model.
+ #
+ # @param image_url [String] The URL of the image input.
+ #
+ # @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
+ #
+ # @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
+ end
+
+ # @!method self.variants
+ # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array)]
+
+ # @type [OpenAI::Internal::Type::Converter]
+ AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
+ end
+
+ # The role of the message input. One of `user`, `assistant`, `system`, or
+ # `developer`.
+ #
+ # @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#role
+ module Role
+ extend OpenAI::Internal::Type::Enum
+
+ USER = :user
+ ASSISTANT = :assistant
+ SYSTEM = :system
+ DEVELOPER = :developer
+
+ # @!method self.values
+ # @return [Array]
+ end
+
+ # The type of the message input. Always `message`.
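+ #
+ # Illustrative only: a hand-built EvalItem hash would carry this value as
+ # `type: :message` alongside `role` and `content` (hypothetical message text,
+ # assuming the usual enum coercion from symbols):
+ #
+ #   {role: :user, content: "Grade this answer: {{item.answer}}", type: :message}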
+ #
+ # @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#type
+ module Type
+ extend OpenAI::Internal::Type::Enum
+
+ MESSAGE = :message
+
+ # @!method self.values
+ # @return [Array]
+ end
+ end
+
+ # @!method self.variants
+ # @return [Array(OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem)]
+ end
+ end
+
+ class ItemReference < OpenAI::Internal::Type::BaseModel
+ # @!attribute item_reference
+ # A reference to a variable in the `item` namespace. I.e., "item.name"
+ #
+ # @return [String]
+ required :item_reference, String
+
+ # @!attribute type
+ # The type of input messages. Always `item_reference`.
+ #
+ # @return [Symbol, :item_reference]
+ required :type, const: :item_reference
+
+ # @!method initialize(item_reference:, type: :item_reference)
+ # @param item_reference [String] A reference to a variable in the `item` namespace. I.e., "item.name"
+ #
+ # @param type [Symbol, :item_reference] The type of input messages. Always `item_reference`.
+ end
+
+ # @!method self.variants
+ # @return [Array(OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::ItemReference)]
+ end
+
+ # @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses#sampling_params
+ class SamplingParams < OpenAI::Internal::Type::BaseModel
+ # @!attribute max_completion_tokens
+ # The maximum number of tokens in the generated output.
+ #
+ # @return [Integer, nil]
+ optional :max_completion_tokens, Integer
+
+ # @!attribute seed
+ # A seed value to initialize the randomness during sampling.
+ #
+ # @return [Integer, nil]
+ optional :seed, Integer
+
+ # @!attribute temperature
+ # A higher temperature increases randomness in the outputs.
+ #
+ # @return [Float, nil]
+ optional :temperature, Float
+
+ # @!attribute text
+ # Configuration options for a text response from the model. Can be plain text or
+ # structured JSON data. Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ #
+ # @return [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text, nil]
+ optional :text,
+ -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text }
+
+ # @!attribute tools
+ # An array of tools the model may call while generating a response. You can
+ # specify which tool to use by setting the `tool_choice` parameter.
+ #
+ # The two categories of tools you can provide the model are:
+ #
+ # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ # capabilities, like
+ # [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ # [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ # Learn more about
+ # [built-in tools](https://platform.openai.com/docs/guides/tools).
+ # - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ # the model to call your own code. Learn more about
+ # [function calling](https://platform.openai.com/docs/guides/function-calling).
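+ #
+ # A minimal sketch (illustrative, not part of the generated docs): a function
+ # tool written as a plain hash, assuming coercion into `OpenAI::Responses::Tool`
+ # and the function-tool fields of the Responses API; the tool name and schema
+ # are hypothetical:
+ #
+ #   {type: :function, name: "get_weather", parameters: {type: "object", properties: {city: {type: "string"}}}}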
+ #
+ # @return [Array, nil]
+ optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
+
+ # @!attribute top_p
+ # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+ #
+ # @return [Float, nil]
+ optional :top_p, Float
+
+ # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams}
+ # for more details.
+ #
+ # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
+ #
+ # @param seed [Integer] A seed value to initialize the randomness during sampling.
+ #
+ # @param temperature [Float] A higher temperature increases randomness in the outputs.
+ #
+ # @param text [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
+ #
+ # @param tools [Array] An array of tools the model may call while generating a response. You
+ #
+ # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+
+ # @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams#text
+ class Text < OpenAI::Internal::Type::BaseModel
+ # @!attribute format_
+ # An object specifying the format that the model must output.
+ #
+ # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+ # ensures the model will match your supplied JSON schema. Learn more in the
+ # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ #
+ # The default format is `{ "type": "text" }` with no additional options.
+ #
+ # **Not recommended for gpt-4o and newer models:**
+ #
+ # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ # ensures the message the model generates is valid JSON. Using `json_schema` is
+ # preferred for models that support it.
+ #
+ # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil]
+ optional :format_,
+ union: -> {
+ OpenAI::Responses::ResponseFormatTextConfig
+ },
+ api_name: :format
+
+ # @!method initialize(format_: nil)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text}
+ # for more details.
+ #
+ # Configuration options for a text response from the model. Can be plain text or
+ # structured JSON data. Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ #
+ # @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
+ end
+ end
+ end
+
+ # @!method self.variants
+ # @return [Array(OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses)]
+ end
+
+ class PerModelUsage < OpenAI::Internal::Type::BaseModel
+ # @!attribute cached_tokens
+ # The number of tokens retrieved from cache.
+ # + # @return [Integer] + required :cached_tokens, Integer + + # @!attribute completion_tokens + # The number of completion tokens generated. + # + # @return [Integer] + required :completion_tokens, Integer + + # @!attribute invocation_count + # The number of invocations. + # + # @return [Integer] + required :invocation_count, Integer + + # @!attribute model_name + # The name of the model. + # + # @return [String] + required :model_name, String + + # @!attribute prompt_tokens + # The number of prompt tokens used. + # + # @return [Integer] + required :prompt_tokens, Integer + + # @!attribute total_tokens + # The total number of tokens used. + # + # @return [Integer] + required :total_tokens, Integer + + # @!method initialize(cached_tokens:, completion_tokens:, invocation_count:, model_name:, prompt_tokens:, total_tokens:) + # @param cached_tokens [Integer] The number of tokens retrieved from cache. + # + # @param completion_tokens [Integer] The number of completion tokens generated. + # + # @param invocation_count [Integer] The number of invocations. + # + # @param model_name [String] The name of the model. + # + # @param prompt_tokens [Integer] The number of prompt tokens used. + # + # @param total_tokens [Integer] The total number of tokens used. + end + + class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel + # @!attribute failed + # Number of tests failed for this criteria. + # + # @return [Integer] + required :failed, Integer + + # @!attribute passed + # Number of tests passed for this criteria. + # + # @return [Integer] + required :passed, Integer + + # @!attribute testing_criteria + # A description of the testing criteria. + # + # @return [String] + required :testing_criteria, String + + # @!method initialize(failed:, passed:, testing_criteria:) + # @param failed [Integer] Number of tests failed for this criteria. + # + # @param passed [Integer] Number of tests passed for this criteria. + # + # @param testing_criteria [String] A description of the testing criteria. + end + + # @see OpenAI::Models::Evals::RunCancelResponse#result_counts + class ResultCounts < OpenAI::Internal::Type::BaseModel + # @!attribute errored + # Number of output items that resulted in an error. + # + # @return [Integer] + required :errored, Integer + + # @!attribute failed + # Number of output items that failed to pass the evaluation. + # + # @return [Integer] + required :failed, Integer + + # @!attribute passed + # Number of output items that passed the evaluation. + # + # @return [Integer] + required :passed, Integer + + # @!attribute total + # Total number of executed output items. + # + # @return [Integer] + required :total, Integer + + # @!method initialize(errored:, failed:, passed:, total:) + # Counters summarizing the outcomes of the evaluation run. + # + # @param errored [Integer] Number of output items that resulted in an error. + # + # @param failed [Integer] Number of output items that failed to pass the evaluation. + # + # @param passed [Integer] Number of output items that passed the evaluation. + # + # @param total [Integer] Total number of executed output items. 
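+ #
+ # For example (illustrative only; assumes `run` is a retrieved
+ # `OpenAI::Models::Evals::RunCancelResponse`):
+ #
+ #   counts = run.result_counts
+ #   pass_rate = counts.total.zero? ? 0.0 : counts.passed.fdiv(counts.total)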
+ end
+ end
+ end
+ end
+end
diff --git a/lib/openai/models/evals/run_create_params.rb b/lib/openai/models/evals/run_create_params.rb
new file mode 100644
index 00000000..edacb844
--- /dev/null
+++ b/lib/openai/models/evals/run_create_params.rb
@@ -0,0 +1,689 @@
+# frozen_string_literal: true
+
+module OpenAI
+ module Models
+ module Evals
+ # @see OpenAI::Resources::Evals::Runs#create
+ class RunCreateParams < OpenAI::Internal::Type::BaseModel
+ extend OpenAI::Internal::Type::RequestParameters::Converter
+ include OpenAI::Internal::Type::RequestParameters
+
+ # @!attribute data_source
+ # Details about the run's data source.
+ #
+ # @return [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource]
+ required :data_source, union: -> { OpenAI::Evals::RunCreateParams::DataSource }
+
+ # @!attribute metadata
+ # Set of 16 key-value pairs that can be attached to an object. This can be useful
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
+ #
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
+ #
+ # @return [Hash{Symbol=>String}, nil]
+ optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
+
+ # @!attribute name
+ # The name of the run.
+ #
+ # @return [String, nil]
+ optional :name, String
+
+ # @!method initialize(data_source:, metadata: nil, name: nil, request_options: {})
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCreateParams} for more details.
+ #
+ # @param data_source [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource] Details about the run's data source.
+ #
+ # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
+ #
+ # @param name [String] The name of the run.
+ #
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
+
+ # Details about the run's data source.
+ module DataSource
+ extend OpenAI::Internal::Type::Union
+
+ # A JsonlRunDataSource object that specifies a JSONL file that matches the eval
+ variant -> { OpenAI::Evals::CreateEvalJSONLRunDataSource }
+
+ # A CompletionsRunDataSource object describing a model sampling configuration.
+ variant -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource }
+
+ # A ResponsesRunDataSource object describing a model sampling configuration.
+ variant -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource }
+
+ class CreateEvalResponsesRunDataSource < OpenAI::Internal::Type::BaseModel
+ # @!attribute source
+ # Determines what populates the `item` namespace in this run's data source.
+ #
+ # @return [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileID, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses]
+ required :source,
+ union: -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source }
+
+ # @!attribute type
+ # The type of run data source. Always `responses`.
+ #
+ # @return [Symbol, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Type]
+ required :type,
+ enum: -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Type }
+
+ # @!attribute input_messages
+ # Used when sampling from a model. Dictates the structure of the messages passed
+ # into the model. Can either be a reference to a prebuilt trajectory (i.e.,
+ # `item.input_trajectory`), or a template with variable references to the `item`
+ # namespace.
+ #
+ # @return [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::ItemReference, nil]
+ optional :input_messages,
+ union: -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages }
+
+ # @!attribute model
+ # The name of the model to use for generating completions (e.g. "o3-mini").
+ #
+ # @return [String, nil]
+ optional :model, String
+
+ # @!attribute sampling_params
+ #
+ # @return [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams, nil]
+ optional :sampling_params,
+ -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams }
+
+ # @!method initialize(source:, type:, input_messages: nil, model: nil, sampling_params: nil)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource}
+ # for more details.
+ #
+ # A ResponsesRunDataSource object describing a model sampling configuration.
+ #
+ # @param source [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileID, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses] Determines what populates the `item` namespace in this run's data source.
+ #
+ # @param type [Symbol, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Type] The type of run data source. Always `responses`.
+ #
+ # @param input_messages [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::ItemReference] Used when sampling from a model. Dictates the structure of the messages passed i
+ #
+ # @param model [String] The name of the model to use for generating completions (e.g. "o3-mini").
+ #
+ # @param sampling_params [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams]
+
+ # Determines what populates the `item` namespace in this run's data source.
+ #
+ # @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource#source
+ module Source
+ extend OpenAI::Internal::Type::Union
+
+ discriminator :type
+
+ variant :file_content,
+ -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent }
+
+ variant :file_id,
+ -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileID }
+
+ # An EvalResponsesSource object describing a run data source configuration.
+ variant :responses, + -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses } + + class FileContent < OpenAI::Internal::Type::BaseModel + # @!attribute content + # The content of the jsonl file. + # + # @return [Array] + required :content, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent::Content] } + + # @!attribute type + # The type of jsonl source. Always `file_content`. + # + # @return [Symbol, :file_content] + required :type, const: :file_content + + # @!method initialize(content:, type: :file_content) + # @param content [Array] The content of the jsonl file. + # + # @param type [Symbol, :file_content] The type of jsonl source. Always `file_content`. + + class Content < OpenAI::Internal::Type::BaseModel + # @!attribute item + # + # @return [Hash{Symbol=>Object}] + required :item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute sample + # + # @return [Hash{Symbol=>Object}, nil] + optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!method initialize(item:, sample: nil) + # @param item [Hash{Symbol=>Object}] + # @param sample [Hash{Symbol=>Object}] + end + end + + class FileID < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The identifier of the file. + # + # @return [String] + required :id, String + + # @!attribute type + # The type of jsonl source. Always `file_id`. + # + # @return [Symbol, :file_id] + required :type, const: :file_id + + # @!method initialize(id:, type: :file_id) + # @param id [String] The identifier of the file. + # + # @param type [Symbol, :file_id] The type of jsonl source. Always `file_id`. + end + + class Responses < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The type of run data source. Always `responses`. + # + # @return [Symbol, :responses] + required :type, const: :responses + + # @!attribute created_after + # Only include items created after this timestamp (inclusive). This is a query + # parameter used to select responses. + # + # @return [Integer, nil] + optional :created_after, Integer, nil?: true + + # @!attribute created_before + # Only include items created before this timestamp (inclusive). This is a query + # parameter used to select responses. + # + # @return [Integer, nil] + optional :created_before, Integer, nil?: true + + # @!attribute instructions_search + # Optional string to search the 'instructions' field. This is a query parameter + # used to select responses. + # + # @return [String, nil] + optional :instructions_search, String, nil?: true + + # @!attribute metadata + # Metadata filter for the responses. This is a query parameter used to select + # responses. + # + # @return [Object, nil] + optional :metadata, OpenAI::Internal::Type::Unknown, nil?: true + + # @!attribute model + # The name of the model to find responses for. This is a query parameter used to + # select responses. + # + # @return [String, nil] + optional :model, String, nil?: true + + # @!attribute reasoning_effort + # Optional reasoning effort parameter. This is a query parameter used to select + # responses. + # + # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] + optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true + + # @!attribute temperature + # Sampling temperature. This is a query parameter used to select responses. 
+ #
+ # @return [Float, nil]
+ optional :temperature, Float, nil?: true
+
+ # @!attribute tools
+ # List of tool names. This is a query parameter used to select responses.
+ #
+ # @return [Array, nil]
+ optional :tools, OpenAI::Internal::Type::ArrayOf[String], nil?: true
+
+ # @!attribute top_p
+ # Nucleus sampling parameter. This is a query parameter used to select responses.
+ #
+ # @return [Float, nil]
+ optional :top_p, Float, nil?: true
+
+ # @!attribute users
+ # List of user identifiers. This is a query parameter used to select responses.
+ #
+ # @return [Array, nil]
+ optional :users, OpenAI::Internal::Type::ArrayOf[String], nil?: true
+
+ # @!method initialize(created_after: nil, created_before: nil, instructions_search: nil, metadata: nil, model: nil, reasoning_effort: nil, temperature: nil, tools: nil, top_p: nil, users: nil, type: :responses)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses}
+ # for more details.
+ #
+ # An EvalResponsesSource object describing a run data source configuration.
+ #
+ # @param created_after [Integer, nil] Only include items created after this timestamp (inclusive). This is a query par
+ #
+ # @param created_before [Integer, nil] Only include items created before this timestamp (inclusive). This is a query pa
+ #
+ # @param instructions_search [String, nil] Optional string to search the 'instructions' field. This is a query parameter us
+ #
+ # @param metadata [Object, nil] Metadata filter for the responses. This is a query parameter used to select resp
+ #
+ # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s
+ #
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re
+ #
+ # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses.
+ #
+ # @param tools [Array, nil] List of tool names. This is a query parameter used to select responses.
+ #
+ # @param top_p [Float, nil] Nucleus sampling parameter. This is a query parameter used to select responses.
+ #
+ # @param users [Array, nil] List of user identifiers. This is a query parameter used to select responses.
+ #
+ # @param type [Symbol, :responses] The type of run data source. Always `responses`.
+ end
+
+ # @!method self.variants
+ # @return [Array(OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileID, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses)]
+ end
+
+ # The type of run data source. Always `responses`.
+ #
+ # @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource#type
+ module Type
+ extend OpenAI::Internal::Type::Enum
+
+ RESPONSES = :responses
+
+ # @!method self.values
+ # @return [Array]
+ end
+
+ # Used when sampling from a model. Dictates the structure of the messages passed
+ # into the model. Can either be a reference to a prebuilt trajectory (i.e.,
+ # `item.input_trajectory`), or a template with variable references to the `item`
+ # namespace.
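+ #
+ # Sketch of the template form when creating a run (illustrative only; the
+ # message content is hypothetical, and plain hashes are assumed to coerce
+ # through this union via the `type` discriminator):
+ #
+ #   input_messages: {
+ #     type: :template,
+ #     template: [{role: "developer", content: "Grade the answer in {{item.answer}}."}]
+ #   }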
+ #
+ # @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource#input_messages
+ module InputMessages
+ extend OpenAI::Internal::Type::Union
+
+ discriminator :type
+
+ variant :template,
+ -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template }
+
+ variant :item_reference,
+ -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::ItemReference }
+
+ class Template < OpenAI::Internal::Type::BaseModel
+ # @!attribute template
+ # A list of chat messages forming the prompt or context. May include variable
+ # references to the `item` namespace, i.e. {{item.name}}.
+ #
+ # @return [Array]
+ required :template,
+ -> do
+ OpenAI::Internal::Type::ArrayOf[
+ union: OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template
+ ]
+ end
+
+ # @!attribute type
+ # The type of input messages. Always `template`.
+ #
+ # @return [Symbol, :template]
+ required :type, const: :template
+
+ # @!method initialize(template:, type: :template)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template}
+ # for more details.
+ #
+ # @param template [Array] A list of chat messages forming the prompt or context. May include variable refe
+ #
+ # @param type [Symbol, :template] The type of input messages. Always `template`.
+
+ # A message input to the model with a role indicating instruction following
+ # hierarchy. Instructions given with the `developer` or `system` role take
+ # precedence over instructions given with the `user` role. Messages with the
+ # `assistant` role are presumed to have been generated by the model in previous
+ # interactions.
+ module Template
+ extend OpenAI::Internal::Type::Union
+
+ variant -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::ChatMessage }
+
+ # A message input to the model with a role indicating instruction following
+ # hierarchy. Instructions given with the `developer` or `system` role take
+ # precedence over instructions given with the `user` role. Messages with the
+ # `assistant` role are presumed to have been generated by the model in previous
+ # interactions.
+ variant -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem }
+
+ class ChatMessage < OpenAI::Internal::Type::BaseModel
+ # @!attribute content
+ # The content of the message.
+ #
+ # @return [String]
+ required :content, String
+
+ # @!attribute role
+ # The role of the message (e.g. "system", "assistant", "user").
+ #
+ # @return [String]
+ required :role, String
+
+ # @!method initialize(content:, role:)
+ # @param content [String] The content of the message.
+ #
+ # @param role [String] The role of the message (e.g. "system", "assistant", "user").
+ end
+
+ class EvalItem < OpenAI::Internal::Type::BaseModel
+ # @!attribute content
+ # Inputs to the model - can contain template strings.
+ #
+ # @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, Array]
+ required :content,
+ union: -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content }
+
+ # @!attribute role
+ # The role of the message input. One of `user`, `assistant`, `system`, or
+ # `developer`.
+ #
+ # @return [Symbol, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Role]
+ required :role,
+ enum: -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Role }
+
+ # @!attribute type
+ # The type of the message input. Always `message`.
+ #
+ # @return [Symbol, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Type, nil]
+ optional :type,
+ enum: -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Type }
+
+ # @!method initialize(content:, role:, type: nil)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem}
+ # for more details.
+ #
+ # A message input to the model with a role indicating instruction following
+ # hierarchy. Instructions given with the `developer` or `system` role take
+ # precedence over instructions given with the `user` role. Messages with the
+ # `assistant` role are presumed to have been generated by the model in previous
+ # interactions.
+ #
+ # @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, Array] Inputs to the model - can contain template strings.
+ #
+ # @param role [Symbol, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Role] The role of the message input. One of `user`, `assistant`, `system`, or
+ #
+ # @param type [Symbol, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Type] The type of the message input. Always `message`.
+
+ # Inputs to the model - can contain template strings.
+ #
+ # @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem#content
+ module Content
+ extend OpenAI::Internal::Type::Union
+
+ # A text input to the model.
+ variant String
+
+ # A text input to the model.
+ variant -> { OpenAI::Responses::ResponseInputText }
+
+ # A text output from the model.
+ variant -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText }
+
+ # An image input to the model.
+ variant -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage }
+
+ # A list of inputs, each of which may be either an input text or input image object.
+ variant -> { OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::AnArrayOfInputTextAndInputImageArray }
+
+ class OutputText < OpenAI::Internal::Type::BaseModel
+ # @!attribute text
+ # The text output from the model.
+ #
+ # @return [String]
+ required :text, String
+
+ # @!attribute type
+ # The type of the output text. Always `output_text`.
+ #
+ # @return [Symbol, :output_text]
+ required :type, const: :output_text
+
+ # @!method initialize(text:, type: :output_text)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText}
+ # for more details.
+ #
+ # A text output from the model.
+ #
+ # @param text [String] The text output from the model.
+ #
+ # @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
+ end
+
+ class InputImage < OpenAI::Internal::Type::BaseModel
+ # @!attribute image_url
+ # The URL of the image input.
+ #
+ # @return [String]
+ required :image_url, String
+
+ # @!attribute type
+ # The type of the image input. Always `input_image`.
+ #
+ # @return [Symbol, :input_image]
+ required :type, const: :input_image
+
+ # @!attribute detail
+ # The detail level of the image to be sent to the model. One of `high`, `low`, or
+ # `auto`. Defaults to `auto`.
+ #
+ # @return [String, nil]
+ optional :detail, String
+
+ # @!method initialize(image_url:, detail: nil, type: :input_image)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage}
+ # for more details.
+ #
+ # An image input to the model.
+ #
+ # @param image_url [String] The URL of the image input.
+ #
+ # @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
+ #
+ # @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
+ end
+
+ # @!method self.variants
+ # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, Array)]
+
+ # @type [OpenAI::Internal::Type::Converter]
+ AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
+ end
+
+ # The role of the message input. One of `user`, `assistant`, `system`, or
+ # `developer`.
+ #
+ # @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem#role
+ module Role
+ extend OpenAI::Internal::Type::Enum
+
+ USER = :user
+ ASSISTANT = :assistant
+ SYSTEM = :system
+ DEVELOPER = :developer
+
+ # @!method self.values
+ # @return [Array]
+ end
+
+ # The type of the message input. Always `message`.
+ #
+ # @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem#type
+ module Type
+ extend OpenAI::Internal::Type::Enum
+
+ MESSAGE = :message
+
+ # @!method self.values
+ # @return [Array]
+ end
+ end
+
+ # @!method self.variants
+ # @return [Array(OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem)]
+ end
+ end
+
+ class ItemReference < OpenAI::Internal::Type::BaseModel
+ # @!attribute item_reference
+ # A reference to a variable in the `item` namespace. I.e., "item.name"
+ #
+ # @return [String]
+ required :item_reference, String
+
+ # @!attribute type
+ # The type of input messages. Always `item_reference`.
+ #
+ # @return [Symbol, :item_reference]
+ required :type, const: :item_reference
+
+ # @!method initialize(item_reference:, type: :item_reference)
+ # @param item_reference [String] A reference to a variable in the `item` namespace. I.e., "item.name"
+ #
+ # @param type [Symbol, :item_reference] The type of input messages. Always `item_reference`.
+ end
+
+ # @!method self.variants
+ # @return [Array(OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::ItemReference)]
+ end
+
+ # @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource#sampling_params
+ class SamplingParams < OpenAI::Internal::Type::BaseModel
+ # @!attribute max_completion_tokens
+ # The maximum number of tokens in the generated output.
+ #
+ # @return [Integer, nil]
+ optional :max_completion_tokens, Integer
+
+ # @!attribute seed
+ # A seed value to initialize the randomness during sampling.
+ #
+ # @return [Integer, nil]
+ optional :seed, Integer
+
+ # @!attribute temperature
+ # A higher temperature increases randomness in the outputs.
+ #
+ # @return [Float, nil]
+ optional :temperature, Float
+
+ # @!attribute text
+ # Configuration options for a text response from the model. Can be plain text or
+ # structured JSON data. Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ #
+ # @return [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text, nil]
+ optional :text,
+ -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text }
+
+ # @!attribute tools
+ # An array of tools the model may call while generating a response. You can
+ # specify which tool to use by setting the `tool_choice` parameter.
+ #
+ # The two categories of tools you can provide the model are:
+ #
+ # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ # capabilities, like
+ # [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ # [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ # Learn more about
+ # [built-in tools](https://platform.openai.com/docs/guides/tools).
+ # - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ # the model to call your own code. Learn more about
+ # [function calling](https://platform.openai.com/docs/guides/function-calling).
+ #
+ # @return [Array, nil]
+ optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
+
+ # @!attribute top_p
+ # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+ #
+ # @return [Float, nil]
+ optional :top_p, Float
+
+ # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams}
+ # for more details.
+ #
+ # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
+ #
+ # @param seed [Integer] A seed value to initialize the randomness during sampling.
+ #
+ # @param temperature [Float] A higher temperature increases randomness in the outputs.
+ #
+ # @param text [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
+ #
+ # @param tools [Array] An array of tools the model may call while generating a response. You
+ #
+ # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+
+ # @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams#text
+ class Text < OpenAI::Internal::Type::BaseModel
+ # @!attribute format_
+ # An object specifying the format that the model must output.
+ #
+ # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+ # ensures the model will match your supplied JSON schema. Learn more in the
+ # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ #
+ # The default format is `{ "type": "text" }` with no additional options.
+ #
+ # **Not recommended for gpt-4o and newer models:**
+ #
+ # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ # ensures the message the model generates is valid JSON. Using `json_schema` is
+ # preferred for models that support it.
+ #
+ # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil]
+ optional :format_,
+ union: -> {
+ OpenAI::Responses::ResponseFormatTextConfig
+ },
+ api_name: :format
+
+ # @!method initialize(format_: nil)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text}
+ # for more details.
+ #
+ # Configuration options for a text response from the model. Can be plain text or
+ # structured JSON data. Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ #
+ # @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
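+ #
+ # Illustrative sketch of enabling Structured Outputs here (the schema and its
+ # name are hypothetical; note the wire name is `format`, surfaced in Ruby as
+ # `format_`):
+ #
+ #   text: {format: {type: :json_schema, name: "verdict", strict: true, schema: {type: "object", properties: {pass: {type: "boolean"}}, required: ["pass"], additionalProperties: false}}}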
+ end + end + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource)] + end + end + end + end +end diff --git a/lib/openai/models/evals/run_create_response.rb b/lib/openai/models/evals/run_create_response.rb new file mode 100644 index 00000000..175718a2 --- /dev/null +++ b/lib/openai/models/evals/run_create_response.rb @@ -0,0 +1,877 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Evals + # @see OpenAI::Resources::Evals::Runs#create + class RunCreateResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # Unique identifier for the evaluation run. + # + # @return [String] + required :id, String + + # @!attribute created_at + # Unix timestamp (in seconds) when the evaluation run was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data_source + # Information about the run's data source. + # + # @return [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses] + required :data_source, union: -> { OpenAI::Models::Evals::RunCreateResponse::DataSource } + + # @!attribute error + # An object representing an error response from the Eval API. + # + # @return [OpenAI::Models::Evals::EvalAPIError] + required :error, -> { OpenAI::Evals::EvalAPIError } + + # @!attribute eval_id + # The identifier of the associated evaluation. + # + # @return [String] + required :eval_id, String + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!attribute model + # The model that is evaluated, if applicable. + # + # @return [String] + required :model, String + + # @!attribute name + # The name of the evaluation run. + # + # @return [String] + required :name, String + + # @!attribute object + # The type of the object. Always "eval.run". + # + # @return [Symbol, :"eval.run"] + required :object, const: :"eval.run" + + # @!attribute per_model_usage + # Usage statistics for each model during the evaluation run. + # + # @return [Array] + required :per_model_usage, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCreateResponse::PerModelUsage] } + + # @!attribute per_testing_criteria_results + # Results per testing criteria applied during the evaluation run. + # + # @return [Array] + required :per_testing_criteria_results, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCreateResponse::PerTestingCriteriaResult] } + + # @!attribute report_url + # The URL to the rendered evaluation run report on the UI dashboard. + # + # @return [String] + required :report_url, String + + # @!attribute result_counts + # Counters summarizing the outcomes of the evaluation run. 
+ #
+ # @return [OpenAI::Models::Evals::RunCreateResponse::ResultCounts]
+ required :result_counts, -> { OpenAI::Models::Evals::RunCreateResponse::ResultCounts }
+
+ # @!attribute status
+ # The status of the evaluation run.
+ #
+ # @return [String]
+ required :status, String
+
+ # @!method initialize(id:, created_at:, data_source:, error:, eval_id:, metadata:, model:, name:, per_model_usage:, per_testing_criteria_results:, report_url:, result_counts:, status:, object: :"eval.run")
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Evals::RunCreateResponse} for more details.
+ #
+ # A schema representing an evaluation run.
+ #
+ # @param id [String] Unique identifier for the evaluation run.
+ #
+ # @param created_at [Integer] Unix timestamp (in seconds) when the evaluation run was created.
+ #
+ # @param data_source [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses] Information about the run's data source.
+ #
+ # @param error [OpenAI::Models::Evals::EvalAPIError] An object representing an error response from the Eval API.
+ #
+ # @param eval_id [String] The identifier of the associated evaluation.
+ #
+ # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
+ #
+ # @param model [String] The model that is evaluated, if applicable.
+ #
+ # @param name [String] The name of the evaluation run.
+ #
+ # @param per_model_usage [Array] Usage statistics for each model during the evaluation run.
+ #
+ # @param per_testing_criteria_results [Array] Results per testing criteria applied during the evaluation run.
+ #
+ # @param report_url [String] The URL to the rendered evaluation run report on the UI dashboard.
+ #
+ # @param result_counts [OpenAI::Models::Evals::RunCreateResponse::ResultCounts] Counters summarizing the outcomes of the evaluation run.
+ #
+ # @param status [String] The status of the evaluation run.
+ #
+ # @param object [Symbol, :"eval.run"] The type of the object. Always "eval.run".
+
+ # Information about the run's data source.
+ #
+ # @see OpenAI::Models::Evals::RunCreateResponse#data_source
+ module DataSource
+ extend OpenAI::Internal::Type::Union
+
+ discriminator :type
+
+ # A JsonlRunDataSource object that specifies a JSONL file that matches the eval
+ variant :jsonl, -> { OpenAI::Evals::CreateEvalJSONLRunDataSource }
+
+ # A CompletionsRunDataSource object describing a model sampling configuration.
+ variant :completions, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource }
+
+ # A ResponsesRunDataSource object describing a model sampling configuration.
+ variant :responses, -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses }
+
+ class Responses < OpenAI::Internal::Type::BaseModel
+ # @!attribute source
+ # Determines what populates the `item` namespace in this run's data source.
+ #
+ # @return [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Responses]
+ required :source, union: -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source }
+
+ # @!attribute type
+ # The type of run data source. Always `responses`.
+ # + # @return [Symbol, :responses] + required :type, const: :responses + + # @!attribute input_messages + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + # + # @return [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::ItemReference, nil] + optional :input_messages, + union: -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages } + + # @!attribute model + # The name of the model to use for generating completions (e.g. "o3-mini"). + # + # @return [String, nil] + optional :model, String + + # @!attribute sampling_params + # + # @return [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams, nil] + optional :sampling_params, + -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams } + + # @!method initialize(source:, input_messages: nil, model: nil, sampling_params: nil, type: :responses) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses} for more + # details. + # + # A ResponsesRunDataSource object describing a model sampling configuration. + # + # @param source [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Responses] Determines what populates the `item` namespace in this run's data source. + # + # @param input_messages [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::ItemReference] Used when sampling from a model. Dictates the structure of the messages passed i + # + # @param model [String] The name of the model to use for generating completions (e.g. "o3-mini"). + # + # @param sampling_params [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams] + # + # @param type [Symbol, :responses] The type of run data source. Always `responses`. + + # Determines what populates the `item` namespace in this run's data source. + # + # @see OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses#source + module Source + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant :file_content, + -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent } + + variant :file_id, -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileID } + + # A EvalResponsesSource object describing a run data source configuration. + variant :responses, + -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Responses } + + class FileContent < OpenAI::Internal::Type::BaseModel + # @!attribute content + # The content of the jsonl file. + # + # @return [Array] + required :content, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent::Content] } + + # @!attribute type + # The type of jsonl source. Always `file_content`. 
+ # + # @return [Symbol, :file_content] + required :type, const: :file_content + + # @!method initialize(content:, type: :file_content) + # @param content [Array] The content of the jsonl file. + # + # @param type [Symbol, :file_content] The type of jsonl source. Always `file_content`. + + class Content < OpenAI::Internal::Type::BaseModel + # @!attribute item + # + # @return [Hash{Symbol=>Object}] + required :item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute sample + # + # @return [Hash{Symbol=>Object}, nil] + optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!method initialize(item:, sample: nil) + # @param item [Hash{Symbol=>Object}] + # @param sample [Hash{Symbol=>Object}] + end + end + + class FileID < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The identifier of the file. + # + # @return [String] + required :id, String + + # @!attribute type + # The type of jsonl source. Always `file_id`. + # + # @return [Symbol, :file_id] + required :type, const: :file_id + + # @!method initialize(id:, type: :file_id) + # @param id [String] The identifier of the file. + # + # @param type [Symbol, :file_id] The type of jsonl source. Always `file_id`. + end + + class Responses < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The type of run data source. Always `responses`. + # + # @return [Symbol, :responses] + required :type, const: :responses + + # @!attribute created_after + # Only include items created after this timestamp (inclusive). This is a query + # parameter used to select responses. + # + # @return [Integer, nil] + optional :created_after, Integer, nil?: true + + # @!attribute created_before + # Only include items created before this timestamp (inclusive). This is a query + # parameter used to select responses. + # + # @return [Integer, nil] + optional :created_before, Integer, nil?: true + + # @!attribute instructions_search + # Optional string to search the 'instructions' field. This is a query parameter + # used to select responses. + # + # @return [String, nil] + optional :instructions_search, String, nil?: true + + # @!attribute metadata + # Metadata filter for the responses. This is a query parameter used to select + # responses. + # + # @return [Object, nil] + optional :metadata, OpenAI::Internal::Type::Unknown, nil?: true + + # @!attribute model + # The name of the model to find responses for. This is a query parameter used to + # select responses. + # + # @return [String, nil] + optional :model, String, nil?: true + + # @!attribute reasoning_effort + # Optional reasoning effort parameter. This is a query parameter used to select + # responses. + # + # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] + optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true + + # @!attribute temperature + # Sampling temperature. This is a query parameter used to select responses. + # + # @return [Float, nil] + optional :temperature, Float, nil?: true + + # @!attribute tools + # List of tool names. This is a query parameter used to select responses. + # + # @return [Array, nil] + optional :tools, OpenAI::Internal::Type::ArrayOf[String], nil?: true + + # @!attribute top_p + # Nucleus sampling parameter. This is a query parameter used to select responses. + # + # @return [Float, nil] + optional :top_p, Float, nil?: true + + # @!attribute users + # List of user identifiers. This is a query parameter used to select responses. 
+ # + # @return [Array, nil] + optional :users, OpenAI::Internal::Type::ArrayOf[String], nil?: true + + # @!method initialize(created_after: nil, created_before: nil, instructions_search: nil, metadata: nil, model: nil, reasoning_effort: nil, temperature: nil, tools: nil, top_p: nil, users: nil, type: :responses) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Responses} + # for more details. + # + # A EvalResponsesSource object describing a run data source configuration. + # + # @param created_after [Integer, nil] Only include items created after this timestamp (inclusive). This is a query par + # + # @param created_before [Integer, nil] Only include items created before this timestamp (inclusive). This is a query pa + # + # @param instructions_search [String, nil] Optional string to search the 'instructions' field. This is a query parameter us + # + # @param metadata [Object, nil] Metadata filter for the responses. This is a query parameter used to select resp + # + # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s + # + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re + # + # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses. + # + # @param tools [Array, nil] List of tool names. This is a query parameter used to select responses. + # + # @param top_p [Float, nil] Nucleus sampling parameter. This is a query parameter used to select responses. + # + # @param users [Array, nil] List of user identifiers. This is a query parameter used to select responses. + # + # @param type [Symbol, :responses] The type of run data source. Always `responses`. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Responses)] + end + + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + # + # @see OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses#input_messages + module InputMessages + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant :template, + -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template } + + variant :item_reference, + -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::ItemReference } + + class Template < OpenAI::Internal::Type::BaseModel + # @!attribute template + # A list of chat messages forming the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. + # + # @return [Array] + required :template, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template] } + + # @!attribute type + # The type of input messages. Always `template`. 
+ # + # @return [Symbol, :template] + required :type, const: :template + + # @!method initialize(template:, type: :template) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template} + # for more details. + # + # @param template [Array] A list of chat messages forming the prompt or context. May include variable refe + # + # @param type [Symbol, :template] The type of input messages. Always `template`. + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + module Template + extend OpenAI::Internal::Type::Union + + variant -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage } + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + variant -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem } + + class ChatMessage < OpenAI::Internal::Type::BaseModel + # @!attribute content + # The content of the message. + # + # @return [String] + required :content, String + + # @!attribute role + # The role of the message (e.g. "system", "assistant", "user"). + # + # @return [String] + required :role, String + + # @!method initialize(content:, role:) + # @param content [String] The content of the message. + # + # @param role [String] The role of the message (e.g. "system", "assistant", "user"). + end + + class EvalItem < OpenAI::Internal::Type::BaseModel + # @!attribute content + # Inputs to the model - can contain template strings. + # + # @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array] + required :content, + union: -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content } + + # @!attribute role + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + # + # @return [Symbol, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role] + required :role, + enum: -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role } + + # @!attribute type + # The type of the message input. Always `message`. 
+ # + # @return [Symbol, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type, nil] + optional :type, + enum: -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type } + + # @!method initialize(content:, role:, type: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem} + # for more details. + # + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + # + # @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array] Inputs to the model - can contain template strings. + # + # @param role [Symbol, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role] The role of the message input. One of `user`, `assistant`, `system`, or + # + # @param type [Symbol, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type] The type of the message input. Always `message`. + + # Inputs to the model - can contain template strings. + # + # @see OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#content + module Content + extend OpenAI::Internal::Type::Union + + # A text input to the model. + variant String + + # A text input to the model. + variant -> { OpenAI::Responses::ResponseInputText } + + # A text output from the model. + variant -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText } + + # An image input to the model. + variant -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage } + + # A list of inputs, each of which may be either an input text or input image object. + variant -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::AnArrayOfInputTextAndInputImageArray } + + class OutputText < OpenAI::Internal::Type::BaseModel + # @!attribute text + # The text output from the model. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the output text. Always `output_text`. + # + # @return [Symbol, :output_text] + required :type, const: :output_text + + # @!method initialize(text:, type: :output_text) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText} + # for more details. + # + # A text output from the model. + # + # @param text [String] The text output from the model. + # + # @param type [Symbol, :output_text] The type of the output text. Always `output_text`. 
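+ # + # @example Hand-written sketch, not generated documentation: building this + # content variant directly via the initialize signature documented above; + # the text value is made up. + # + # output = OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText.new(text: "Paris is the capital of France.") + # output.type # => :output_text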
+ end + + class InputImage < OpenAI::Internal::Type::BaseModel + # @!attribute image_url + # The URL of the image input. + # + # @return [String] + required :image_url, String + + # @!attribute type + # The type of the image input. Always `input_image`. + # + # @return [Symbol, :input_image] + required :type, const: :input_image + + # @!attribute detail + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + # + # @return [String, nil] + optional :detail, String + + # @!method initialize(image_url:, detail: nil, type: :input_image) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage} + # for more details. + # + # An image input to the model. + # + # @param image_url [String] The URL of the image input. + # + # @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or + # + # @param type [Symbol, :input_image] The type of the image input. Always `input_image`. + end + + # @!method self.variants + # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array)] + + # @type [OpenAI::Internal::Type::Converter] + AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown] + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + # + # @see OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#role + module Role + extend OpenAI::Internal::Type::Enum + + USER = :user + ASSISTANT = :assistant + SYSTEM = :system + DEVELOPER = :developer + + # @!method self.values + # @return [Array] + end + + # The type of the message input. Always `message`. + # + # @see OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#type + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE = :message + + # @!method self.values + # @return [Array] + end + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem)] + end + end + + class ItemReference < OpenAI::Internal::Type::BaseModel + # @!attribute item_reference + # A reference to a variable in the `item` namespace. Ie, "item.name" + # + # @return [String] + required :item_reference, String + + # @!attribute type + # The type of input messages. Always `item_reference`. + # + # @return [Symbol, :item_reference] + required :type, const: :item_reference + + # @!method initialize(item_reference:, type: :item_reference) + # @param item_reference [String] A reference to a variable in the `item` namespace. Ie, "item.name" + # + # @param type [Symbol, :item_reference] The type of input messages. Always `item_reference`. 
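+ # + # @example Hand-written sketch, not generated documentation: referencing the + # prebuilt trajectory mentioned in the attribute docs above; any `item.*` + # variable would work the same way. + # + # ref = OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::ItemReference.new(item_reference: "item.input_trajectory") + # ref.type # => :item_reference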
+ end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::ItemReference)] + end + + # @see OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses#sampling_params + class SamplingParams < OpenAI::Internal::Type::BaseModel + # @!attribute max_completion_tokens + # The maximum number of tokens in the generated output. + # + # @return [Integer, nil] + optional :max_completion_tokens, Integer + + # @!attribute seed + # A seed value to initialize the randomness during sampling. + # + # @return [Integer, nil] + optional :seed, Integer + + # @!attribute temperature + # A higher temperature increases randomness in the outputs. + # + # @return [Float, nil] + optional :temperature, Float + + # @!attribute text + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # + # @return [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text, nil] + optional :text, + -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text } + + # @!attribute tools + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + # + # @return [Array, nil] + optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } + + # @!attribute top_p + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + # + # @return [Float, nil] + optional :top_p, Float + + # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams} + # for more details. + # + # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output. + # + # @param seed [Integer] A seed value to initialize the randomness during sampling. + # + # @param temperature [Float] A higher temperature increases randomness in the outputs. + # + # @param text [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain + # + # @param tools [Array] An array of tools the model may call while generating a response. You + # + # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
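+ # + # @example Hand-written sketch, not generated documentation: a conservative + # sampling configuration using the initialize signature documented above; + # every value below is made up. + # + # sampling = OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams.new( + # max_completion_tokens: 256, + # seed: 42, + # temperature: 0.2, + # top_p: 1.0 + # )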
+ + # @see OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams#text + class Text < OpenAI::Internal::Type::BaseModel + # @!attribute format_ + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + # + # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil] + optional :format_, + union: -> { + OpenAI::Responses::ResponseFormatTextConfig + }, + api_name: :format + + # @!method initialize(format_: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text} + # for more details. + # + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # + # @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. + end + end + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses)] + end + + class PerModelUsage < OpenAI::Internal::Type::BaseModel + # @!attribute cached_tokens + # The number of tokens retrieved from cache. + # + # @return [Integer] + required :cached_tokens, Integer + + # @!attribute completion_tokens + # The number of completion tokens generated. + # + # @return [Integer] + required :completion_tokens, Integer + + # @!attribute invocation_count + # The number of invocations. + # + # @return [Integer] + required :invocation_count, Integer + + # @!attribute model_name + # The name of the model. + # + # @return [String] + required :model_name, String + + # @!attribute prompt_tokens + # The number of prompt tokens used. + # + # @return [Integer] + required :prompt_tokens, Integer + + # @!attribute total_tokens + # The total number of tokens used. + # + # @return [Integer] + required :total_tokens, Integer + + # @!method initialize(cached_tokens:, completion_tokens:, invocation_count:, model_name:, prompt_tokens:, total_tokens:) + # @param cached_tokens [Integer] The number of tokens retrieved from cache. + # + # @param completion_tokens [Integer] The number of completion tokens generated. + # + # @param invocation_count [Integer] The number of invocations. + # + # @param model_name [String] The name of the model. + # + # @param prompt_tokens [Integer] The number of prompt tokens used. + # + # @param total_tokens [Integer] The total number of tokens used. 
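+ # + # @example Hand-written sketch, not generated documentation: aggregating token + # usage across models; `run` stands in for any parsed RunCreateResponse. + # + # total_tokens = run.per_model_usage.sum(&:total_tokens) + # prompt_tokens = run.per_model_usage.sum(&:prompt_tokens)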
+ end + + class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel + # @!attribute failed + # Number of tests failed for this criteria. + # + # @return [Integer] + required :failed, Integer + + # @!attribute passed + # Number of tests passed for this criteria. + # + # @return [Integer] + required :passed, Integer + + # @!attribute testing_criteria + # A description of the testing criteria. + # + # @return [String] + required :testing_criteria, String + + # @!method initialize(failed:, passed:, testing_criteria:) + # @param failed [Integer] Number of tests failed for this criteria. + # + # @param passed [Integer] Number of tests passed for this criteria. + # + # @param testing_criteria [String] A description of the testing criteria. + end + + # @see OpenAI::Models::Evals::RunCreateResponse#result_counts + class ResultCounts < OpenAI::Internal::Type::BaseModel + # @!attribute errored + # Number of output items that resulted in an error. + # + # @return [Integer] + required :errored, Integer + + # @!attribute failed + # Number of output items that failed to pass the evaluation. + # + # @return [Integer] + required :failed, Integer + + # @!attribute passed + # Number of output items that passed the evaluation. + # + # @return [Integer] + required :passed, Integer + + # @!attribute total + # Total number of executed output items. + # + # @return [Integer] + required :total, Integer + + # @!method initialize(errored:, failed:, passed:, total:) + # Counters summarizing the outcomes of the evaluation run. + # + # @param errored [Integer] Number of output items that resulted in an error. + # + # @param failed [Integer] Number of output items that failed to pass the evaluation. + # + # @param passed [Integer] Number of output items that passed the evaluation. + # + # @param total [Integer] Total number of executed output items. 
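+ # + # @example Hand-written sketch, not generated documentation: deriving a pass + # rate while guarding against runs with zero executed items; `counts` + # stands in for any parsed ResultCounts. + # + # pass_rate = counts.total.zero? ? 0.0 : counts.passed.fdiv(counts.total)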
+ end + end + end + end +end diff --git a/lib/openai/models/evals/run_delete_params.rb b/lib/openai/models/evals/run_delete_params.rb new file mode 100644 index 00000000..0f8cc006 --- /dev/null +++ b/lib/openai/models/evals/run_delete_params.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Evals + # @see OpenAI::Resources::Evals::Runs#delete + class RunDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute eval_id + # + # @return [String] + required :eval_id, String + + # @!method initialize(eval_id:, request_options: {}) + # @param eval_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/models/evals/run_delete_response.rb b/lib/openai/models/evals/run_delete_response.rb new file mode 100644 index 00000000..eb8707ad --- /dev/null +++ b/lib/openai/models/evals/run_delete_response.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Evals + # @see OpenAI::Resources::Evals::Runs#delete + class RunDeleteResponse < OpenAI::Internal::Type::BaseModel + # @!attribute deleted + # + # @return [Boolean, nil] + optional :deleted, OpenAI::Internal::Type::Boolean + + # @!attribute object + # + # @return [String, nil] + optional :object, String + + # @!attribute run_id + # + # @return [String, nil] + optional :run_id, String + + # @!method initialize(deleted: nil, object: nil, run_id: nil) + # @param deleted [Boolean] + # @param object [String] + # @param run_id [String] + end + end + end +end diff --git a/lib/openai/models/evals/run_list_params.rb b/lib/openai/models/evals/run_list_params.rb new file mode 100644 index 00000000..3e0a45b3 --- /dev/null +++ b/lib/openai/models/evals/run_list_params.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Evals + # @see OpenAI::Resources::Evals::Runs#list + class RunListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute after + # Identifier for the last run from the previous pagination request. + # + # @return [String, nil] + optional :after, String + + # @!attribute limit + # Number of runs to retrieve. + # + # @return [Integer, nil] + optional :limit, Integer + + # @!attribute order + # Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for + # descending order. Defaults to `asc`. + # + # @return [Symbol, OpenAI::Models::Evals::RunListParams::Order, nil] + optional :order, enum: -> { OpenAI::Evals::RunListParams::Order } + + # @!attribute status + # Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed` + # | `canceled`. + # + # @return [Symbol, OpenAI::Models::Evals::RunListParams::Status, nil] + optional :status, enum: -> { OpenAI::Evals::RunListParams::Status } + + # @!method initialize(after: nil, limit: nil, order: nil, status: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunListParams} for more details. + # + # @param after [String] Identifier for the last run from the previous pagination request. + # + # @param limit [Integer] Number of runs to retrieve. + # + # @param order [Symbol, OpenAI::Models::Evals::RunListParams::Order] Sort order for runs by timestamp. 
Use `asc` for ascending order or `desc` for de + # + # @param status [Symbol, OpenAI::Models::Evals::RunListParams::Status] Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed` + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + # Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for + # descending order. Defaults to `asc`. + module Order + extend OpenAI::Internal::Type::Enum + + ASC = :asc + DESC = :desc + + # @!method self.values + # @return [Array] + end + + # Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed` + # | `canceled`. + module Status + extend OpenAI::Internal::Type::Enum + + QUEUED = :queued + IN_PROGRESS = :in_progress + COMPLETED = :completed + CANCELED = :canceled + FAILED = :failed + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/evals/run_list_response.rb b/lib/openai/models/evals/run_list_response.rb new file mode 100644 index 00000000..86690fce --- /dev/null +++ b/lib/openai/models/evals/run_list_response.rb @@ -0,0 +1,876 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Evals + # @see OpenAI::Resources::Evals::Runs#list + class RunListResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # Unique identifier for the evaluation run. + # + # @return [String] + required :id, String + + # @!attribute created_at + # Unix timestamp (in seconds) when the evaluation run was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data_source + # Information about the run's data source. + # + # @return [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunListResponse::DataSource::Responses] + required :data_source, union: -> { OpenAI::Models::Evals::RunListResponse::DataSource } + + # @!attribute error + # An object representing an error response from the Eval API. + # + # @return [OpenAI::Models::Evals::EvalAPIError] + required :error, -> { OpenAI::Evals::EvalAPIError } + + # @!attribute eval_id + # The identifier of the associated evaluation. + # + # @return [String] + required :eval_id, String + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!attribute model + # The model that is evaluated, if applicable. + # + # @return [String] + required :model, String + + # @!attribute name + # The name of the evaluation run. + # + # @return [String] + required :name, String + + # @!attribute object + # The type of the object. Always "eval.run". + # + # @return [Symbol, :"eval.run"] + required :object, const: :"eval.run" + + # @!attribute per_model_usage + # Usage statistics for each model during the evaluation run. + # + # @return [Array] + required :per_model_usage, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunListResponse::PerModelUsage] } + + # @!attribute per_testing_criteria_results + # Results per testing criteria applied during the evaluation run. 
+ # + # @return [Array] + required :per_testing_criteria_results, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunListResponse::PerTestingCriteriaResult] } + + # @!attribute report_url + # The URL to the rendered evaluation run report on the UI dashboard. + # + # @return [String] + required :report_url, String + + # @!attribute result_counts + # Counters summarizing the outcomes of the evaluation run. + # + # @return [OpenAI::Models::Evals::RunListResponse::ResultCounts] + required :result_counts, -> { OpenAI::Models::Evals::RunListResponse::ResultCounts } + + # @!attribute status + # The status of the evaluation run. + # + # @return [String] + required :status, String + + # @!method initialize(id:, created_at:, data_source:, error:, eval_id:, metadata:, model:, name:, per_model_usage:, per_testing_criteria_results:, report_url:, result_counts:, status:, object: :"eval.run") + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Evals::RunListResponse} for more details. + # + # A schema representing an evaluation run. + # + # @param id [String] Unique identifier for the evaluation run. + # + # @param created_at [Integer] Unix timestamp (in seconds) when the evaluation run was created. + # + # @param data_source [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunListResponse::DataSource::Responses] Information about the run's data source. + # + # @param error [OpenAI::Models::Evals::EvalAPIError] An object representing an error response from the Eval API. + # + # @param eval_id [String] The identifier of the associated evaluation. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param model [String] The model that is evaluated, if applicable. + # + # @param name [String] The name of the evaluation run. + # + # @param per_model_usage [Array] Usage statistics for each model during the evaluation run. + # + # @param per_testing_criteria_results [Array] Results per testing criteria applied during the evaluation run. + # + # @param report_url [String] The URL to the rendered evaluation run report on the UI dashboard. + # + # @param result_counts [OpenAI::Models::Evals::RunListResponse::ResultCounts] Counters summarizing the outcomes of the evaluation run. + # + # @param status [String] The status of the evaluation run. + # + # @param object [Symbol, :"eval.run"] The type of the object. Always "eval.run". + + # Information about the run's data source. + # + # @see OpenAI::Models::Evals::RunListResponse#data_source + module DataSource + extend OpenAI::Internal::Type::Union + + discriminator :type + + # A JsonlRunDataSource object that specifies a JSONL file that matches the eval + variant :jsonl, -> { OpenAI::Evals::CreateEvalJSONLRunDataSource } + + # A CompletionsRunDataSource object describing a model sampling configuration. + variant :completions, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource } + + # A ResponsesRunDataSource object describing a model sampling configuration. + variant :responses, -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses } + + class Responses < OpenAI::Internal::Type::BaseModel + # @!attribute source + # Determines what populates the `item` namespace in this run's data source.
+ # + # @return [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Responses] + required :source, union: -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source } + + # @!attribute type + # The type of run data source. Always `responses`. + # + # @return [Symbol, :responses] + required :type, const: :responses + + # @!attribute input_messages + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + # + # @return [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::ItemReference, nil] + optional :input_messages, + union: -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages } + + # @!attribute model + # The name of the model to use for generating completions (e.g. "o3-mini"). + # + # @return [String, nil] + optional :model, String + + # @!attribute sampling_params + # + # @return [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams, nil] + optional :sampling_params, + -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams } + + # @!method initialize(source:, input_messages: nil, model: nil, sampling_params: nil, type: :responses) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunListResponse::DataSource::Responses} for more + # details. + # + # A ResponsesRunDataSource object describing a model sampling configuration. + # + # @param source [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Responses] Determines what populates the `item` namespace in this run's data source. + # + # @param input_messages [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::ItemReference] Used when sampling from a model. Dictates the structure of the messages passed i + # + # @param model [String] The name of the model to use for generating completions (e.g. "o3-mini"). + # + # @param sampling_params [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams] + # + # @param type [Symbol, :responses] The type of run data source. Always `responses`. + + # Determines what populates the `item` namespace in this run's data source. + # + # @see OpenAI::Models::Evals::RunListResponse::DataSource::Responses#source + module Source + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant :file_content, + -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent } + + variant :file_id, -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileID } + + # A EvalResponsesSource object describing a run data source configuration. 
+ variant :responses, + -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Responses } + + class FileContent < OpenAI::Internal::Type::BaseModel + # @!attribute content + # The content of the jsonl file. + # + # @return [Array] + required :content, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent::Content] } + + # @!attribute type + # The type of jsonl source. Always `file_content`. + # + # @return [Symbol, :file_content] + required :type, const: :file_content + + # @!method initialize(content:, type: :file_content) + # @param content [Array] The content of the jsonl file. + # + # @param type [Symbol, :file_content] The type of jsonl source. Always `file_content`. + + class Content < OpenAI::Internal::Type::BaseModel + # @!attribute item + # + # @return [Hash{Symbol=>Object}] + required :item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute sample + # + # @return [Hash{Symbol=>Object}, nil] + optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!method initialize(item:, sample: nil) + # @param item [Hash{Symbol=>Object}] + # @param sample [Hash{Symbol=>Object}] + end + end + + class FileID < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The identifier of the file. + # + # @return [String] + required :id, String + + # @!attribute type + # The type of jsonl source. Always `file_id`. + # + # @return [Symbol, :file_id] + required :type, const: :file_id + + # @!method initialize(id:, type: :file_id) + # @param id [String] The identifier of the file. + # + # @param type [Symbol, :file_id] The type of jsonl source. Always `file_id`. + end + + class Responses < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The type of run data source. Always `responses`. + # + # @return [Symbol, :responses] + required :type, const: :responses + + # @!attribute created_after + # Only include items created after this timestamp (inclusive). This is a query + # parameter used to select responses. + # + # @return [Integer, nil] + optional :created_after, Integer, nil?: true + + # @!attribute created_before + # Only include items created before this timestamp (inclusive). This is a query + # parameter used to select responses. + # + # @return [Integer, nil] + optional :created_before, Integer, nil?: true + + # @!attribute instructions_search + # Optional string to search the 'instructions' field. This is a query parameter + # used to select responses. + # + # @return [String, nil] + optional :instructions_search, String, nil?: true + + # @!attribute metadata + # Metadata filter for the responses. This is a query parameter used to select + # responses. + # + # @return [Object, nil] + optional :metadata, OpenAI::Internal::Type::Unknown, nil?: true + + # @!attribute model + # The name of the model to find responses for. This is a query parameter used to + # select responses. + # + # @return [String, nil] + optional :model, String, nil?: true + + # @!attribute reasoning_effort + # Optional reasoning effort parameter. This is a query parameter used to select + # responses. + # + # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] + optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true + + # @!attribute temperature + # Sampling temperature. This is a query parameter used to select responses. + # + # @return [Float, nil] + optional :temperature, Float, nil?: true + + # @!attribute tools + # List of tool names. 
This is a query parameter used to select responses. + # + # @return [Array, nil] + optional :tools, OpenAI::Internal::Type::ArrayOf[String], nil?: true + + # @!attribute top_p + # Nucleus sampling parameter. This is a query parameter used to select responses. + # + # @return [Float, nil] + optional :top_p, Float, nil?: true + + # @!attribute users + # List of user identifiers. This is a query parameter used to select responses. + # + # @return [Array, nil] + optional :users, OpenAI::Internal::Type::ArrayOf[String], nil?: true + + # @!method initialize(created_after: nil, created_before: nil, instructions_search: nil, metadata: nil, model: nil, reasoning_effort: nil, temperature: nil, tools: nil, top_p: nil, users: nil, type: :responses) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Responses} + # for more details. + # + # A EvalResponsesSource object describing a run data source configuration. + # + # @param created_after [Integer, nil] Only include items created after this timestamp (inclusive). This is a query par + # + # @param created_before [Integer, nil] Only include items created before this timestamp (inclusive). This is a query pa + # + # @param instructions_search [String, nil] Optional string to search the 'instructions' field. This is a query parameter us + # + # @param metadata [Object, nil] Metadata filter for the responses. This is a query parameter used to select resp + # + # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s + # + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re + # + # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses. + # + # @param tools [Array, nil] List of tool names. This is a query parameter used to select responses. + # + # @param top_p [Float, nil] Nucleus sampling parameter. This is a query parameter used to select responses. + # + # @param users [Array, nil] List of user identifiers. This is a query parameter used to select responses. + # + # @param type [Symbol, :responses] The type of run data source. Always `responses`. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Responses)] + end + + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + # + # @see OpenAI::Models::Evals::RunListResponse::DataSource::Responses#input_messages + module InputMessages + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant :template, + -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template } + + variant :item_reference, + -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::ItemReference } + + class Template < OpenAI::Internal::Type::BaseModel + # @!attribute template + # A list of chat messages forming the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. 
+ # + # @return [Array] + required :template, + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template] } + + # @!attribute type + # The type of input messages. Always `template`. + # + # @return [Symbol, :template] + required :type, const: :template + + # @!method initialize(template:, type: :template) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template} + # for more details. + # + # @param template [Array] A list of chat messages forming the prompt or context. May include variable refe + # + # @param type [Symbol, :template] The type of input messages. Always `template`. + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + module Template + extend OpenAI::Internal::Type::Union + + variant -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage } + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + variant -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem } + + class ChatMessage < OpenAI::Internal::Type::BaseModel + # @!attribute content + # The content of the message. + # + # @return [String] + required :content, String + + # @!attribute role + # The role of the message (e.g. "system", "assistant", "user"). + # + # @return [String] + required :role, String + + # @!method initialize(content:, role:) + # @param content [String] The content of the message. + # + # @param role [String] The role of the message (e.g. "system", "assistant", "user"). + end + + class EvalItem < OpenAI::Internal::Type::BaseModel + # @!attribute content + # Inputs to the model - can contain template strings. + # + # @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array] + required :content, + union: -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content } + + # @!attribute role + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + # + # @return [Symbol, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role] + required :role, + enum: -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role } + + # @!attribute type + # The type of the message input. Always `message`. 
+ # + # @return [Symbol, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type, nil] + optional :type, + enum: -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type } + + # @!method initialize(content:, role:, type: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem} + # for more details. + # + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + # + # @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array] Inputs to the model - can contain template strings. + # + # @param role [Symbol, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role] The role of the message input. One of `user`, `assistant`, `system`, or + # + # @param type [Symbol, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type] The type of the message input. Always `message`. + + # Inputs to the model - can contain template strings. + # + # @see OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#content + module Content + extend OpenAI::Internal::Type::Union + + # A text input to the model. + variant String + + # A text input to the model. + variant -> { OpenAI::Responses::ResponseInputText } + + # A text output from the model. + variant -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText } + + # An image input to the model. + variant -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage } + + # A list of inputs, each of which may be either an input text or input image object. + variant -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::AnArrayOfInputTextAndInputImageArray } + + class OutputText < OpenAI::Internal::Type::BaseModel + # @!attribute text + # The text output from the model. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the output text. Always `output_text`. + # + # @return [Symbol, :output_text] + required :type, const: :output_text + + # @!method initialize(text:, type: :output_text) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText} + # for more details. + # + # A text output from the model. + # + # @param text [String] The text output from the model. + # + # @param type [Symbol, :output_text] The type of the output text. Always `output_text`. 
+ end + + class InputImage < OpenAI::Internal::Type::BaseModel + # @!attribute image_url + # The URL of the image input. + # + # @return [String] + required :image_url, String + + # @!attribute type + # The type of the image input. Always `input_image`. + # + # @return [Symbol, :input_image] + required :type, const: :input_image + + # @!attribute detail + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + # + # @return [String, nil] + optional :detail, String + + # @!method initialize(image_url:, detail: nil, type: :input_image) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage} + # for more details. + # + # An image input to the model. + # + # @param image_url [String] The URL of the image input. + # + # @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or + # + # @param type [Symbol, :input_image] The type of the image input. Always `input_image`. + end + + # @!method self.variants + # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array)] + + # @type [OpenAI::Internal::Type::Converter] + AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown] + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + # + # @see OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#role + module Role + extend OpenAI::Internal::Type::Enum + + USER = :user + ASSISTANT = :assistant + SYSTEM = :system + DEVELOPER = :developer + + # @!method self.values + # @return [Array] + end + + # The type of the message input. Always `message`. + # + # @see OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#type + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE = :message + + # @!method self.values + # @return [Array] + end + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem)] + end + end + + class ItemReference < OpenAI::Internal::Type::BaseModel + # @!attribute item_reference + # A reference to a variable in the `item` namespace. Ie, "item.name" + # + # @return [String] + required :item_reference, String + + # @!attribute type + # The type of input messages. Always `item_reference`. + # + # @return [Symbol, :item_reference] + required :type, const: :item_reference + + # @!method initialize(item_reference:, type: :item_reference) + # @param item_reference [String] A reference to a variable in the `item` namespace. Ie, "item.name" + # + # @param type [Symbol, :item_reference] The type of input messages. Always `item_reference`. 
+ end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::ItemReference)] + end + + # @see OpenAI::Models::Evals::RunListResponse::DataSource::Responses#sampling_params + class SamplingParams < OpenAI::Internal::Type::BaseModel + # @!attribute max_completion_tokens + # The maximum number of tokens in the generated output. + # + # @return [Integer, nil] + optional :max_completion_tokens, Integer + + # @!attribute seed + # A seed value to initialize the randomness, during sampling. + # + # @return [Integer, nil] + optional :seed, Integer + + # @!attribute temperature + # A higher temperature increases randomness in the outputs. + # + # @return [Float, nil] + optional :temperature, Float + + # @!attribute text + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # + # @return [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text, nil] + optional :text, -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text } + + # @!attribute tools + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + # + # @return [Array, nil] + optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } + + # @!attribute top_p + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + # + # @return [Float, nil] + optional :top_p, Float + + # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams} + # for more details. + # + # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output. + # + # @param seed [Integer] A seed value to initialize the randomness, during sampling. + # + # @param temperature [Float] A higher temperature increases randomness in the outputs. + # + # @param text [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain + # + # @param tools [Array] An array of tools the model may call while generating a response. You + # + # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens. 
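
Every field on `SamplingParams` is optional, so callers pin down only the sampling behaviour they care about. A small sketch with placeholder values (a hash literal standing in for the model class):

# A fixed seed plus a low temperature keeps eval outputs close to
# deterministic; top_p: 1.0 leaves nucleus sampling effectively disabled.
sampling_params = {
  max_completion_tokens: 512,
  seed: 42,
  temperature: 0.2,
  top_p: 1.0
}

# Unset fields are omitted rather than sent as nil.
overrides = { temperature: 0.0, seed: nil }.compact
p sampling_params.merge(overrides) # temperature drops to 0.0; seed stays 42
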
+ + # @see OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams#text + class Text < OpenAI::Internal::Type::BaseModel + # @!attribute format_ + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + # + # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil] + optional :format_, + union: -> { + OpenAI::Responses::ResponseFormatTextConfig + }, + api_name: :format + + # @!method initialize(format_: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text} + # for more details. + # + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # + # @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. + end + end + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunListResponse::DataSource::Responses)] + end + + class PerModelUsage < OpenAI::Internal::Type::BaseModel + # @!attribute cached_tokens + # The number of tokens retrieved from cache. + # + # @return [Integer] + required :cached_tokens, Integer + + # @!attribute completion_tokens + # The number of completion tokens generated. + # + # @return [Integer] + required :completion_tokens, Integer + + # @!attribute invocation_count + # The number of invocations. + # + # @return [Integer] + required :invocation_count, Integer + + # @!attribute model_name + # The name of the model. + # + # @return [String] + required :model_name, String + + # @!attribute prompt_tokens + # The number of prompt tokens used. + # + # @return [Integer] + required :prompt_tokens, Integer + + # @!attribute total_tokens + # The total number of tokens used. + # + # @return [Integer] + required :total_tokens, Integer + + # @!method initialize(cached_tokens:, completion_tokens:, invocation_count:, model_name:, prompt_tokens:, total_tokens:) + # @param cached_tokens [Integer] The number of tokens retrieved from cache. + # + # @param completion_tokens [Integer] The number of completion tokens generated. + # + # @param invocation_count [Integer] The number of invocations. + # + # @param model_name [String] The name of the model. + # + # @param prompt_tokens [Integer] The number of prompt tokens used. + # + # @param total_tokens [Integer] The total number of tokens used. 
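
Because `per_model_usage` carries one entry per model, run-level token accounting is a simple fold over the array. A sketch with fabricated numbers, shaped like the attributes above:

per_model_usage = [
  { model_name: "gpt-4o-mini", invocation_count: 40, prompt_tokens: 8_000,
    completion_tokens: 2_000, cached_tokens: 1_500, total_tokens: 10_000 },
  { model_name: "gpt-4.1", invocation_count: 10, prompt_tokens: 3_000,
    completion_tokens: 900, cached_tokens: 0, total_tokens: 3_900 }
]

per_model_usage.each do |u|
  cache_rate = u[:cached_tokens].fdiv(u[:prompt_tokens])
  puts format("%-12s %6d tokens (%.0f%% of prompt tokens cached)",
              u[:model_name], u[:total_tokens], cache_rate * 100)
end
puts "total: #{per_model_usage.sum { |u| u[:total_tokens] }} tokens"
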
+ end + + class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel + # @!attribute failed + # Number of tests failed for this criteria. + # + # @return [Integer] + required :failed, Integer + + # @!attribute passed + # Number of tests passed for this criteria. + # + # @return [Integer] + required :passed, Integer + + # @!attribute testing_criteria + # A description of the testing criteria. + # + # @return [String] + required :testing_criteria, String + + # @!method initialize(failed:, passed:, testing_criteria:) + # @param failed [Integer] Number of tests failed for this criteria. + # + # @param passed [Integer] Number of tests passed for this criteria. + # + # @param testing_criteria [String] A description of the testing criteria. + end + + # @see OpenAI::Models::Evals::RunListResponse#result_counts + class ResultCounts < OpenAI::Internal::Type::BaseModel + # @!attribute errored + # Number of output items that resulted in an error. + # + # @return [Integer] + required :errored, Integer + + # @!attribute failed + # Number of output items that failed to pass the evaluation. + # + # @return [Integer] + required :failed, Integer + + # @!attribute passed + # Number of output items that passed the evaluation. + # + # @return [Integer] + required :passed, Integer + + # @!attribute total + # Total number of executed output items. + # + # @return [Integer] + required :total, Integer + + # @!method initialize(errored:, failed:, passed:, total:) + # Counters summarizing the outcomes of the evaluation run. + # + # @param errored [Integer] Number of output items that resulted in an error. + # + # @param failed [Integer] Number of output items that failed to pass the evaluation. + # + # @param passed [Integer] Number of output items that passed the evaluation. + # + # @param total [Integer] Total number of executed output items. + end + end + end + end +end diff --git a/lib/openai/models/evals/run_retrieve_params.rb b/lib/openai/models/evals/run_retrieve_params.rb new file mode 100644 index 00000000..0ca8c695 --- /dev/null +++ b/lib/openai/models/evals/run_retrieve_params.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Evals + # @see OpenAI::Resources::Evals::Runs#retrieve + class RunRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute eval_id + # + # @return [String] + required :eval_id, String + + # @!method initialize(eval_id:, request_options: {}) + # @param eval_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/models/evals/run_retrieve_response.rb b/lib/openai/models/evals/run_retrieve_response.rb new file mode 100644 index 00000000..942f613c --- /dev/null +++ b/lib/openai/models/evals/run_retrieve_response.rb @@ -0,0 +1,881 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Evals + # @see OpenAI::Resources::Evals::Runs#retrieve + class RunRetrieveResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # Unique identifier for the evaluation run. + # + # @return [String] + required :id, String + + # @!attribute created_at + # Unix timestamp (in seconds) when the evaluation run was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data_source + # Information about the run's data source. 
+ # + # @return [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses] + required :data_source, union: -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource } + + # @!attribute error + # An object representing an error response from the Eval API. + # + # @return [OpenAI::Models::Evals::EvalAPIError] + required :error, -> { OpenAI::Evals::EvalAPIError } + + # @!attribute eval_id + # The identifier of the associated evaluation. + # + # @return [String] + required :eval_id, String + + # @!attribute metadata + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + # + # @return [Hash{Symbol=>String}, nil] + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!attribute model + # The model that is evaluated, if applicable. + # + # @return [String] + required :model, String + + # @!attribute name + # The name of the evaluation run. + # + # @return [String] + required :name, String + + # @!attribute object + # The type of the object. Always "eval.run". + # + # @return [Symbol, :"eval.run"] + required :object, const: :"eval.run" + + # @!attribute per_model_usage + # Usage statistics for each model during the evaluation run. + # + # @return [Array] + required :per_model_usage, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunRetrieveResponse::PerModelUsage] } + + # @!attribute per_testing_criteria_results + # Results per testing criteria applied during the evaluation run. + # + # @return [Array] + required :per_testing_criteria_results, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunRetrieveResponse::PerTestingCriteriaResult] } + + # @!attribute report_url + # The URL to the rendered evaluation run report on the UI dashboard. + # + # @return [String] + required :report_url, String + + # @!attribute result_counts + # Counters summarizing the outcomes of the evaluation run. + # + # @return [OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts] + required :result_counts, -> { OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts } + + # @!attribute status + # The status of the evaluation run. + # + # @return [String] + required :status, String + + # @!method initialize(id:, created_at:, data_source:, error:, eval_id:, metadata:, model:, name:, per_model_usage:, per_testing_criteria_results:, report_url:, result_counts:, status:, object: :"eval.run") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunRetrieveResponse} for more details. + # + # A schema representing an evaluation run. + # + # @param id [String] Unique identifier for the evaluation run. + # + # @param created_at [Integer] Unix timestamp (in seconds) when the evaluation run was created. + # + # @param data_source [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses] Information about the run's data source. + # + # @param error [OpenAI::Models::Evals::EvalAPIError] An object representing an error response from the Eval API. 
+ #
+ # @param eval_id [String] The identifier of the associated evaluation.
+ #
+ # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
+ #
+ # @param model [String] The model that is evaluated, if applicable.
+ #
+ # @param name [String] The name of the evaluation run.
+ #
+ # @param per_model_usage [Array] Usage statistics for each model during the evaluation run.
+ #
+ # @param per_testing_criteria_results [Array] Results per testing criteria applied during the evaluation run.
+ #
+ # @param report_url [String] The URL to the rendered evaluation run report on the UI dashboard.
+ #
+ # @param result_counts [OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts] Counters summarizing the outcomes of the evaluation run.
+ #
+ # @param status [String] The status of the evaluation run.
+ #
+ # @param object [Symbol, :"eval.run"] The type of the object. Always "eval.run".
+
+ # Information about the run's data source.
+ #
+ # @see OpenAI::Models::Evals::RunRetrieveResponse#data_source
+ module DataSource
+ extend OpenAI::Internal::Type::Union
+
+ discriminator :type
+
+ # A JsonlRunDataSource object that specifies a JSONL file that matches the eval
+ variant :jsonl, -> { OpenAI::Evals::CreateEvalJSONLRunDataSource }
+
+ # A CompletionsRunDataSource object describing a model sampling configuration.
+ variant :completions, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource }
+
+ # A ResponsesRunDataSource object describing a model sampling configuration.
+ variant :responses, -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses }
+
+ class Responses < OpenAI::Internal::Type::BaseModel
+ # @!attribute source
+ # Determines what populates the `item` namespace in this run's data source.
+ #
+ # @return [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Responses]
+ required :source, union: -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source }
+
+ # @!attribute type
+ # The type of run data source. Always `responses`.
+ #
+ # @return [Symbol, :responses]
+ required :type, const: :responses
+
+ # @!attribute input_messages
+ # Used when sampling from a model. Dictates the structure of the messages passed
+ # into the model. Can either be a reference to a prebuilt trajectory (ie,
+ # `item.input_trajectory`), or a template with variable references to the `item`
+ # namespace.
+ #
+ # @return [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::ItemReference, nil]
+ optional :input_messages,
+ union: -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages }
+
+ # @!attribute model
+ # The name of the model to use for generating completions (e.g. "o3-mini").
+ #
+ # @return [String, nil]
+ optional :model, String
+
+ # @!attribute sampling_params
+ #
+ # @return [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams, nil]
+ optional :sampling_params,
+ -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams }
+
+ # @!method initialize(source:, input_messages: nil, model: nil, sampling_params: nil, type: :responses)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses} for more
+ # details.
+ #
+ # A ResponsesRunDataSource object describing a model sampling configuration.
+ #
+ # @param source [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Responses] Determines what populates the `item` namespace in this run's data source.
+ #
+ # @param input_messages [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::ItemReference] Used when sampling from a model. Dictates the structure of the messages passed i
+ #
+ # @param model [String] The name of the model to use for generating completions (e.g. "o3-mini").
+ #
+ # @param sampling_params [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams]
+ #
+ # @param type [Symbol, :responses] The type of run data source. Always `responses`.
+
+ # Determines what populates the `item` namespace in this run's data source.
+ #
+ # @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses#source
+ module Source
+ extend OpenAI::Internal::Type::Union
+
+ discriminator :type
+
+ variant :file_content,
+ -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent }
+
+ variant :file_id, -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileID }
+
+ # An EvalResponsesSource object describing a run data source configuration.
+ variant :responses,
+ -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Responses }
+
+ class FileContent < OpenAI::Internal::Type::BaseModel
+ # @!attribute content
+ # The content of the jsonl file.
+ #
+ # @return [Array]
+ required :content,
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent::Content] }
+
+ # @!attribute type
+ # The type of jsonl source. Always `file_content`.
+ #
+ # @return [Symbol, :file_content]
+ required :type, const: :file_content
+
+ # @!method initialize(content:, type: :file_content)
+ # @param content [Array] The content of the jsonl file.
+ #
+ # @param type [Symbol, :file_content] The type of jsonl source. Always `file_content`.
+
+ class Content < OpenAI::Internal::Type::BaseModel
+ # @!attribute item
+ #
+ # @return [Hash{Symbol=>Object}]
+ required :item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
+
+ # @!attribute sample
+ #
+ # @return [Hash{Symbol=>Object}, nil]
+ optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
+
+ # @!method initialize(item:, sample: nil)
+ # @param item [Hash{Symbol=>Object}]
+ # @param sample [Hash{Symbol=>Object}]
+ end
+ end
+
+ class FileID < OpenAI::Internal::Type::BaseModel
+ # @!attribute id
+ # The identifier of the file.
+ #
+ # @return [String]
+ required :id, String
+
+ # @!attribute type
+ # The type of jsonl source. Always `file_id`.
+ #
+ # @return [Symbol, :file_id]
+ required :type, const: :file_id
+
+ # @!method initialize(id:, type: :file_id)
+ # @param id [String] The identifier of the file.
+ #
+ # @param type [Symbol, :file_id] The type of jsonl source. Always `file_id`.
+ end
+
+ class Responses < OpenAI::Internal::Type::BaseModel
+ # @!attribute type
+ # The type of run data source. Always `responses`.
+ #
+ # @return [Symbol, :responses]
+ required :type, const: :responses
+
+ # @!attribute created_after
+ # Only include items created after this timestamp (inclusive). This is a query
+ # parameter used to select responses.
+ #
+ # @return [Integer, nil]
+ optional :created_after, Integer, nil?: true
+
+ # @!attribute created_before
+ # Only include items created before this timestamp (inclusive). This is a query
+ # parameter used to select responses.
+ #
+ # @return [Integer, nil]
+ optional :created_before, Integer, nil?: true
+
+ # @!attribute instructions_search
+ # Optional string to search the 'instructions' field. This is a query parameter
+ # used to select responses.
+ #
+ # @return [String, nil]
+ optional :instructions_search, String, nil?: true
+
+ # @!attribute metadata
+ # Metadata filter for the responses. This is a query parameter used to select
+ # responses.
+ #
+ # @return [Object, nil]
+ optional :metadata, OpenAI::Internal::Type::Unknown, nil?: true
+
+ # @!attribute model
+ # The name of the model to find responses for. This is a query parameter used to
+ # select responses.
+ #
+ # @return [String, nil]
+ optional :model, String, nil?: true
+
+ # @!attribute reasoning_effort
+ # Optional reasoning effort parameter. This is a query parameter used to select
+ # responses.
+ #
+ # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
+ optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
+
+ # @!attribute temperature
+ # Sampling temperature. This is a query parameter used to select responses.
+ #
+ # @return [Float, nil]
+ optional :temperature, Float, nil?: true
+
+ # @!attribute tools
+ # List of tool names. This is a query parameter used to select responses.
+ #
+ # @return [Array, nil]
+ optional :tools, OpenAI::Internal::Type::ArrayOf[String], nil?: true
+
+ # @!attribute top_p
+ # Nucleus sampling parameter. This is a query parameter used to select responses.
+ #
+ # @return [Float, nil]
+ optional :top_p, Float, nil?: true
+
+ # @!attribute users
+ # List of user identifiers. This is a query parameter used to select responses.
+ #
+ # @return [Array, nil]
+ optional :users, OpenAI::Internal::Type::ArrayOf[String], nil?: true
+
+ # @!method initialize(created_after: nil, created_before: nil, instructions_search: nil, metadata: nil, model: nil, reasoning_effort: nil, temperature: nil, tools: nil, top_p: nil, users: nil, type: :responses)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Responses}
+ # for more details.
+ #
+ # An EvalResponsesSource object describing a run data source configuration.
+ #
+ # @param created_after [Integer, nil] Only include items created after this timestamp (inclusive). This is a query par
+ #
+ # @param created_before [Integer, nil] Only include items created before this timestamp (inclusive). This is a query pa
+ #
+ # @param instructions_search [String, nil] Optional string to search the 'instructions' field.
This is a query parameter us + # + # @param metadata [Object, nil] Metadata filter for the responses. This is a query parameter used to select resp + # + # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s + # + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re + # + # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses. + # + # @param tools [Array, nil] List of tool names. This is a query parameter used to select responses. + # + # @param top_p [Float, nil] Nucleus sampling parameter. This is a query parameter used to select responses. + # + # @param users [Array, nil] List of user identifiers. This is a query parameter used to select responses. + # + # @param type [Symbol, :responses] The type of run data source. Always `responses`. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Responses)] + end + + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + # + # @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses#input_messages + module InputMessages + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant :template, + -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template } + + variant :item_reference, + -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::ItemReference } + + class Template < OpenAI::Internal::Type::BaseModel + # @!attribute template + # A list of chat messages forming the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. + # + # @return [Array] + required :template, + -> do + OpenAI::Internal::Type::ArrayOf[ + union: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template + ] + end + + # @!attribute type + # The type of input messages. Always `template`. + # + # @return [Symbol, :template] + required :type, const: :template + + # @!method initialize(template:, type: :template) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template} + # for more details. + # + # @param template [Array] A list of chat messages forming the prompt or context. May include variable refe + # + # @param type [Symbol, :template] The type of input messages. Always `template`. + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. 
+ module Template + extend OpenAI::Internal::Type::Union + + variant -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage } + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + variant -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem } + + class ChatMessage < OpenAI::Internal::Type::BaseModel + # @!attribute content + # The content of the message. + # + # @return [String] + required :content, String + + # @!attribute role + # The role of the message (e.g. "system", "assistant", "user"). + # + # @return [String] + required :role, String + + # @!method initialize(content:, role:) + # @param content [String] The content of the message. + # + # @param role [String] The role of the message (e.g. "system", "assistant", "user"). + end + + class EvalItem < OpenAI::Internal::Type::BaseModel + # @!attribute content + # Inputs to the model - can contain template strings. + # + # @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array] + required :content, + union: -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content } + + # @!attribute role + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + # + # @return [Symbol, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role] + required :role, + enum: -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role } + + # @!attribute type + # The type of the message input. Always `message`. + # + # @return [Symbol, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type, nil] + optional :type, + enum: -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type } + + # @!method initialize(content:, role:, type: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem} + # for more details. + # + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. 
+ # + # @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array] Inputs to the model - can contain template strings. + # + # @param role [Symbol, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role] The role of the message input. One of `user`, `assistant`, `system`, or + # + # @param type [Symbol, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type] The type of the message input. Always `message`. + + # Inputs to the model - can contain template strings. + # + # @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#content + module Content + extend OpenAI::Internal::Type::Union + + # A text input to the model. + variant String + + # A text input to the model. + variant -> { OpenAI::Responses::ResponseInputText } + + # A text output from the model. + variant -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText } + + # An image input to the model. + variant -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage } + + # A list of inputs, each of which may be either an input text or input image object. + variant -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::AnArrayOfInputTextAndInputImageArray } + + class OutputText < OpenAI::Internal::Type::BaseModel + # @!attribute text + # The text output from the model. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the output text. Always `output_text`. + # + # @return [Symbol, :output_text] + required :type, const: :output_text + + # @!method initialize(text:, type: :output_text) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText} + # for more details. + # + # A text output from the model. + # + # @param text [String] The text output from the model. + # + # @param type [Symbol, :output_text] The type of the output text. Always `output_text`. + end + + class InputImage < OpenAI::Internal::Type::BaseModel + # @!attribute image_url + # The URL of the image input. + # + # @return [String] + required :image_url, String + + # @!attribute type + # The type of the image input. Always `input_image`. + # + # @return [Symbol, :input_image] + required :type, const: :input_image + + # @!attribute detail + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + # + # @return [String, nil] + optional :detail, String + + # @!method initialize(image_url:, detail: nil, type: :input_image) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage} + # for more details. + # + # An image input to the model. + # + # @param image_url [String] The URL of the image input. 
+ # + # @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or + # + # @param type [Symbol, :input_image] The type of the image input. Always `input_image`. + end + + # @!method self.variants + # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array)] + + # @type [OpenAI::Internal::Type::Converter] + AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown] + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + # + # @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#role + module Role + extend OpenAI::Internal::Type::Enum + + USER = :user + ASSISTANT = :assistant + SYSTEM = :system + DEVELOPER = :developer + + # @!method self.values + # @return [Array] + end + + # The type of the message input. Always `message`. + # + # @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#type + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE = :message + + # @!method self.values + # @return [Array] + end + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem)] + end + end + + class ItemReference < OpenAI::Internal::Type::BaseModel + # @!attribute item_reference + # A reference to a variable in the `item` namespace. Ie, "item.name" + # + # @return [String] + required :item_reference, String + + # @!attribute type + # The type of input messages. Always `item_reference`. + # + # @return [Symbol, :item_reference] + required :type, const: :item_reference + + # @!method initialize(item_reference:, type: :item_reference) + # @param item_reference [String] A reference to a variable in the `item` namespace. Ie, "item.name" + # + # @param type [Symbol, :item_reference] The type of input messages. Always `item_reference`. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::ItemReference)] + end + + # @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses#sampling_params + class SamplingParams < OpenAI::Internal::Type::BaseModel + # @!attribute max_completion_tokens + # The maximum number of tokens in the generated output. + # + # @return [Integer, nil] + optional :max_completion_tokens, Integer + + # @!attribute seed + # A seed value to initialize the randomness, during sampling. + # + # @return [Integer, nil] + optional :seed, Integer + + # @!attribute temperature + # A higher temperature increases randomness in the outputs. + # + # @return [Float, nil] + optional :temperature, Float + + # @!attribute text + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. 
Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # + # @return [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text, nil] + optional :text, + -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text } + + # @!attribute tools + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + # + # @return [Array, nil] + optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } + + # @!attribute top_p + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + # + # @return [Float, nil] + optional :top_p, Float + + # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams} + # for more details. + # + # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output. + # + # @param seed [Integer] A seed value to initialize the randomness, during sampling. + # + # @param temperature [Float] A higher temperature increases randomness in the outputs. + # + # @param text [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain + # + # @param tools [Array] An array of tools the model may call while generating a response. You + # + # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + + # @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams#text + class Text < OpenAI::Internal::Type::BaseModel + # @!attribute format_ + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. 
+ # + # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil] + optional :format_, + union: -> { + OpenAI::Responses::ResponseFormatTextConfig + }, + api_name: :format + + # @!method initialize(format_: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text} + # for more details. + # + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # + # @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. + end + end + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses)] + end + + class PerModelUsage < OpenAI::Internal::Type::BaseModel + # @!attribute cached_tokens + # The number of tokens retrieved from cache. + # + # @return [Integer] + required :cached_tokens, Integer + + # @!attribute completion_tokens + # The number of completion tokens generated. + # + # @return [Integer] + required :completion_tokens, Integer + + # @!attribute invocation_count + # The number of invocations. + # + # @return [Integer] + required :invocation_count, Integer + + # @!attribute model_name + # The name of the model. + # + # @return [String] + required :model_name, String + + # @!attribute prompt_tokens + # The number of prompt tokens used. + # + # @return [Integer] + required :prompt_tokens, Integer + + # @!attribute total_tokens + # The total number of tokens used. + # + # @return [Integer] + required :total_tokens, Integer + + # @!method initialize(cached_tokens:, completion_tokens:, invocation_count:, model_name:, prompt_tokens:, total_tokens:) + # @param cached_tokens [Integer] The number of tokens retrieved from cache. + # + # @param completion_tokens [Integer] The number of completion tokens generated. + # + # @param invocation_count [Integer] The number of invocations. + # + # @param model_name [String] The name of the model. + # + # @param prompt_tokens [Integer] The number of prompt tokens used. + # + # @param total_tokens [Integer] The total number of tokens used. + end + + class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel + # @!attribute failed + # Number of tests failed for this criteria. + # + # @return [Integer] + required :failed, Integer + + # @!attribute passed + # Number of tests passed for this criteria. + # + # @return [Integer] + required :passed, Integer + + # @!attribute testing_criteria + # A description of the testing criteria. + # + # @return [String] + required :testing_criteria, String + + # @!method initialize(failed:, passed:, testing_criteria:) + # @param failed [Integer] Number of tests failed for this criteria. + # + # @param passed [Integer] Number of tests passed for this criteria. + # + # @param testing_criteria [String] A description of the testing criteria. 
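
`PerTestingCriteriaResult` exposes only raw `passed`/`failed` counts, so pass rates are derived client-side. An illustrative sketch over made-up results shaped like the attributes above:

results = [
  { testing_criteria: "string-check-grader", passed: 18, failed: 2 },
  { testing_criteria: "label-model-grader", passed: 15, failed: 5 }
]

results.each do |r|
  total = r[:passed] + r[:failed]
  rate = total.zero? ? 0.0 : r[:passed].fdiv(total)
  puts format("%-22s %2d/%-2d passed (%.0f%%)", r[:testing_criteria], r[:passed], total, rate * 100)
end
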
+ end + + # @see OpenAI::Models::Evals::RunRetrieveResponse#result_counts + class ResultCounts < OpenAI::Internal::Type::BaseModel + # @!attribute errored + # Number of output items that resulted in an error. + # + # @return [Integer] + required :errored, Integer + + # @!attribute failed + # Number of output items that failed to pass the evaluation. + # + # @return [Integer] + required :failed, Integer + + # @!attribute passed + # Number of output items that passed the evaluation. + # + # @return [Integer] + required :passed, Integer + + # @!attribute total + # Total number of executed output items. + # + # @return [Integer] + required :total, Integer + + # @!method initialize(errored:, failed:, passed:, total:) + # Counters summarizing the outcomes of the evaluation run. + # + # @param errored [Integer] Number of output items that resulted in an error. + # + # @param failed [Integer] Number of output items that failed to pass the evaluation. + # + # @param passed [Integer] Number of output items that passed the evaluation. + # + # @param total [Integer] Total number of executed output items. + end + end + end + end +end diff --git a/lib/openai/models/evals/runs/output_item_list_params.rb b/lib/openai/models/evals/runs/output_item_list_params.rb new file mode 100644 index 00000000..7fd27487 --- /dev/null +++ b/lib/openai/models/evals/runs/output_item_list_params.rb @@ -0,0 +1,86 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Evals + module Runs + # @see OpenAI::Resources::Evals::Runs::OutputItems#list + class OutputItemListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute eval_id + # + # @return [String] + required :eval_id, String + + # @!attribute after + # Identifier for the last output item from the previous pagination request. + # + # @return [String, nil] + optional :after, String + + # @!attribute limit + # Number of output items to retrieve. + # + # @return [Integer, nil] + optional :limit, Integer + + # @!attribute order + # Sort order for output items by timestamp. Use `asc` for ascending order or + # `desc` for descending order. Defaults to `asc`. + # + # @return [Symbol, OpenAI::Models::Evals::Runs::OutputItemListParams::Order, nil] + optional :order, enum: -> { OpenAI::Evals::Runs::OutputItemListParams::Order } + + # @!attribute status + # Filter output items by status. Use `failed` to filter by failed output items or + # `pass` to filter by passed output items. + # + # @return [Symbol, OpenAI::Models::Evals::Runs::OutputItemListParams::Status, nil] + optional :status, enum: -> { OpenAI::Evals::Runs::OutputItemListParams::Status } + + # @!method initialize(eval_id:, after: nil, limit: nil, order: nil, status: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::Runs::OutputItemListParams} for more details. + # + # @param eval_id [String] + # + # @param after [String] Identifier for the last output item from the previous pagination request. + # + # @param limit [Integer] Number of output items to retrieve. + # + # @param order [Symbol, OpenAI::Models::Evals::Runs::OutputItemListParams::Order] Sort order for output items by timestamp. Use `asc` for ascending order or `desc + # + # @param status [Symbol, OpenAI::Models::Evals::Runs::OutputItemListParams::Status] Filter output items by status. 
Use `failed` to filter by failed output
+ #
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
+
+ # Sort order for output items by timestamp. Use `asc` for ascending order or
+ # `desc` for descending order. Defaults to `asc`.
+ module Order
+ extend OpenAI::Internal::Type::Enum
+
+ ASC = :asc
+ DESC = :desc
+
+ # @!method self.values
+ # @return [Array]
+ end
+
+ # Filter output items by status. Use `failed` to filter by failed output items or
+ # `pass` to filter by passed output items.
+ module Status
+ extend OpenAI::Internal::Type::Enum
+
+ FAIL = :fail
+ PASS = :pass
+
+ # @!method self.values
+ # @return [Array]
+ end
+ end
+ end
+ end
+end
diff --git a/lib/openai/models/evals/runs/output_item_list_response.rb b/lib/openai/models/evals/runs/output_item_list_response.rb
new file mode 100644
index 00000000..d3271c97
--- /dev/null
+++ b/lib/openai/models/evals/runs/output_item_list_response.rb
@@ -0,0 +1,265 @@
+# frozen_string_literal: true
+
+module OpenAI
+ module Models
+ module Evals
+ module Runs
+ # @see OpenAI::Resources::Evals::Runs::OutputItems#list
+ class OutputItemListResponse < OpenAI::Internal::Type::BaseModel
+ # @!attribute id
+ # Unique identifier for the evaluation run output item.
+ #
+ # @return [String]
+ required :id, String
+
+ # @!attribute created_at
+ # Unix timestamp (in seconds) when the evaluation run was created.
+ #
+ # @return [Integer]
+ required :created_at, Integer
+
+ # @!attribute datasource_item
+ # Details of the input data source item.
+ #
+ # @return [Hash{Symbol=>Object}]
+ required :datasource_item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
+
+ # @!attribute datasource_item_id
+ # The identifier for the data source item.
+ #
+ # @return [Integer]
+ required :datasource_item_id, Integer
+
+ # @!attribute eval_id
+ # The identifier of the evaluation group.
+ #
+ # @return [String]
+ required :eval_id, String
+
+ # @!attribute object
+ # The type of the object. Always "eval.run.output_item".
+ #
+ # @return [Symbol, :"eval.run.output_item"]
+ required :object, const: :"eval.run.output_item"
+
+ # @!attribute results
+ # A list of results from the evaluation run.
+ #
+ # @return [Array<Hash{Symbol=>Object}>]
+ required :results,
+ OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]]
+
+ # @!attribute run_id
+ # The identifier of the evaluation run associated with this output item.
+ #
+ # @return [String]
+ required :run_id, String
+
+ # @!attribute sample
+ # A sample containing the input and output of the evaluation run.
+ #
+ # @return [OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample]
+ required :sample, -> { OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample }
+
+ # @!attribute status
+ # The status of the evaluation run.
+ #
+ # @return [String]
+ required :status, String
+
+ # @!method initialize(id:, created_at:, datasource_item:, datasource_item_id:, eval_id:, results:, run_id:, sample:, status:, object: :"eval.run.output_item")
+ # A schema representing an evaluation run output item.
+ #
+ # @param id [String] Unique identifier for the evaluation run output item.
+ #
+ # @param created_at [Integer] Unix timestamp (in seconds) when the evaluation run was created.
+ #
+ # @param datasource_item [Hash{Symbol=>Object}] Details of the input data source item.
+ #
+ # @param datasource_item_id [Integer] The identifier for the data source item.
+ #
+ # @param eval_id [String] The identifier of the evaluation group.
+ #
+ # @param results [Array<Hash{Symbol=>Object}>] A list of results from the evaluation run.
+ #
+ # @param run_id [String] The identifier of the evaluation run associated with this output item.
+ #
+ # @param sample [OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample] A sample containing the input and output of the evaluation run.
+ #
+ # @param status [String] The status of the evaluation run.
+ #
+ # @param object [Symbol, :"eval.run.output_item"] The type of the object. Always "eval.run.output_item".
+
+ # @see OpenAI::Models::Evals::Runs::OutputItemListResponse#sample
+ class Sample < OpenAI::Internal::Type::BaseModel
+ # @!attribute error
+ # An object representing an error response from the Eval API.
+ #
+ # @return [OpenAI::Models::Evals::EvalAPIError]
+ required :error, -> { OpenAI::Evals::EvalAPIError }
+
+ # @!attribute finish_reason
+ # The reason why the sample generation was finished.
+ #
+ # @return [String]
+ required :finish_reason, String
+
+ # @!attribute input
+ # An array of input messages.
+ #
+ # @return [Array]
+ required :input,
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Input] }
+
+ # @!attribute max_completion_tokens
+ # The maximum number of tokens allowed for completion.
+ #
+ # @return [Integer]
+ required :max_completion_tokens, Integer
+
+ # @!attribute model
+ # The model used for generating the sample.
+ #
+ # @return [String]
+ required :model, String
+
+ # @!attribute output
+ # An array of output messages.
+ #
+ # @return [Array]
+ required :output,
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Output] }
+
+ # @!attribute seed
+ # The seed used for generating the sample.
+ #
+ # @return [Integer]
+ required :seed, Integer
+
+ # @!attribute temperature
+ # The sampling temperature used.
+ #
+ # @return [Float]
+ required :temperature, Float
+
+ # @!attribute top_p
+ # The top_p value used for sampling.
+ #
+ # @return [Float]
+ required :top_p, Float
+
+ # @!attribute usage
+ # Token usage details for the sample.
+ #
+ # @return [OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage]
+ required :usage, -> { OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage }
+
+ # @!method initialize(error:, finish_reason:, input:, max_completion_tokens:, model:, output:, seed:, temperature:, top_p:, usage:)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample} for more details.
+ #
+ # A sample containing the input and output of the evaluation run.
+ #
+ # @param error [OpenAI::Models::Evals::EvalAPIError] An object representing an error response from the Eval API.
+ #
+ # @param finish_reason [String] The reason why the sample generation was finished.
+ #
+ # @param input [Array] An array of input messages.
+ #
+ # @param max_completion_tokens [Integer] The maximum number of tokens allowed for completion.
+ #
+ # @param model [String] The model used for generating the sample.
+ #
+ # @param output [Array] An array of output messages.
+ #
+ # @param seed [Integer] The seed used for generating the sample.
+ #
+ # @param temperature [Float] The sampling temperature used.
+ #
+ # @param top_p [Float] The top_p value used for sampling.
+ #
+ # @param usage [OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage] Token usage details for the sample.
+
+ class Input < OpenAI::Internal::Type::BaseModel
+ # @!attribute content
+ # The content of the message.
+ # + # @return [String] + required :content, String + + # @!attribute role + # The role of the message sender (e.g., system, user, developer). + # + # @return [String] + required :role, String + + # @!method initialize(content:, role:) + # An input message. + # + # @param content [String] The content of the message. + # + # @param role [String] The role of the message sender (e.g., system, user, developer). + end + + class Output < OpenAI::Internal::Type::BaseModel + # @!attribute content + # The content of the message. + # + # @return [String, nil] + optional :content, String + + # @!attribute role + # The role of the message (e.g. "system", "assistant", "user"). + # + # @return [String, nil] + optional :role, String + + # @!method initialize(content: nil, role: nil) + # @param content [String] The content of the message. + # + # @param role [String] The role of the message (e.g. "system", "assistant", "user"). + end + + # @see OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample#usage + class Usage < OpenAI::Internal::Type::BaseModel + # @!attribute cached_tokens + # The number of tokens retrieved from cache. + # + # @return [Integer] + required :cached_tokens, Integer + + # @!attribute completion_tokens + # The number of completion tokens generated. + # + # @return [Integer] + required :completion_tokens, Integer + + # @!attribute prompt_tokens + # The number of prompt tokens used. + # + # @return [Integer] + required :prompt_tokens, Integer + + # @!attribute total_tokens + # The total number of tokens used. + # + # @return [Integer] + required :total_tokens, Integer + + # @!method initialize(cached_tokens:, completion_tokens:, prompt_tokens:, total_tokens:) + # Token usage details for the sample. + # + # @param cached_tokens [Integer] The number of tokens retrieved from cache. + # + # @param completion_tokens [Integer] The number of completion tokens generated. + # + # @param prompt_tokens [Integer] The number of prompt tokens used. + # + # @param total_tokens [Integer] The total number of tokens used. 
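
Each output item's `sample.usage` carries the four counters documented above, so a page of results rolls up into a single tally. The sketch below folds over plain hashes with fabricated numbers; in practice the items would come from the `OpenAI::Resources::Evals::Runs::OutputItems#list` method named in the `@see` tags.

items = [
  { status: "pass", sample: { usage: { cached_tokens: 10, completion_tokens: 40,
                                       prompt_tokens: 120, total_tokens: 160 } } },
  { status: "fail", sample: { usage: { cached_tokens: 0, completion_tokens: 55,
                                       prompt_tokens: 130, total_tokens: 185 } } }
]

totals = Hash.new(0)
items.each { |item| item[:sample][:usage].each { |k, v| totals[k] += v } }

passed = items.count { |item| item[:status] == "pass" }
puts "#{passed}/#{items.size} passed, #{totals[:total_tokens]} total tokens"
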
+ end + end + end + end + end + end +end diff --git a/lib/openai/models/evals/runs/output_item_retrieve_params.rb b/lib/openai/models/evals/runs/output_item_retrieve_params.rb new file mode 100644 index 00000000..d85fa9e5 --- /dev/null +++ b/lib/openai/models/evals/runs/output_item_retrieve_params.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Evals + module Runs + # @see OpenAI::Resources::Evals::Runs::OutputItems#retrieve + class OutputItemRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute eval_id + # + # @return [String] + required :eval_id, String + + # @!attribute run_id + # + # @return [String] + required :run_id, String + + # @!method initialize(eval_id:, run_id:, request_options: {}) + # @param eval_id [String] + # @param run_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end + end +end diff --git a/lib/openai/models/evals/runs/output_item_retrieve_response.rb b/lib/openai/models/evals/runs/output_item_retrieve_response.rb new file mode 100644 index 00000000..e43f1fcf --- /dev/null +++ b/lib/openai/models/evals/runs/output_item_retrieve_response.rb @@ -0,0 +1,266 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Evals + module Runs + # @see OpenAI::Resources::Evals::Runs::OutputItems#retrieve + class OutputItemRetrieveResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # Unique identifier for the evaluation run output item. + # + # @return [String] + required :id, String + + # @!attribute created_at + # Unix timestamp (in seconds) when the evaluation run was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute datasource_item + # Details of the input data source item. + # + # @return [Hash{Symbol=>Object}] + required :datasource_item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute datasource_item_id + # The identifier for the data source item. + # + # @return [Integer] + required :datasource_item_id, Integer + + # @!attribute eval_id + # The identifier of the evaluation group. + # + # @return [String] + required :eval_id, String + + # @!attribute object + # The type of the object. Always "eval.run.output_item". + # + # @return [Symbol, :"eval.run.output_item"] + required :object, const: :"eval.run.output_item" + + # @!attribute results + # A list of results from the evaluation run. + # + # @return [ArrayObject}>] + required :results, + OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]] + + # @!attribute run_id + # The identifier of the evaluation run associated with this output item. + # + # @return [String] + required :run_id, String + + # @!attribute sample + # A sample containing the input and output of the evaluation run. + # + # @return [OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample] + required :sample, -> { OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample } + + # @!attribute status + # The status of the evaluation run. + # + # @return [String] + required :status, String + + # @!method initialize(id:, created_at:, datasource_item:, datasource_item_id:, eval_id:, results:, run_id:, sample:, status:, object: :"eval.run.output_item") + # A schema representing an evaluation run output item. + # + # @param id [String] Unique identifier for the evaluation run output item. 
+ # + # @param created_at [Integer] Unix timestamp (in seconds) when the evaluation run was created. + # + # @param datasource_item [Hash{Symbol=>Object}] Details of the input data source item. + # + # @param datasource_item_id [Integer] The identifier for the data source item. + # + # @param eval_id [String] The identifier of the evaluation group. + # + # @param results [Array<Hash{Symbol=>Object}>] A list of results from the evaluation run. + # + # @param run_id [String] The identifier of the evaluation run associated with this output item. + # + # @param sample [OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample] A sample containing the input and output of the evaluation run. + # + # @param status [String] The status of the evaluation run. + # + # @param object [Symbol, :"eval.run.output_item"] The type of the object. Always "eval.run.output_item". + + # @see OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse#sample + class Sample < OpenAI::Internal::Type::BaseModel + # @!attribute error + # An object representing an error response from the Eval API. + # + # @return [OpenAI::Models::Evals::EvalAPIError] + required :error, -> { OpenAI::Evals::EvalAPIError } + + # @!attribute finish_reason + # The reason why the sample generation was finished. + # + # @return [String] + required :finish_reason, String + + # @!attribute input + # An array of input messages. + # + # @return [Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input>] + required :input, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input] } + + # @!attribute max_completion_tokens + # The maximum number of tokens allowed for completion. + # + # @return [Integer] + required :max_completion_tokens, Integer + + # @!attribute model + # The model used for generating the sample. + # + # @return [String] + required :model, String + + # @!attribute output + # An array of output messages. + # + # @return [Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output>] + required :output, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output] } + + # @!attribute seed + # The seed used for generating the sample. + # + # @return [Integer] + required :seed, Integer + + # @!attribute temperature + # The sampling temperature used. + # + # @return [Float] + required :temperature, Float + + # @!attribute top_p + # The top_p value used for sampling. + # + # @return [Float] + required :top_p, Float + + # @!attribute usage + # Token usage details for the sample. + # + # @return [OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage] + required :usage, -> { OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage } + + # @!method initialize(error:, finish_reason:, input:, max_completion_tokens:, model:, output:, seed:, temperature:, top_p:, usage:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample} for more + # details. + # + # A sample containing the input and output of the evaluation run. + # + # @param error [OpenAI::Models::Evals::EvalAPIError] An object representing an error response from the Eval API. + # + # @param finish_reason [String] The reason why the sample generation was finished. + # + # @param input [Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input>] An array of input messages. + # + # @param max_completion_tokens [Integer] The maximum number of tokens allowed for completion. + # + # @param model [String] The model used for generating the sample. + # + # @param output [Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output>] An array of output messages.
+ # + # @param seed [Integer] The seed used for generating the sample. + # + # @param temperature [Float] The sampling temperature used. + # + # @param top_p [Float] The top_p value used for sampling. + # + # @param usage [OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage] Token usage details for the sample. + + class Input < OpenAI::Internal::Type::BaseModel + # @!attribute content + # The content of the message. + # + # @return [String] + required :content, String + + # @!attribute role + # The role of the message sender (e.g., system, user, developer). + # + # @return [String] + required :role, String + + # @!method initialize(content:, role:) + # An input message. + # + # @param content [String] The content of the message. + # + # @param role [String] The role of the message sender (e.g., system, user, developer). + end + + class Output < OpenAI::Internal::Type::BaseModel + # @!attribute content + # The content of the message. + # + # @return [String, nil] + optional :content, String + + # @!attribute role + # The role of the message (e.g. "system", "assistant", "user"). + # + # @return [String, nil] + optional :role, String + + # @!method initialize(content: nil, role: nil) + # @param content [String] The content of the message. + # + # @param role [String] The role of the message (e.g. "system", "assistant", "user"). + end + + # @see OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample#usage + class Usage < OpenAI::Internal::Type::BaseModel + # @!attribute cached_tokens + # The number of tokens retrieved from cache. + # + # @return [Integer] + required :cached_tokens, Integer + + # @!attribute completion_tokens + # The number of completion tokens generated. + # + # @return [Integer] + required :completion_tokens, Integer + + # @!attribute prompt_tokens + # The number of prompt tokens used. + # + # @return [Integer] + required :prompt_tokens, Integer + + # @!attribute total_tokens + # The total number of tokens used. + # + # @return [Integer] + required :total_tokens, Integer + + # @!method initialize(cached_tokens:, completion_tokens:, prompt_tokens:, total_tokens:) + # Token usage details for the sample. + # + # @param cached_tokens [Integer] The number of tokens retrieved from cache. + # + # @param completion_tokens [Integer] The number of completion tokens generated. + # + # @param prompt_tokens [Integer] The number of prompt tokens used. + # + # @param total_tokens [Integer] The total number of tokens used. + end + end + end + end + end + end +end diff --git a/lib/openai/models/file_chunking_strategy.rb b/lib/openai/models/file_chunking_strategy.rb index 948ebefc..9d196745 100644 --- a/lib/openai/models/file_chunking_strategy.rb +++ b/lib/openai/models/file_chunking_strategy.rb @@ -2,16 +2,19 @@ module OpenAI module Models - # @abstract - # # The strategy used to chunk the file. - class FileChunkingStrategy < OpenAI::Union + module FileChunkingStrategy + extend OpenAI::Internal::Type::Union + discriminator :type - variant :static, -> { OpenAI::Models::StaticFileChunkingStrategyObject } + variant :static, -> { OpenAI::StaticFileChunkingStrategyObject } # This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. 
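The chunking-strategy unions converted here discriminate on `type`, so request-side values can be supplied as plain hashes. A sketch of passing a `static` strategy when creating a vector store; the `vector_stores` resource and the 800/400 token values are assumptions, not shown in this diff:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# The :static discriminator selects the static chunking-strategy variant.
vector_store = client.vector_stores.create(
  name: "docs",
  file_ids: ["file_abc123"], # chunking_strategy only applies when file_ids is non-empty
  chunking_strategy: {
    type: :static,
    static: {max_chunk_size_tokens: 800, chunk_overlap_tokens: 400}
  }
)
puts vector_store.id
```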
- variant :other, -> { OpenAI::Models::OtherFileChunkingStrategyObject } + variant :other, -> { OpenAI::OtherFileChunkingStrategyObject } + + # @!method self.variants + # @return [Array(OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject)] end end end diff --git a/lib/openai/models/file_chunking_strategy_param.rb b/lib/openai/models/file_chunking_strategy_param.rb index 77ca2e6a..8f756013 100644 --- a/lib/openai/models/file_chunking_strategy_param.rb +++ b/lib/openai/models/file_chunking_strategy_param.rb @@ -2,18 +2,21 @@ module OpenAI module Models - # @abstract - # # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. - class FileChunkingStrategyParam < OpenAI::Union + # strategy. Only applicable if `file_ids` is non-empty. + module FileChunkingStrategyParam + extend OpenAI::Internal::Type::Union + discriminator :type # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - variant :auto, -> { OpenAI::Models::AutoFileChunkingStrategyParam } + variant :auto, -> { OpenAI::AutoFileChunkingStrategyParam } # Customize your own chunking strategy by setting chunk size and chunk overlap. - variant :static, -> { OpenAI::Models::StaticFileChunkingStrategyObjectParam } + variant :static, -> { OpenAI::StaticFileChunkingStrategyObjectParam } + + # @!method self.variants + # @return [Array(OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam)] end end end diff --git a/lib/openai/models/file_content.rb b/lib/openai/models/file_content.rb new file mode 100644 index 00000000..0ffd9f84 --- /dev/null +++ b/lib/openai/models/file_content.rb @@ -0,0 +1,7 @@ +# frozen_string_literal: true + +module OpenAI + module Models + FileContent = String + end +end diff --git a/lib/openai/models/file_content_params.rb b/lib/openai/models/file_content_params.rb index 38b39c6c..22d607fb 100644 --- a/lib/openai/models/file_content_params.rb +++ b/lib/openai/models/file_content_params.rb @@ -2,17 +2,13 @@ module OpenAI module Models - class FileContentParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Files#content + class FileContentParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/file_create_params.rb b/lib/openai/models/file_create_params.rb index 23e108a1..7de18db4 100644 --- a/lib/openai/models/file_create_params.rb +++ b/lib/openai/models/file_create_params.rb @@ -2,34 +2,71 @@ module OpenAI module Models - class FileCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Files#create + class FileCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute file # The File object (not file name) 
to be uploaded. # - # @return [IO, StringIO] - required :file, IO + # @return [Pathname, StringIO, IO, String, OpenAI::FilePart] + required :file, OpenAI::Internal::Type::FileInput # @!attribute purpose # The intended purpose of the uploaded file. One of: - `assistants`: Used in the - # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for - # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: - # Flexible file type for any purpose - `evals`: Used for eval data sets + # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + # Flexible file type for any purpose - `evals`: Used for eval data sets # # @return [Symbol, OpenAI::Models::FilePurpose] - required :purpose, enum: -> { OpenAI::Models::FilePurpose } + required :purpose, enum: -> { OpenAI::FilePurpose } - # @!parse - # # @param file [IO, StringIO] - # # @param purpose [Symbol, OpenAI::Models::FilePurpose] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(file:, purpose:, request_options: {}, **) = super + # @!attribute expires_after + # The expiration policy for a file. By default, files with `purpose=batch` expire + # after 30 days and all other files are persisted until they are manually deleted. + # + # @return [OpenAI::Models::FileCreateParams::ExpiresAfter, nil] + optional :expires_after, -> { OpenAI::FileCreateParams::ExpiresAfter } + + # @!method initialize(file:, purpose:, expires_after: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FileCreateParams} for more details. + # + # @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The File object (not file name) to be uploaded. + # + # @param purpose [Symbol, OpenAI::Models::FilePurpose] The intended purpose of the uploaded file. One of: - `assistants`: Used in the A + # + # @param expires_after [OpenAI::Models::FileCreateParams::ExpiresAfter] The expiration policy for a file. By default, files with `purpose=batch` expire + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + # @!attribute anchor + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `created_at`. + # + # @return [Symbol, :created_at] + required :anchor, const: :created_at + + # @!attribute seconds + # The number of seconds after the anchor time that the file will expire. Must be + # between 3600 (1 hour) and 2592000 (30 days). + # + # @return [Integer] + required :seconds, Integer - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(seconds:, anchor: :created_at) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FileCreateParams::ExpiresAfter} for more details. + # + # The expiration policy for a file. By default, files with `purpose=batch` expire + # after 30 days and all other files are persisted until they are manually deleted. + # + # @param seconds [Integer] The number of seconds after the anchor time that the file will expire. Must be b + # + # @param anchor [Symbol, :created_at] Anchor timestamp after which the expiration policy applies. 
Supported anchors: ` + end end end end diff --git a/lib/openai/models/file_delete_params.rb b/lib/openai/models/file_delete_params.rb index 54238ca0..3893e91d 100644 --- a/lib/openai/models/file_delete_params.rb +++ b/lib/openai/models/file_delete_params.rb @@ -2,17 +2,13 @@ module OpenAI module Models - class FileDeleteParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Files#delete + class FileDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/file_deleted.rb b/lib/openai/models/file_deleted.rb index 9eb014a2..28517280 100644 --- a/lib/openai/models/file_deleted.rb +++ b/lib/openai/models/file_deleted.rb @@ -2,7 +2,8 @@ module OpenAI module Models - class FileDeleted < OpenAI::BaseModel + # @see OpenAI::Resources::Files#delete + class FileDeleted < OpenAI::Internal::Type::BaseModel # @!attribute id # # @return [String] @@ -11,21 +12,17 @@ class FileDeleted < OpenAI::BaseModel # @!attribute deleted # # @return [Boolean] - required :deleted, OpenAI::BooleanModel + required :deleted, OpenAI::Internal::Type::Boolean # @!attribute object # # @return [Symbol, :file] required :object, const: :file - # @!parse - # # @param id [String] - # # @param deleted [Boolean] - # # @param object [Symbol, :file] - # # - # def initialize(id:, deleted:, object: :file, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, deleted:, object: :file) + # @param id [String] + # @param deleted [Boolean] + # @param object [Symbol, :file] end end end diff --git a/lib/openai/models/file_list_params.rb b/lib/openai/models/file_list_params.rb index 3a3c6b9b..193eebac 100644 --- a/lib/openai/models/file_list_params.rb +++ b/lib/openai/models/file_list_params.rb @@ -2,76 +2,64 @@ module OpenAI module Models - class FileListParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Files#list + class FileListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!attribute [r] after + # @!attribute after # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. # # @return [String, nil] optional :after, String - # @!parse - # # @return [String] - # attr_writer :after - - # @!attribute [r] limit + # @!attribute limit # A limit on the number of objects to be returned. Limit can range between 1 and - # 10,000, and the default is 10,000. + # 10,000, and the default is 10,000. 
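`FileCreateParams` above accepts any `FileInput` plus the newly added optional `expires_after` block. A minimal upload sketch; the filename and the 7-day window are illustrative (per the docs above, `seconds` must fall between 3600 and 2592000):

```ruby
require "openai"
require "pathname"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

file = client.files.create(
  file: Pathname("batch_input.jsonl"), # Pathname, IO, String, and FilePart are all accepted
  purpose: :batch,                     # one of the FilePurpose enum values
  expires_after: {anchor: :created_at, seconds: 7 * 24 * 60 * 60}
)
puts file.id
```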
# # @return [Integer, nil] optional :limit, Integer - # @!parse - # # @return [Integer] - # attr_writer :limit - - # @!attribute [r] order + # @!attribute order # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. # # @return [Symbol, OpenAI::Models::FileListParams::Order, nil] - optional :order, enum: -> { OpenAI::Models::FileListParams::Order } - - # @!parse - # # @return [Symbol, OpenAI::Models::FileListParams::Order] - # attr_writer :order + optional :order, enum: -> { OpenAI::FileListParams::Order } - # @!attribute [r] purpose + # @!attribute purpose # Only return files with the given purpose. # # @return [String, nil] optional :purpose, String - # @!parse - # # @return [String] - # attr_writer :purpose - - # @!parse - # # @param after [String] - # # @param limit [Integer] - # # @param order [Symbol, OpenAI::Models::FileListParams::Order] - # # @param purpose [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(after: nil, limit: nil, order: nil, purpose: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(after: nil, limit: nil, order: nil, purpose: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FileListParams} for more details. # + # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place + # + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1 + # + # @param order [Symbol, OpenAI::Models::FileListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord + # + # @param purpose [String] Only return files with the given purpose. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. - class Order < OpenAI::Enum + # order and `desc` for descending order. + module Order + extend OpenAI::Internal::Type::Enum + ASC = :asc DESC = :desc - finalize! + # @!method self.values + # @return [Array<Symbol>] end end end diff --git a/lib/openai/models/file_object.rb b/lib/openai/models/file_object.rb index f0476f5d..6efe2e16 100644 --- a/lib/openai/models/file_object.rb +++ b/lib/openai/models/file_object.rb @@ -2,7 +2,8 @@ module OpenAI module Models - class FileObject < OpenAI::BaseModel + # @see OpenAI::Resources::Files#create + class FileObject < OpenAI::Internal::Type::BaseModel # @!attribute id # The file identifier, which can be referenced in the API endpoints. # @@ -35,76 +36,68 @@ class FileObject < OpenAI::BaseModel # @!attribute purpose # The intended purpose of the file. Supported values are `assistants`, - # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` - # and `vision`. + # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results`, + # `vision`, and `user_data`. # # @return [Symbol, OpenAI::Models::FileObject::Purpose] - required :purpose, enum: -> { OpenAI::Models::FileObject::Purpose } + required :purpose, enum: -> { OpenAI::FileObject::Purpose } # @!attribute status + # @deprecated + # # Deprecated. The current status of the file, which can be either `uploaded`, - # `processed`, or `error`.
# # @return [Symbol, OpenAI::Models::FileObject::Status] - required :status, enum: -> { OpenAI::Models::FileObject::Status } + required :status, enum: -> { OpenAI::FileObject::Status } - # @!attribute [r] expires_at + # @!attribute expires_at # The Unix timestamp (in seconds) for when the file will expire. # # @return [Integer, nil] optional :expires_at, Integer - # @!parse - # # @return [Integer] - # attr_writer :expires_at - - # @!attribute [r] status_details + # @!attribute status_details + # @deprecated + # # Deprecated. For details on why a fine-tuning training file failed validation, - # see the `error` field on `fine_tuning.job`. + # see the `error` field on `fine_tuning.job`. # # @return [String, nil] optional :status_details, String - # @!parse - # # @return [String] - # attr_writer :status_details - - # @!parse - # # The `File` object represents a document that has been uploaded to OpenAI. - # # - # # @param id [String] - # # @param bytes [Integer] - # # @param created_at [Integer] - # # @param filename [String] - # # @param purpose [Symbol, OpenAI::Models::FileObject::Purpose] - # # @param status [Symbol, OpenAI::Models::FileObject::Status] - # # @param expires_at [Integer] - # # @param status_details [String] - # # @param object [Symbol, :file] - # # - # def initialize( - # id:, - # bytes:, - # created_at:, - # filename:, - # purpose:, - # status:, - # expires_at: nil, - # status_details: nil, - # object: :file, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(id:, bytes:, created_at:, filename:, purpose:, status:, expires_at: nil, status_details: nil, object: :file) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FileObject} for more details. + # + # The `File` object represents a document that has been uploaded to OpenAI. + # + # @param id [String] The file identifier, which can be referenced in the API endpoints. + # + # @param bytes [Integer] The size of the file, in bytes. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the file was created. + # + # @param filename [String] The name of the file. + # + # @param purpose [Symbol, OpenAI::Models::FileObject::Purpose] The intended purpose of the file. Supported values are `assistants`, `assistants # + # @param status [Symbol, OpenAI::Models::FileObject::Status] Deprecated. The current status of the file, which can be either `uploaded`, `pro + # + # @param expires_at [Integer] The Unix timestamp (in seconds) for when the file will expire. + # + # @param status_details [String] Deprecated. For details on why a fine-tuning training file failed validation, se + # + # @param object [Symbol, :file] The object type, which is always `file`. + # The intended purpose of the file. Supported values are `assistants`, - # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` - # and `vision`. - class Purpose < OpenAI::Enum + # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results`, + # `vision`, and `user_data`. + # + # @see OpenAI::Models::FileObject#purpose + module Purpose + extend OpenAI::Internal::Type::Enum + ASSISTANTS = :assistants ASSISTANTS_OUTPUT = :assistants_output BATCH = :batch @@ -112,22 +105,27 @@ class Purpose < OpenAI::Enum FINE_TUNE = :"fine-tune" FINE_TUNE_RESULTS = :"fine-tune-results" VISION = :vision + USER_DATA = :user_data - finalize! + # @!method self.values + # @return [Array<Symbol>] end - # @abstract - # # @deprecated # # Deprecated.
The current status of the file, which can be either `uploaded`, - # `processed`, or `error`. - class Status < OpenAI::Enum + # `processed`, or `error`. + # + # @see OpenAI::Models::FileObject#status + module Status + extend OpenAI::Internal::Type::Enum + UPLOADED = :uploaded PROCESSED = :processed ERROR = :error - finalize! + # @!method self.values + # @return [Array<Symbol>] end end end diff --git a/lib/openai/models/file_purpose.rb b/lib/openai/models/file_purpose.rb index 8b4f9af2..0f1ca442 100644 --- a/lib/openai/models/file_purpose.rb +++ b/lib/openai/models/file_purpose.rb @@ -2,13 +2,13 @@ module OpenAI module Models - # @abstract - # # The intended purpose of the uploaded file. One of: - `assistants`: Used in the - # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for - # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: - # Flexible file type for any purpose - `evals`: Used for eval data sets - class FilePurpose < OpenAI::Enum + # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + # Flexible file type for any purpose - `evals`: Used for eval data sets + module FilePurpose + extend OpenAI::Internal::Type::Enum + ASSISTANTS = :assistants BATCH = :batch FINE_TUNE = :"fine-tune" @@ -16,7 +16,8 @@ class FilePurpose < OpenAI::Enum USER_DATA = :user_data EVALS = :evals - finalize! + # @!method self.values + # @return [Array<Symbol>] end end end diff --git a/lib/openai/models/file_retrieve_params.rb b/lib/openai/models/file_retrieve_params.rb index 391639a3..29c0bdc3 100644 --- a/lib/openai/models/file_retrieve_params.rb +++ b/lib/openai/models/file_retrieve_params.rb @@ -2,17 +2,13 @@ module OpenAI module Models - class FileRetrieveParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Files#retrieve + class FileRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/fine_tuning/alpha/grader_run_params.rb b/lib/openai/models/fine_tuning/alpha/grader_run_params.rb new file mode 100644 index 00000000..152ac2a1 --- /dev/null +++ b/lib/openai/models/fine_tuning/alpha/grader_run_params.rb @@ -0,0 +1,76 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + module Alpha + # @see OpenAI::Resources::FineTuning::Alpha::Graders#run + class GraderRunParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute grader + # The grader used for the fine-tuning job. + # + # @return [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] + required :grader, union: -> { OpenAI::FineTuning::Alpha::GraderRunParams::Grader } + + # @!attribute model_sample + # The model sample to be evaluated.
This value will be used to populate the + # `sample` namespace. See + # [the guide](https://platform.openai.com/docs/guides/graders) for more details. + # The `output_json` variable will be populated if the model sample is a valid JSON + # string. + # + # @return [String] + required :model_sample, String + + # @!attribute item + # The dataset item provided to the grader. This will be used to populate the + # `item` namespace. See + # [the guide](https://platform.openai.com/docs/guides/graders) for more details. + # + # @return [Object, nil] + optional :item, OpenAI::Internal::Type::Unknown + + # @!method initialize(grader:, model_sample:, item: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::Alpha::GraderRunParams} for more details. + # + # @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job. + # + # @param model_sample [String] The model sample to be evaluated. This value will be used to populate + # + # @param item [Object] The dataset item provided to the grader. This will be used to populate + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + # The grader used for the fine-tuning job. + module Grader + extend OpenAI::Internal::Type::Union + + discriminator :type + + # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation. + variant :string_check, -> { OpenAI::Graders::StringCheckGrader } + + # A TextSimilarityGrader object which grades text based on similarity metrics. + variant :text_similarity, -> { OpenAI::Graders::TextSimilarityGrader } + + # A PythonGrader object that runs a python script on the input. + variant :python, -> { OpenAI::Graders::PythonGrader } + + # A ScoreModelGrader object that uses a model to assign a score to the input. + variant :score_model, -> { OpenAI::Graders::ScoreModelGrader } + + # A MultiGrader object combines the output of multiple graders to produce a single score. 
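`GraderRunParams` takes one grader variant plus a `model_sample` and an optional `item`. A sketch using the `string_check` variant; the grader's field names (`name`, `input`, `operation`, `reference`) come from the StringCheckGrader model rather than this diff, so treat them as assumptions:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

result = client.fine_tuning.alpha.graders.run(
  grader: {
    type: :string_check,             # discriminator declared in the union above
    name: "exact_match",             # assumed StringCheckGrader fields
    input: "{{sample.output_text}}",
    operation: :eq,
    reference: "{{item.answer}}"
  },
  model_sample: "Paris",
  item: {answer: "Paris"}            # populates the `item` namespace
)
puts result.reward
```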
+ variant :multi, -> { OpenAI::Graders::MultiGrader } + + # @!method self.variants + # @return [Array(OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader)] + end + end + end + end + end +end diff --git a/lib/openai/models/fine_tuning/alpha/grader_run_response.rb b/lib/openai/models/fine_tuning/alpha/grader_run_response.rb new file mode 100644 index 00000000..2b6bdaff --- /dev/null +++ b/lib/openai/models/fine_tuning/alpha/grader_run_response.rb @@ -0,0 +1,175 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + module Alpha + # @see OpenAI::Resources::FineTuning::Alpha::Graders#run + class GraderRunResponse < OpenAI::Internal::Type::BaseModel + # @!attribute metadata + # + # @return [OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata] + required :metadata, -> { OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata } + + # @!attribute model_grader_token_usage_per_model + # + # @return [Hash{Symbol=>Object}] + required :model_grader_token_usage_per_model, + OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute reward + # + # @return [Float] + required :reward, Float + + # @!attribute sub_rewards + # + # @return [Hash{Symbol=>Object}] + required :sub_rewards, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!method initialize(metadata:, model_grader_token_usage_per_model:, reward:, sub_rewards:) + # @param metadata [OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata] + # @param model_grader_token_usage_per_model [Hash{Symbol=>Object}] + # @param reward [Float] + # @param sub_rewards [Hash{Symbol=>Object}] + + # @see OpenAI::Models::FineTuning::Alpha::GraderRunResponse#metadata + class Metadata < OpenAI::Internal::Type::BaseModel + # @!attribute errors + # + # @return [OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::Errors] + required :errors, -> { OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::Errors } + + # @!attribute execution_time + # + # @return [Float] + required :execution_time, Float + + # @!attribute name + # + # @return [String] + required :name, String + + # @!attribute sampled_model_name + # + # @return [String, nil] + required :sampled_model_name, String, nil?: true + + # @!attribute scores + # + # @return [Hash{Symbol=>Object}] + required :scores, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] + + # @!attribute token_usage + # + # @return [Integer, nil] + required :token_usage, Integer, nil?: true + + # @!attribute type + # + # @return [String] + required :type, String + + # @!method initialize(errors:, execution_time:, name:, sampled_model_name:, scores:, token_usage:, type:) + # @param errors [OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::Errors] + # @param execution_time [Float] + # @param name [String] + # @param sampled_model_name [String, nil] + # @param scores [Hash{Symbol=>Object}] + # @param token_usage [Integer, nil] + # @param type [String] + + # @see OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata#errors + class Errors < OpenAI::Internal::Type::BaseModel + # @!attribute formula_parse_error + # + # @return [Boolean] + required :formula_parse_error, OpenAI::Internal::Type::Boolean + + # @!attribute invalid_variable_error + # + # @return [Boolean] + required :invalid_variable_error, OpenAI::Internal::Type::Boolean + 
+ # @!attribute model_grader_parse_error + # + # @return [Boolean] + required :model_grader_parse_error, OpenAI::Internal::Type::Boolean + + # @!attribute model_grader_refusal_error + # + # @return [Boolean] + required :model_grader_refusal_error, OpenAI::Internal::Type::Boolean + + # @!attribute model_grader_server_error + # + # @return [Boolean] + required :model_grader_server_error, OpenAI::Internal::Type::Boolean + + # @!attribute model_grader_server_error_details + # + # @return [String, nil] + required :model_grader_server_error_details, String, nil?: true + + # @!attribute other_error + # + # @return [Boolean] + required :other_error, OpenAI::Internal::Type::Boolean + + # @!attribute python_grader_runtime_error + # + # @return [Boolean] + required :python_grader_runtime_error, OpenAI::Internal::Type::Boolean + + # @!attribute python_grader_runtime_error_details + # + # @return [String, nil] + required :python_grader_runtime_error_details, String, nil?: true + + # @!attribute python_grader_server_error + # + # @return [Boolean] + required :python_grader_server_error, OpenAI::Internal::Type::Boolean + + # @!attribute python_grader_server_error_type + # + # @return [String, nil] + required :python_grader_server_error_type, String, nil?: true + + # @!attribute sample_parse_error + # + # @return [Boolean] + required :sample_parse_error, OpenAI::Internal::Type::Boolean + + # @!attribute truncated_observation_error + # + # @return [Boolean] + required :truncated_observation_error, OpenAI::Internal::Type::Boolean + + # @!attribute unresponsive_reward_error + # + # @return [Boolean] + required :unresponsive_reward_error, OpenAI::Internal::Type::Boolean + + # @!method initialize(formula_parse_error:, invalid_variable_error:, model_grader_parse_error:, model_grader_refusal_error:, model_grader_server_error:, model_grader_server_error_details:, other_error:, python_grader_runtime_error:, python_grader_runtime_error_details:, python_grader_server_error:, python_grader_server_error_type:, sample_parse_error:, truncated_observation_error:, unresponsive_reward_error:) + # @param formula_parse_error [Boolean] + # @param invalid_variable_error [Boolean] + # @param model_grader_parse_error [Boolean] + # @param model_grader_refusal_error [Boolean] + # @param model_grader_server_error [Boolean] + # @param model_grader_server_error_details [String, nil] + # @param other_error [Boolean] + # @param python_grader_runtime_error [Boolean] + # @param python_grader_runtime_error_details [String, nil] + # @param python_grader_server_error [Boolean] + # @param python_grader_server_error_type [String, nil] + # @param sample_parse_error [Boolean] + # @param truncated_observation_error [Boolean] + # @param unresponsive_reward_error [Boolean] + end + end + end + end + end + end +end diff --git a/lib/openai/models/fine_tuning/alpha/grader_validate_params.rb b/lib/openai/models/fine_tuning/alpha/grader_validate_params.rb new file mode 100644 index 00000000..fb0650a0 --- /dev/null +++ b/lib/openai/models/fine_tuning/alpha/grader_validate_params.rb @@ -0,0 +1,49 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + module Alpha + # @see OpenAI::Resources::FineTuning::Alpha::Graders#validate + class GraderValidateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute grader + # The grader used for the fine-tuning job. 
+ # + # @return [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] + required :grader, union: -> { OpenAI::FineTuning::Alpha::GraderValidateParams::Grader } + + # @!method initialize(grader:, request_options: {}) + # @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + # The grader used for the fine-tuning job. + module Grader + extend OpenAI::Internal::Type::Union + + # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation. + variant -> { OpenAI::Graders::StringCheckGrader } + + # A TextSimilarityGrader object which grades text based on similarity metrics. + variant -> { OpenAI::Graders::TextSimilarityGrader } + + # A PythonGrader object that runs a python script on the input. + variant -> { OpenAI::Graders::PythonGrader } + + # A ScoreModelGrader object that uses a model to assign a score to the input. + variant -> { OpenAI::Graders::ScoreModelGrader } + + # A MultiGrader object combines the output of multiple graders to produce a single score. + variant -> { OpenAI::Graders::MultiGrader } + + # @!method self.variants + # @return [Array(OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader)] + end + end + end + end + end +end diff --git a/lib/openai/models/fine_tuning/alpha/grader_validate_response.rb b/lib/openai/models/fine_tuning/alpha/grader_validate_response.rb new file mode 100644 index 00000000..9d7458fc --- /dev/null +++ b/lib/openai/models/fine_tuning/alpha/grader_validate_response.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + module Alpha + # @see OpenAI::Resources::FineTuning::Alpha::Graders#validate + class GraderValidateResponse < OpenAI::Internal::Type::BaseModel + # @!attribute grader + # The grader used for the fine-tuning job. + # + # @return [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader, nil] + optional :grader, union: -> { OpenAI::Models::FineTuning::Alpha::GraderValidateResponse::Grader } + + # @!method initialize(grader: nil) + # @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job. + + # The grader used for the fine-tuning job. + # + # @see OpenAI::Models::FineTuning::Alpha::GraderValidateResponse#grader + module Grader + extend OpenAI::Internal::Type::Union + + # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation. + variant -> { OpenAI::Graders::StringCheckGrader } + + # A TextSimilarityGrader object which grades text based on similarity metrics. 
+ variant -> { OpenAI::Graders::TextSimilarityGrader } + + # A PythonGrader object that runs a python script on the input. + variant -> { OpenAI::Graders::PythonGrader } + + # A ScoreModelGrader object that uses a model to assign a score to the input. + variant -> { OpenAI::Graders::ScoreModelGrader } + + # A MultiGrader object combines the output of multiple graders to produce a single score. + variant -> { OpenAI::Graders::MultiGrader } + + # @!method self.variants + # @return [Array(OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader)] + end + end + end + end + end +end diff --git a/lib/openai/models/fine_tuning/checkpoints/permission_create_params.rb b/lib/openai/models/fine_tuning/checkpoints/permission_create_params.rb new file mode 100644 index 00000000..448911d4 --- /dev/null +++ b/lib/openai/models/fine_tuning/checkpoints/permission_create_params.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + module Checkpoints + # @see OpenAI::Resources::FineTuning::Checkpoints::Permissions#create + class PermissionCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute project_ids + # The project identifiers to grant access to. + # + # @return [Array<String>] + required :project_ids, OpenAI::Internal::Type::ArrayOf[String] + + # @!method initialize(project_ids:, request_options: {}) + # @param project_ids [Array<String>] The project identifiers to grant access to. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end + end +end diff --git a/lib/openai/models/fine_tuning/checkpoints/permission_create_response.rb b/lib/openai/models/fine_tuning/checkpoints/permission_create_response.rb new file mode 100644 index 00000000..efe00e34 --- /dev/null +++ b/lib/openai/models/fine_tuning/checkpoints/permission_create_response.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + module Checkpoints + # @see OpenAI::Resources::FineTuning::Checkpoints::Permissions#create + class PermissionCreateResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The permission identifier, which can be referenced in the API endpoints. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) for when the permission was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute object + # The object type, which is always "checkpoint.permission". + # + # @return [Symbol, :"checkpoint.permission"] + required :object, const: :"checkpoint.permission" + + # @!attribute project_id + # The project identifier that the permission is for. + # + # @return [String] + required :project_id, String + + # @!method initialize(id:, created_at:, project_id:, object: :"checkpoint.permission") + # The `checkpoint.permission` object represents a permission for a fine-tuned + # model checkpoint. + # + # @param id [String] The permission identifier, which can be referenced in the API endpoints. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the permission was created. + # + # @param project_id [String] The project identifier that the permission is for.
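The checkpoint-permission params and responses above map onto a small CRUD surface. A sketch of granting a project access to a fine-tuned checkpoint and later revoking one permission; the checkpoint name and IDs are placeholders, and the exact return shape of `#create` is not shown in this diff:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

checkpoint = "ft:gpt-4o-mini:org:custom:abc123" # hypothetical checkpoint identifier

resp = client.fine_tuning.checkpoints.permissions.create(
  checkpoint,
  project_ids: ["proj_abc123"]
)
pp resp

# Revoke an individual permission by ID (per PermissionDeleteParams,
# the checkpoint travels as a keyword argument).
client.fine_tuning.checkpoints.permissions.delete(
  "cp_abc123",
  fine_tuned_model_checkpoint: checkpoint
)
```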
+ # + # @param object [Symbol, :"checkpoint.permission"] The object type, which is always "checkpoint.permission". + end + end + end + end +end diff --git a/lib/openai/models/fine_tuning/checkpoints/permission_delete_params.rb b/lib/openai/models/fine_tuning/checkpoints/permission_delete_params.rb new file mode 100644 index 00000000..402eb4c2 --- /dev/null +++ b/lib/openai/models/fine_tuning/checkpoints/permission_delete_params.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + module Checkpoints + # @see OpenAI::Resources::FineTuning::Checkpoints::Permissions#delete + class PermissionDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute fine_tuned_model_checkpoint + # + # @return [String] + required :fine_tuned_model_checkpoint, String + + # @!method initialize(fine_tuned_model_checkpoint:, request_options: {}) + # @param fine_tuned_model_checkpoint [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end + end +end diff --git a/lib/openai/models/fine_tuning/checkpoints/permission_delete_response.rb b/lib/openai/models/fine_tuning/checkpoints/permission_delete_response.rb new file mode 100644 index 00000000..483d4664 --- /dev/null +++ b/lib/openai/models/fine_tuning/checkpoints/permission_delete_response.rb @@ -0,0 +1,37 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + module Checkpoints + # @see OpenAI::Resources::FineTuning::Checkpoints::Permissions#delete + class PermissionDeleteResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The ID of the fine-tuned model checkpoint permission that was deleted. + # + # @return [String] + required :id, String + + # @!attribute deleted + # Whether the fine-tuned model checkpoint permission was successfully deleted. + # + # @return [Boolean] + required :deleted, OpenAI::Internal::Type::Boolean + + # @!attribute object + # The object type, which is always "checkpoint.permission". + # + # @return [Symbol, :"checkpoint.permission"] + required :object, const: :"checkpoint.permission" + + # @!method initialize(id:, deleted:, object: :"checkpoint.permission") + # @param id [String] The ID of the fine-tuned model checkpoint permission that was deleted. + # + # @param deleted [Boolean] Whether the fine-tuned model checkpoint permission was successfully deleted. + # + # @param object [Symbol, :"checkpoint.permission"] The object type, which is always "checkpoint.permission". + end + end + end + end +end diff --git a/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_params.rb b/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_params.rb new file mode 100644 index 00000000..d49a0e2d --- /dev/null +++ b/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_params.rb @@ -0,0 +1,61 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + module Checkpoints + # @see OpenAI::Resources::FineTuning::Checkpoints::Permissions#retrieve + class PermissionRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!attribute after + # Identifier for the last permission ID from the previous pagination request. 
+ # + # @return [String, nil] + optional :after, String + + # @!attribute limit + # Number of permissions to retrieve. + # + # @return [Integer, nil] + optional :limit, Integer + + # @!attribute order + # The order in which to retrieve permissions. + # + # @return [Symbol, OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams::Order, nil] + optional :order, enum: -> { OpenAI::FineTuning::Checkpoints::PermissionRetrieveParams::Order } + + # @!attribute project_id + # The ID of the project to get permissions for. + # + # @return [String, nil] + optional :project_id, String + + # @!method initialize(after: nil, limit: nil, order: nil, project_id: nil, request_options: {}) + # @param after [String] Identifier for the last permission ID from the previous pagination request. + # + # @param limit [Integer] Number of permissions to retrieve. + # + # @param order [Symbol, OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams::Order] The order in which to retrieve permissions. + # + # @param project_id [String] The ID of the project to get permissions for. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + # The order in which to retrieve permissions. + module Order + extend OpenAI::Internal::Type::Enum + + ASCENDING = :ascending + DESCENDING = :descending + + # @!method self.values + # @return [Array<Symbol>] + end + end + end + end + end +end diff --git a/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb b/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb new file mode 100644 index 00000000..6ffbdf4d --- /dev/null +++ b/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb @@ -0,0 +1,83 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + module Checkpoints + # @see OpenAI::Resources::FineTuning::Checkpoints::Permissions#retrieve + class PermissionRetrieveResponse < OpenAI::Internal::Type::BaseModel + # @!attribute data + # + # @return [Array<OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data>] + required :data, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data] } + + # @!attribute has_more + # + # @return [Boolean] + required :has_more, OpenAI::Internal::Type::Boolean + + # @!attribute object + # + # @return [Symbol, :list] + required :object, const: :list + + # @!attribute first_id + # + # @return [String, nil] + optional :first_id, String, nil?: true + + # @!attribute last_id + # + # @return [String, nil] + optional :last_id, String, nil?: true + + # @!method initialize(data:, has_more:, first_id: nil, last_id: nil, object: :list) + # @param data [Array<OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data>] + # @param has_more [Boolean] + # @param first_id [String, nil] + # @param last_id [String, nil] + # @param object [Symbol, :list] + + class Data < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The permission identifier, which can be referenced in the API endpoints. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) for when the permission was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute object + # The object type, which is always "checkpoint.permission". + # + # @return [Symbol, :"checkpoint.permission"] + required :object, const: :"checkpoint.permission" + + # @!attribute project_id + # The project identifier that the permission is for.
+ # + # @return [String] + required :project_id, String + + # @!method initialize(id:, created_at:, project_id:, object: :"checkpoint.permission") + # The `checkpoint.permission` object represents a permission for a fine-tuned + # model checkpoint. + # + # @param id [String] The permission identifier, which can be referenced in the API endpoints. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the permission was created. + # + # @param project_id [String] The project identifier that the permission is for. + # + # @param object [Symbol, :"checkpoint.permission"] The object type, which is always "checkpoint.permission". + end + end + end + end + end +end diff --git a/lib/openai/models/fine_tuning/dpo_hyperparameters.rb b/lib/openai/models/fine_tuning/dpo_hyperparameters.rb new file mode 100644 index 00000000..e5dd0bfc --- /dev/null +++ b/lib/openai/models/fine_tuning/dpo_hyperparameters.rb @@ -0,0 +1,112 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + class DpoHyperparameters < OpenAI::Internal::Type::BaseModel + # @!attribute batch_size + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + # + # @return [Symbol, :auto, Integer, nil] + optional :batch_size, union: -> { OpenAI::FineTuning::DpoHyperparameters::BatchSize } + + # @!attribute beta + # The beta value for the DPO method. A higher beta value will increase the weight + # of the penalty between the policy and reference model. + # + # @return [Symbol, :auto, Float, nil] + optional :beta, union: -> { OpenAI::FineTuning::DpoHyperparameters::Beta } + + # @!attribute learning_rate_multiplier + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + # + # @return [Symbol, :auto, Float, nil] + optional :learning_rate_multiplier, + union: -> { OpenAI::FineTuning::DpoHyperparameters::LearningRateMultiplier } + + # @!attribute n_epochs + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + # + # @return [Symbol, :auto, Integer, nil] + optional :n_epochs, union: -> { OpenAI::FineTuning::DpoHyperparameters::NEpochs } + + # @!method initialize(batch_size: nil, beta: nil, learning_rate_multiplier: nil, n_epochs: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::DpoHyperparameters} for more details. + # + # The hyperparameters used for the DPO fine-tuning job. + # + # @param batch_size [Symbol, :auto, Integer] Number of examples in each batch. A larger batch size means that model parameter + # + # @param beta [Symbol, :auto, Float] The beta value for the DPO method. A higher beta value will increase the weight + # + # @param learning_rate_multiplier [Symbol, :auto, Float] Scaling factor for the learning rate. A smaller learning rate may be useful to a + # + # @param n_epochs [Symbol, :auto, Integer] The number of epochs to train the model for. An epoch refers to one full cycle t + + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + # + # @see OpenAI::Models::FineTuning::DpoHyperparameters#batch_size + module BatchSize + extend OpenAI::Internal::Type::Union + + variant const: :auto + + variant Integer + + # @!method self.variants + # @return [Array(Symbol, :auto, Integer)] + end + + # The beta value for the DPO method. 
A higher beta value will increase the weight + # of the penalty between the policy and reference model. + # + # @see OpenAI::Models::FineTuning::DpoHyperparameters#beta + module Beta + extend OpenAI::Internal::Type::Union + + variant const: :auto + + variant Float + + # @!method self.variants + # @return [Array(Symbol, :auto, Float)] + end + + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + # + # @see OpenAI::Models::FineTuning::DpoHyperparameters#learning_rate_multiplier + module LearningRateMultiplier + extend OpenAI::Internal::Type::Union + + variant const: :auto + + variant Float + + # @!method self.variants + # @return [Array(Symbol, :auto, Float)] + end + + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + # + # @see OpenAI::Models::FineTuning::DpoHyperparameters#n_epochs + module NEpochs + extend OpenAI::Internal::Type::Union + + variant const: :auto + + variant Integer + + # @!method self.variants + # @return [Array(Symbol, :auto, Integer)] + end + end + end + end +end diff --git a/lib/openai/models/fine_tuning/dpo_method.rb b/lib/openai/models/fine_tuning/dpo_method.rb new file mode 100644 index 00000000..57bfe306 --- /dev/null +++ b/lib/openai/models/fine_tuning/dpo_method.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + class DpoMethod < OpenAI::Internal::Type::BaseModel + # @!attribute hyperparameters + # The hyperparameters used for the DPO fine-tuning job. + # + # @return [OpenAI::Models::FineTuning::DpoHyperparameters, nil] + optional :hyperparameters, -> { OpenAI::FineTuning::DpoHyperparameters } + + # @!method initialize(hyperparameters: nil) + # Configuration for the DPO fine-tuning method. + # + # @param hyperparameters [OpenAI::Models::FineTuning::DpoHyperparameters] The hyperparameters used for the DPO fine-tuning job. + end + end + end +end diff --git a/lib/openai/models/fine_tuning/fine_tuning_job.rb b/lib/openai/models/fine_tuning/fine_tuning_job.rb index 86996d90..dad79035 100644 --- a/lib/openai/models/fine_tuning/fine_tuning_job.rb +++ b/lib/openai/models/fine_tuning/fine_tuning_job.rb @@ -3,7 +3,8 @@ module OpenAI module Models module FineTuning - class FineTuningJob < OpenAI::BaseModel + # @see OpenAI::Resources::FineTuning::Jobs#create + class FineTuningJob < OpenAI::Internal::Type::BaseModel # @!attribute id # The object identifier, which can be referenced in the API endpoints. # @@ -18,31 +19,31 @@ class FineTuningJob < OpenAI::BaseModel # @!attribute error # For fine-tuning jobs that have `failed`, this will contain more information on - # the cause of the failure. + # the cause of the failure. # # @return [OpenAI::Models::FineTuning::FineTuningJob::Error, nil] - required :error, -> { OpenAI::Models::FineTuning::FineTuningJob::Error }, nil?: true + required :error, -> { OpenAI::FineTuning::FineTuningJob::Error }, nil?: true # @!attribute fine_tuned_model # The name of the fine-tuned model that is being created. The value will be null - # if the fine-tuning job is still running. + # if the fine-tuning job is still running. # # @return [String, nil] required :fine_tuned_model, String, nil?: true # @!attribute finished_at # The Unix timestamp (in seconds) for when the fine-tuning job was finished. The - # value will be null if the fine-tuning job is still running. + # value will be null if the fine-tuning job is still running. 
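`DpoMethod` and its hyperparameters plug into fine-tuning job creation via the `method` field (surfaced on the response model as `method_`). A sketch of requesting a DPO job, assuming the request-side `method` hash mirrors these models with a `type` discriminator; the model and file IDs are placeholders:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

job = client.fine_tuning.jobs.create(
  model: "gpt-4o-mini-2024-07-18", # illustrative base model
  training_file: "file_abc123",    # hypothetical training file ID
  method: {
    type: :dpo,
    dpo: {
      # Each hyperparameter accepts :auto or an explicit value, per the unions above.
      hyperparameters: {beta: 0.1, batch_size: :auto, n_epochs: 3}
    }
  }
)
puts job.status # validating_files, queued, running, succeeded, failed, or cancelled
```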
# # @return [Integer, nil] required :finished_at, Integer, nil?: true # @!attribute hyperparameters # The hyperparameters used for the fine-tuning job. This value will only be - # returned when running `supervised` jobs. + # returned when running `supervised` jobs. # # @return [OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters] - required :hyperparameters, -> { OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters } + required :hyperparameters, -> { OpenAI::FineTuning::FineTuningJob::Hyperparameters } # @!attribute model # The base model that is being fine-tuned. @@ -64,11 +65,11 @@ class FineTuningJob < OpenAI::BaseModel # @!attribute result_files # The compiled results file ID(s) for the fine-tuning job. You can retrieve the - # results with the - # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + # results with the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). # # @return [Array] - required :result_files, OpenAI::ArrayOf[String] + required :result_files, OpenAI::Internal::Type::ArrayOf[String] # @!attribute seed # The seed used for the fine-tuning job. @@ -78,36 +79,36 @@ class FineTuningJob < OpenAI::BaseModel # @!attribute status # The current status of the fine-tuning job, which can be either - # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. # # @return [Symbol, OpenAI::Models::FineTuning::FineTuningJob::Status] - required :status, enum: -> { OpenAI::Models::FineTuning::FineTuningJob::Status } + required :status, enum: -> { OpenAI::FineTuning::FineTuningJob::Status } # @!attribute trained_tokens # The total number of billable tokens processed by this fine-tuning job. The value - # will be null if the fine-tuning job is still running. + # will be null if the fine-tuning job is still running. # # @return [Integer, nil] required :trained_tokens, Integer, nil?: true # @!attribute training_file # The file ID used for training. You can retrieve the training data with the - # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). # # @return [String] required :training_file, String # @!attribute validation_file # The file ID used for validation. You can retrieve the validation results with - # the - # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + # the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). # # @return [String, nil] required :validation_file, String, nil?: true # @!attribute estimated_finish # The Unix timestamp (in seconds) for when the fine-tuning job is estimated to - # finish. The value will be null if the fine-tuning job is not running. + # finish. The value will be null if the fine-tuning job is not running. # # @return [Integer, nil] optional :estimated_finish, Integer, nil?: true @@ -117,82 +118,75 @@ class FineTuningJob < OpenAI::BaseModel # # @return [Array, nil] optional :integrations, - -> { OpenAI::ArrayOf[OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject] }, + -> { + OpenAI::Internal::Type::ArrayOf[OpenAI::FineTuning::FineTuningJobWandbIntegrationObject] + }, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. 
This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true - # @!attribute [r] method_ + # @!attribute method_ # The method used for fine-tuning. # # @return [OpenAI::Models::FineTuning::FineTuningJob::Method, nil] - optional :method_, -> { OpenAI::Models::FineTuning::FineTuningJob::Method }, api_name: :method - - # @!parse - # # @return [OpenAI::Models::FineTuning::FineTuningJob::Method] - # attr_writer :method_ - - # @!parse - # # The `fine_tuning.job` object represents a fine-tuning job that has been created - # # through the API. - # # - # # @param id [String] - # # @param created_at [Integer] - # # @param error [OpenAI::Models::FineTuning::FineTuningJob::Error, nil] - # # @param fine_tuned_model [String, nil] - # # @param finished_at [Integer, nil] - # # @param hyperparameters [OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters] - # # @param model [String] - # # @param organization_id [String] - # # @param result_files [Array] - # # @param seed [Integer] - # # @param status [Symbol, OpenAI::Models::FineTuning::FineTuningJob::Status] - # # @param trained_tokens [Integer, nil] - # # @param training_file [String] - # # @param validation_file [String, nil] - # # @param estimated_finish [Integer, nil] - # # @param integrations [Array, nil] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param method_ [OpenAI::Models::FineTuning::FineTuningJob::Method] - # # @param object [Symbol, :"fine_tuning.job"] - # # - # def initialize( - # id:, - # created_at:, - # error:, - # fine_tuned_model:, - # finished_at:, - # hyperparameters:, - # model:, - # organization_id:, - # result_files:, - # seed:, - # status:, - # trained_tokens:, - # training_file:, - # validation_file:, - # estimated_finish: nil, - # integrations: nil, - # metadata: nil, - # method_: nil, - # object: :"fine_tuning.job", - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Error < OpenAI::BaseModel + optional :method_, -> { OpenAI::FineTuning::FineTuningJob::Method }, api_name: :method + + # @!method initialize(id:, created_at:, error:, fine_tuned_model:, finished_at:, hyperparameters:, model:, organization_id:, result_files:, seed:, status:, trained_tokens:, training_file:, validation_file:, estimated_finish: nil, integrations: nil, metadata: nil, method_: nil, object: :"fine_tuning.job") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::FineTuningJob} for more details. + # + # The `fine_tuning.job` object represents a fine-tuning job that has been created + # through the API. + # + # @param id [String] The object identifier, which can be referenced in the API endpoints. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the fine-tuning job was created. 
+ # + # @param error [OpenAI::Models::FineTuning::FineTuningJob::Error, nil] For fine-tuning jobs that have `failed`, this will contain more information on t + # + # @param fine_tuned_model [String, nil] The name of the fine-tuned model that is being created. The value will be null i + # + # @param finished_at [Integer, nil] The Unix timestamp (in seconds) for when the fine-tuning job was finished. The v + # + # @param hyperparameters [OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters] The hyperparameters used for the fine-tuning job. This value will only be return + # + # @param model [String] The base model that is being fine-tuned. + # + # @param organization_id [String] The organization that owns the fine-tuning job. + # + # @param result_files [Array] The compiled results file ID(s) for the fine-tuning job. You can retrieve the re + # + # @param seed [Integer] The seed used for the fine-tuning job. + # + # @param status [Symbol, OpenAI::Models::FineTuning::FineTuningJob::Status] The current status of the fine-tuning job, which can be either `validating_files + # + # @param trained_tokens [Integer, nil] The total number of billable tokens processed by this fine-tuning job. The value + # + # @param training_file [String] The file ID used for training. You can retrieve the training data with the [File + # + # @param validation_file [String, nil] The file ID used for validation. You can retrieve the validation results with th + # + # @param estimated_finish [Integer, nil] The Unix timestamp (in seconds) for when the fine-tuning job is estimated to fin + # + # @param integrations [Array, nil] A list of integrations to enable for this fine-tuning job. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param method_ [OpenAI::Models::FineTuning::FineTuningJob::Method] The method used for fine-tuning. + # + # @param object [Symbol, :"fine_tuning.job"] The object type, which is always "fine_tuning.job". + + # @see OpenAI::Models::FineTuning::FineTuningJob#error + class Error < OpenAI::Internal::Type::BaseModel # @!attribute code # A machine-readable error code. # @@ -207,107 +201,117 @@ class Error < OpenAI::BaseModel # @!attribute param # The parameter that was invalid, usually `training_file` or `validation_file`. - # This field will be null if the failure was not parameter-specific. + # This field will be null if the failure was not parameter-specific. # # @return [String, nil] required :param, String, nil?: true - # @!parse - # # For fine-tuning jobs that have `failed`, this will contain more information on - # # the cause of the failure. - # # - # # @param code [String] - # # @param message [String] - # # @param param [String, nil] - # # - # def initialize(code:, message:, param:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(code:, message:, param:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::FineTuningJob::Error} for more details. + # + # For fine-tuning jobs that have `failed`, this will contain more information on + # the cause of the failure. + # + # @param code [String] A machine-readable error code. + # + # @param message [String] A human-readable error message. + # + # @param param [String, nil] The parameter that was invalid, usually `training_file` or `validation_file`. 
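Because error is declared nil?: true above, it is only populated once a job has actually failed. A sketch of guarding on it (jobs.retrieve and the job ID are assumptions; the field shapes come from the Error model above):

require "openai"

client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

job = client.fine_tuning.jobs.retrieve("ftjob-abc123") # placeholder ID

if job.status == :failed && job.error
  # code and message are required on Error; param stays nil unless the
  # failure was parameter-specific (usually training_file or validation_file).
  warn "fine-tune failed [#{job.error.code}]: #{job.error.message}"
  warn "invalid parameter: #{job.error.param}" if job.error.param
end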
Th end - class Hyperparameters < OpenAI::BaseModel - # @!attribute [r] batch_size + # @see OpenAI::Models::FineTuning::FineTuningJob#hyperparameters + class Hyperparameters < OpenAI::Internal::Type::BaseModel + # @!attribute batch_size # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. # # @return [Symbol, :auto, Integer, nil] - optional :batch_size, union: -> { OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::BatchSize } + optional :batch_size, + union: -> { OpenAI::FineTuning::FineTuningJob::Hyperparameters::BatchSize }, + nil?: true - # @!parse - # # @return [Symbol, :auto, Integer] - # attr_writer :batch_size - - # @!attribute [r] learning_rate_multiplier + # @!attribute learning_rate_multiplier # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. # # @return [Symbol, :auto, Float, nil] optional :learning_rate_multiplier, - union: -> { OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::LearningRateMultiplier } + union: -> { OpenAI::FineTuning::FineTuningJob::Hyperparameters::LearningRateMultiplier } - # @!parse - # # @return [Symbol, :auto, Float] - # attr_writer :learning_rate_multiplier - - # @!attribute [r] n_epochs + # @!attribute n_epochs # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. # # @return [Symbol, :auto, Integer, nil] - optional :n_epochs, union: -> { OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::NEpochs } - - # @!parse - # # @return [Symbol, :auto, Integer] - # attr_writer :n_epochs - - # @!parse - # # The hyperparameters used for the fine-tuning job. This value will only be - # # returned when running `supervised` jobs. - # # - # # @param batch_size [Symbol, :auto, Integer] - # # @param learning_rate_multiplier [Symbol, :auto, Float] - # # @param n_epochs [Symbol, :auto, Integer] - # # - # def initialize(batch_size: nil, learning_rate_multiplier: nil, n_epochs: nil, **) = super + optional :n_epochs, union: -> { OpenAI::FineTuning::FineTuningJob::Hyperparameters::NEpochs } - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(batch_size: nil, learning_rate_multiplier: nil, n_epochs: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters} for more details. + # + # The hyperparameters used for the fine-tuning job. This value will only be + # returned when running `supervised` jobs. + # + # @param batch_size [Symbol, :auto, Integer, nil] Number of examples in each batch. A larger batch size means that model parameter # + # @param learning_rate_multiplier [Symbol, :auto, Float] Scaling factor for the learning rate. A smaller learning rate may be useful to a + # + # @param n_epochs [Symbol, :auto, Integer] The number of epochs to train the model for. An epoch refers to one full cycle + # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. - class BatchSize < OpenAI::Union + # parameters are updated less frequently, but with lower variance. 
+ # + # @see OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters#batch_size + module BatchSize + extend OpenAI::Internal::Type::Union + variant const: :auto variant Integer + + # @!method self.variants + # @return [Array(Symbol, :auto, Integer)] end - # @abstract - # # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. - class LearningRateMultiplier < OpenAI::Union + # avoid overfitting. + # + # @see OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters#learning_rate_multiplier + module LearningRateMultiplier + extend OpenAI::Internal::Type::Union + variant const: :auto variant Float + + # @!method self.variants + # @return [Array(Symbol, :auto, Float)] end - # @abstract - # # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. - class NEpochs < OpenAI::Union + # through the training dataset. + # + # @see OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters#n_epochs + module NEpochs + extend OpenAI::Internal::Type::Union + variant const: :auto variant Integer + + # @!method self.variants + # @return [Array(Symbol, :auto, Integer)] end end - # @abstract - # # The current status of the fine-tuning job, which can be either - # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. - class Status < OpenAI::Enum + # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + # + # @see OpenAI::Models::FineTuning::FineTuningJob#status + module Status + extend OpenAI::Internal::Type::Enum + VALIDATING_FILES = :validating_files QUEUED = :queued RUNNING = :running @@ -315,283 +319,59 @@ class Status < OpenAI::Enum FAILED = :failed CANCELLED = :cancelled - finalize! + # @!method self.values + # @return [Array] end - class Method < OpenAI::BaseModel - # @!attribute [r] dpo + # @see OpenAI::Models::FineTuning::FineTuningJob#method_ + class Method < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The type of method. Is either `supervised`, `dpo`, or `reinforcement`. + # + # @return [Symbol, OpenAI::Models::FineTuning::FineTuningJob::Method::Type] + required :type, enum: -> { OpenAI::FineTuning::FineTuningJob::Method::Type } + + # @!attribute dpo # Configuration for the DPO fine-tuning method. # - # @return [OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo, nil] - optional :dpo, -> { OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo } + # @return [OpenAI::Models::FineTuning::DpoMethod, nil] + optional :dpo, -> { OpenAI::FineTuning::DpoMethod } - # @!parse - # # @return [OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo] - # attr_writer :dpo + # @!attribute reinforcement + # Configuration for the reinforcement fine-tuning method. + # + # @return [OpenAI::Models::FineTuning::ReinforcementMethod, nil] + optional :reinforcement, -> { OpenAI::FineTuning::ReinforcementMethod } - # @!attribute [r] supervised + # @!attribute supervised # Configuration for the supervised fine-tuning method. # - # @return [OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised, nil] - optional :supervised, -> { OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised } + # @return [OpenAI::Models::FineTuning::SupervisedMethod, nil] + optional :supervised, -> { OpenAI::FineTuning::SupervisedMethod } - # @!parse - # # @return [OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised] - # attr_writer :supervised - - # @!attribute [r] type - # The type of method. Is either `supervised` or `dpo`. 
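Since each hyperparameter union deserializes to :auto, a plain number, or (for batch_size, now nil?: true) nil, Ruby pattern matching covers the cases directly. A sketch assuming job is a FineTuningJob fetched elsewhere:

case job.hyperparameters.batch_size
in :auto        then puts "batch size left to the service"
in Integer => n then puts "batch size pinned to #{n}"
in nil          then puts "batch size not reported for this job"
end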
+ # @!method initialize(type:, dpo: nil, reinforcement: nil, supervised: nil) + # The method used for fine-tuning. # - # @return [Symbol, OpenAI::Models::FineTuning::FineTuningJob::Method::Type, nil] - optional :type, enum: -> { OpenAI::Models::FineTuning::FineTuningJob::Method::Type } - - # @!parse - # # @return [Symbol, OpenAI::Models::FineTuning::FineTuningJob::Method::Type] - # attr_writer :type - - # @!parse - # # The method used for fine-tuning. - # # - # # @param dpo [OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo] - # # @param supervised [OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised] - # # @param type [Symbol, OpenAI::Models::FineTuning::FineTuningJob::Method::Type] - # # - # def initialize(dpo: nil, supervised: nil, type: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Dpo < OpenAI::BaseModel - # @!attribute [r] hyperparameters - # The hyperparameters used for the fine-tuning job. - # - # @return [OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters, nil] - optional :hyperparameters, -> { OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters } - - # @!parse - # # @return [OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters] - # attr_writer :hyperparameters - - # @!parse - # # Configuration for the DPO fine-tuning method. - # # - # # @param hyperparameters [OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters] - # # - # def initialize(hyperparameters: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Hyperparameters < OpenAI::BaseModel - # @!attribute [r] batch_size - # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. - # - # @return [Symbol, :auto, Integer, nil] - optional :batch_size, - union: -> { OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::BatchSize } - - # @!parse - # # @return [Symbol, :auto, Integer] - # attr_writer :batch_size - - # @!attribute [r] beta - # The beta value for the DPO method. A higher beta value will increase the weight - # of the penalty between the policy and reference model. - # - # @return [Symbol, :auto, Float, nil] - optional :beta, - union: -> { OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::Beta } - - # @!parse - # # @return [Symbol, :auto, Float] - # attr_writer :beta - - # @!attribute [r] learning_rate_multiplier - # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. - # - # @return [Symbol, :auto, Float, nil] - optional :learning_rate_multiplier, - union: -> { OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::LearningRateMultiplier } - - # @!parse - # # @return [Symbol, :auto, Float] - # attr_writer :learning_rate_multiplier - - # @!attribute [r] n_epochs - # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. - # - # @return [Symbol, :auto, Integer, nil] - optional :n_epochs, - union: -> { OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::NEpochs } - - # @!parse - # # @return [Symbol, :auto, Integer] - # attr_writer :n_epochs - - # @!parse - # # The hyperparameters used for the fine-tuning job. 
- # # - # # @param batch_size [Symbol, :auto, Integer] - # # @param beta [Symbol, :auto, Float] - # # @param learning_rate_multiplier [Symbol, :auto, Float] - # # @param n_epochs [Symbol, :auto, Integer] - # # - # def initialize(batch_size: nil, beta: nil, learning_rate_multiplier: nil, n_epochs: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract - # - # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. - class BatchSize < OpenAI::Union - variant const: :auto - - variant Integer - end - - # @abstract - # - # The beta value for the DPO method. A higher beta value will increase the weight - # of the penalty between the policy and reference model. - class Beta < OpenAI::Union - variant const: :auto - - variant Float - end - - # @abstract - # - # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. - class LearningRateMultiplier < OpenAI::Union - variant const: :auto - - variant Float - end - - # @abstract - # - # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. - class NEpochs < OpenAI::Union - variant const: :auto - - variant Integer - end - end - end - - class Supervised < OpenAI::BaseModel - # @!attribute [r] hyperparameters - # The hyperparameters used for the fine-tuning job. - # - # @return [OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters, nil] - optional :hyperparameters, - -> { OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters } - - # @!parse - # # @return [OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters] - # attr_writer :hyperparameters - - # @!parse - # # Configuration for the supervised fine-tuning method. - # # - # # @param hyperparameters [OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters] - # # - # def initialize(hyperparameters: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Hyperparameters < OpenAI::BaseModel - # @!attribute [r] batch_size - # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. - # - # @return [Symbol, :auto, Integer, nil] - optional :batch_size, - union: -> { OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::BatchSize } - - # @!parse - # # @return [Symbol, :auto, Integer] - # attr_writer :batch_size - - # @!attribute [r] learning_rate_multiplier - # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. - # - # @return [Symbol, :auto, Float, nil] - optional :learning_rate_multiplier, - union: -> { OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::LearningRateMultiplier } - - # @!parse - # # @return [Symbol, :auto, Float] - # attr_writer :learning_rate_multiplier - - # @!attribute [r] n_epochs - # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. - # - # @return [Symbol, :auto, Integer, nil] - optional :n_epochs, - union: -> { OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::NEpochs } - - # @!parse - # # @return [Symbol, :auto, Integer] - # attr_writer :n_epochs - - # @!parse - # # The hyperparameters used for the fine-tuning job. 
- # # - # # @param batch_size [Symbol, :auto, Integer] - # # @param learning_rate_multiplier [Symbol, :auto, Float] - # # @param n_epochs [Symbol, :auto, Integer] - # # - # def initialize(batch_size: nil, learning_rate_multiplier: nil, n_epochs: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract - # - # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. - class BatchSize < OpenAI::Union - variant const: :auto - - variant Integer - end - - # @abstract - # - # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. - class LearningRateMultiplier < OpenAI::Union - variant const: :auto - - variant Float - end - - # @abstract - # - # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. - class NEpochs < OpenAI::Union - variant const: :auto - - variant Integer - end - end - end + # @param type [Symbol, OpenAI::Models::FineTuning::FineTuningJob::Method::Type] The type of method. Is either `supervised`, `dpo`, or `reinforcement`. + # + # @param dpo [OpenAI::Models::FineTuning::DpoMethod] Configuration for the DPO fine-tuning method. + # + # @param reinforcement [OpenAI::Models::FineTuning::ReinforcementMethod] Configuration for the reinforcement fine-tuning method. + # + # @param supervised [OpenAI::Models::FineTuning::SupervisedMethod] Configuration for the supervised fine-tuning method. - # @abstract + # The type of method. Is either `supervised`, `dpo`, or `reinforcement`. # - # The type of method. Is either `supervised` or `dpo`. - class Type < OpenAI::Enum + # @see OpenAI::Models::FineTuning::FineTuningJob::Method#type + module Type + extend OpenAI::Internal::Type::Enum + SUPERVISED = :supervised DPO = :dpo + REINFORCEMENT = :reinforcement - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/fine_tuning/fine_tuning_job_event.rb b/lib/openai/models/fine_tuning/fine_tuning_job_event.rb index 0688b7a8..d609e035 100644 --- a/lib/openai/models/fine_tuning/fine_tuning_job_event.rb +++ b/lib/openai/models/fine_tuning/fine_tuning_job_event.rb @@ -3,7 +3,8 @@ module OpenAI module Models module FineTuning - class FineTuningJobEvent < OpenAI::BaseModel + # @see OpenAI::Resources::FineTuning::Jobs#list_events + class FineTuningJobEvent < OpenAI::Internal::Type::BaseModel # @!attribute id # The object identifier. # @@ -20,7 +21,7 @@ class FineTuningJobEvent < OpenAI::BaseModel # The log level of the event. # # @return [Symbol, OpenAI::Models::FineTuning::FineTuningJobEvent::Level] - required :level, enum: -> { OpenAI::Models::FineTuning::FineTuningJobEvent::Level } + required :level, enum: -> { OpenAI::FineTuning::FineTuningJobEvent::Level } # @!attribute message # The message of the event. @@ -34,60 +35,60 @@ class FineTuningJobEvent < OpenAI::BaseModel # @return [Symbol, :"fine_tuning.job.event"] required :object, const: :"fine_tuning.job.event" - # @!attribute [r] data + # @!attribute data # The data associated with the event. # # @return [Object, nil] - optional :data, OpenAI::Unknown + optional :data, OpenAI::Internal::Type::Unknown - # @!parse - # # @return [Object] - # attr_writer :data - - # @!attribute [r] type + # @!attribute type # The type of event. 
# # @return [Symbol, OpenAI::Models::FineTuning::FineTuningJobEvent::Type, nil] - optional :type, enum: -> { OpenAI::Models::FineTuning::FineTuningJobEvent::Type } - - # @!parse - # # @return [Symbol, OpenAI::Models::FineTuning::FineTuningJobEvent::Type] - # attr_writer :type - - # @!parse - # # Fine-tuning job event object - # # - # # @param id [String] - # # @param created_at [Integer] - # # @param level [Symbol, OpenAI::Models::FineTuning::FineTuningJobEvent::Level] - # # @param message [String] - # # @param data [Object] - # # @param type [Symbol, OpenAI::Models::FineTuning::FineTuningJobEvent::Type] - # # @param object [Symbol, :"fine_tuning.job.event"] - # # - # def initialize(id:, created_at:, level:, message:, data: nil, type: nil, object: :"fine_tuning.job.event", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + optional :type, enum: -> { OpenAI::FineTuning::FineTuningJobEvent::Type } - # @abstract + # @!method initialize(id:, created_at:, level:, message:, data: nil, type: nil, object: :"fine_tuning.job.event") + # Fine-tuning job event object + # + # @param id [String] The object identifier. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the fine-tuning job was created. + # + # @param level [Symbol, OpenAI::Models::FineTuning::FineTuningJobEvent::Level] The log level of the event. + # + # @param message [String] The message of the event. # + # @param data [Object] The data associated with the event. + # + # @param type [Symbol, OpenAI::Models::FineTuning::FineTuningJobEvent::Type] The type of event. + # + # @param object [Symbol, :"fine_tuning.job.event"] The object type, which is always "fine_tuning.job.event". + # The log level of the event. - class Level < OpenAI::Enum + # + # @see OpenAI::Models::FineTuning::FineTuningJobEvent#level + module Level + extend OpenAI::Internal::Type::Enum + INFO = :info WARN = :warn ERROR = :error - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # # The type of event. - class Type < OpenAI::Enum + # + # @see OpenAI::Models::FineTuning::FineTuningJobEvent#type + module Type + extend OpenAI::Internal::Type::Enum + MESSAGE = :message METRICS = :metrics - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rb b/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rb index 3d4ee69a..e9e7c30d 100644 --- a/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rb +++ b/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rb @@ -3,7 +3,7 @@ module OpenAI module Models module FineTuning - class FineTuningJobWandbIntegration < OpenAI::BaseModel + class FineTuningJobWandbIntegration < OpenAI::Internal::Type::BaseModel # @!attribute project # The name of the project that the new run will be created under. # @@ -12,45 +12,43 @@ class FineTuningJobWandbIntegration < OpenAI::BaseModel # @!attribute entity # The entity to use for the run. This allows you to set the team or username of - # the WandB user that you would like associated with the run. If not set, the - # default entity for the registered WandB API key is used. + # the WandB user that you would like associated with the run. If not set, the + # default entity for the registered WandB API key is used. # # @return [String, nil] optional :entity, String, nil?: true # @!attribute name # A display name to set for the run. If not set, we will use the Job ID as the - # name. + # name. 
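The event model pairs with the #list_events resource referenced in its @see tag. A sketch that reads a single page through its data array (the job ID is a placeholder and the page shape is an assumption; cursor paging details are omitted):

page = client.fine_tuning.jobs.list_events("ftjob-abc123")

page.data.each do |event|
  # level is one of :info, :warn, or :error per the Level enum above.
  next unless event.level == :error
  puts "#{Time.at(event.created_at)}  #{event.message}"
end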
# # @return [String, nil] optional :name, String, nil?: true - # @!attribute [r] tags + # @!attribute tags # A list of tags to be attached to the newly created run. These tags are passed - # through directly to WandB. Some default tags are generated by OpenAI: - # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + # through directly to WandB. Some default tags are generated by OpenAI: + # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". # # @return [Array, nil] - optional :tags, OpenAI::ArrayOf[String] + optional :tags, OpenAI::Internal::Type::ArrayOf[String] - # @!parse - # # @return [Array] - # attr_writer :tags - - # @!parse - # # The settings for your integration with Weights and Biases. This payload - # # specifies the project that metrics will be sent to. Optionally, you can set an - # # explicit display name for your run, add tags to your run, and set a default - # # entity (team, username, etc) to be associated with your run. - # # - # # @param project [String] - # # @param entity [String, nil] - # # @param name [String, nil] - # # @param tags [Array] - # # - # def initialize(project:, entity: nil, name: nil, tags: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(project:, entity: nil, name: nil, tags: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::FineTuningJobWandbIntegration} for more details. + # + # The settings for your integration with Weights and Biases. This payload + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. + # + # @param project [String] The name of the project that the new run will be created under. + # + # @param entity [String, nil] The entity to use for the run. This allows you to set the team or username of th + # + # @param name [String, nil] A display name to set for the run. If not set, we will use the Job ID as the nam + # + # @param tags [Array] A list of tags to be attached to the newly created run. These tags are passed th end end diff --git a/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rb b/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rb index 9d1a4377..54781dde 100644 --- a/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rb +++ b/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rb @@ -3,7 +3,7 @@ module OpenAI module Models module FineTuning - class FineTuningJobWandbIntegrationObject < OpenAI::BaseModel + class FineTuningJobWandbIntegrationObject < OpenAI::Internal::Type::BaseModel # @!attribute type # The type of the integration being enabled for the fine-tuning job # @@ -12,20 +12,21 @@ class FineTuningJobWandbIntegrationObject < OpenAI::BaseModel # @!attribute wandb # The settings for your integration with Weights and Biases. This payload - # specifies the project that metrics will be sent to. Optionally, you can set an - # explicit display name for your run, add tags to your run, and set a default - # entity (team, username, etc) to be associated with your run. + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. 
# # @return [OpenAI::Models::FineTuning::FineTuningJobWandbIntegration] - required :wandb, -> { OpenAI::Models::FineTuning::FineTuningJobWandbIntegration } + required :wandb, -> { OpenAI::FineTuning::FineTuningJobWandbIntegration } - # @!parse - # # @param wandb [OpenAI::Models::FineTuning::FineTuningJobWandbIntegration] - # # @param type [Symbol, :wandb] - # # - # def initialize(wandb:, type: :wandb, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(wandb:, type: :wandb) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject} for more + # details. + # + # @param wandb [OpenAI::Models::FineTuning::FineTuningJobWandbIntegration] The settings for your integration with Weights and Biases. This payload specifie + # + # @param type [Symbol, :wandb] The type of the integration being enabled for the fine-tuning job end end diff --git a/lib/openai/models/fine_tuning/job_cancel_params.rb b/lib/openai/models/fine_tuning/job_cancel_params.rb index 7427d15d..4f4cf0b2 100644 --- a/lib/openai/models/fine_tuning/job_cancel_params.rb +++ b/lib/openai/models/fine_tuning/job_cancel_params.rb @@ -3,17 +3,13 @@ module OpenAI module Models module FineTuning - class JobCancelParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::FineTuning::Jobs#cancel + class JobCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/fine_tuning/job_create_params.rb b/lib/openai/models/fine_tuning/job_create_params.rb index 926e7f82..1d4258c1 100644 --- a/lib/openai/models/fine_tuning/job_create_params.rb +++ b/lib/openai/models/fine_tuning/job_create_params.rb @@ -3,94 +3,89 @@ module OpenAI module Models module FineTuning - class JobCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::FineTuning::Jobs#create + class JobCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute model # The name of the model to fine-tune. You can select one of the - # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). # - # @return [String, Symbol, OpenAI::Models::FineTuning::JobCreateParams::Model::Preset] - required :model, union: -> { OpenAI::Models::FineTuning::JobCreateParams::Model } + # @return [String, Symbol, OpenAI::Models::FineTuning::JobCreateParams::Model] + required :model, union: -> { OpenAI::FineTuning::JobCreateParams::Model } # @!attribute training_file # The ID of an uploaded file that contains training data. # - # See [upload file](https://platform.openai.com/docs/api-reference/files/create) - # for how to upload a file. 
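JobCancelParams above carries nothing beyond request_options, so cancelling reduces to a single ID argument. A sketch (placeholder ID; the #cancel resource comes from the @see tag above):

cancelled = client.fine_tuning.jobs.cancel("ftjob-abc123")
puts cancelled.status # eventually :cancelled, per the Status enum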
+ # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. # - # Your dataset must be formatted as a JSONL file. Additionally, you must upload - # your file with the purpose `fine-tune`. + # Your dataset must be formatted as a JSONL file. Additionally, you must upload + # your file with the purpose `fine-tune`. # - # The contents of the file should differ depending on if the model uses the - # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), - # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) - # format, or if the fine-tuning method uses the - # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) - # format. + # The contents of the file should differ depending on if the model uses the + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # format, or if the fine-tuning method uses the + # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) + # format. # - # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) - # for more details. + # See the + # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) + # for more details. # # @return [String] required :training_file, String - # @!attribute [r] hyperparameters + # @!attribute hyperparameters + # @deprecated + # # The hyperparameters used for the fine-tuning job. This value is now deprecated - # in favor of `method`, and should be passed in under the `method` parameter. + # in favor of `method`, and should be passed in under the `method` parameter. # # @return [OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters, nil] - optional :hyperparameters, -> { OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters } - - # @!parse - # # @return [OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters] - # attr_writer :hyperparameters + optional :hyperparameters, -> { OpenAI::FineTuning::JobCreateParams::Hyperparameters } # @!attribute integrations # A list of integrations to enable for your fine-tuning job. # # @return [Array, nil] optional :integrations, - -> { OpenAI::ArrayOf[OpenAI::Models::FineTuning::JobCreateParams::Integration] }, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::FineTuning::JobCreateParams::Integration] }, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true - # @!attribute [r] method_ + # @!attribute method_ # The method used for fine-tuning. 
# # @return [OpenAI::Models::FineTuning::JobCreateParams::Method, nil] - optional :method_, -> { OpenAI::Models::FineTuning::JobCreateParams::Method }, api_name: :method - - # @!parse - # # @return [OpenAI::Models::FineTuning::JobCreateParams::Method] - # attr_writer :method_ + optional :method_, -> { OpenAI::FineTuning::JobCreateParams::Method }, api_name: :method # @!attribute seed # The seed controls the reproducibility of the job. Passing in the same seed and - # job parameters should produce the same results, but may differ in rare cases. If - # a seed is not specified, one will be generated for you. + # job parameters should produce the same results, but may differ in rare cases. If + # a seed is not specified, one will be generated for you. # # @return [Integer, nil] optional :seed, Integer, nil?: true # @!attribute suffix # A string of up to 64 characters that will be added to your fine-tuned model - # name. + # name. # - # For example, a `suffix` of "custom-model-name" would produce a model name like - # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + # For example, a `suffix` of "custom-model-name" would produce a model name like + # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. # # @return [String, nil] optional :suffix, String, nil?: true @@ -98,182 +93,187 @@ class JobCreateParams < OpenAI::BaseModel # @!attribute validation_file # The ID of an uploaded file that contains validation data. # - # If you provide this file, the data is used to generate validation metrics - # periodically during fine-tuning. These metrics can be viewed in the fine-tuning - # results file. The same data should not be present in both train and validation - # files. + # If you provide this file, the data is used to generate validation metrics + # periodically during fine-tuning. These metrics can be viewed in the fine-tuning + # results file. The same data should not be present in both train and validation + # files. # - # Your dataset must be formatted as a JSONL file. You must upload your file with - # the purpose `fine-tune`. + # Your dataset must be formatted as a JSONL file. You must upload your file with + # the purpose `fine-tune`. # - # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) - # for more details. + # See the + # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) + # for more details. 
# # @return [String, nil] optional :validation_file, String, nil?: true - # @!parse - # # @param model [String, Symbol, OpenAI::Models::FineTuning::JobCreateParams::Model::Preset] - # # @param training_file [String] - # # @param hyperparameters [OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters] - # # @param integrations [Array, nil] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param method_ [OpenAI::Models::FineTuning::JobCreateParams::Method] - # # @param seed [Integer, nil] - # # @param suffix [String, nil] - # # @param validation_file [String, nil] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize( - # model:, - # training_file:, - # hyperparameters: nil, - # integrations: nil, - # metadata: nil, - # method_: nil, - # seed: nil, - # suffix: nil, - # validation_file: nil, - # request_options: {}, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(model:, training_file:, hyperparameters: nil, integrations: nil, metadata: nil, method_: nil, seed: nil, suffix: nil, validation_file: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::JobCreateParams} for more details. + # + # @param model [String, Symbol, OpenAI::Models::FineTuning::JobCreateParams::Model] The name of the model to fine-tune. You can select one of the + # + # @param training_file [String] The ID of an uploaded file that contains training data. + # + # @param hyperparameters [OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters] The hyperparameters used for the fine-tuning job. + # + # @param integrations [Array, nil] A list of integrations to enable for your fine-tuning job. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param method_ [OpenAI::Models::FineTuning::JobCreateParams::Method] The method used for fine-tuning. # + # @param seed [Integer, nil] The seed controls the reproducibility of the job. Passing in the same seed and j + # + # @param suffix [String, nil] A string of up to 64 characters that will be added to your fine-tuned model name + # + # @param validation_file [String, nil] The ID of an uploaded file that contains validation data. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # The name of the model to fine-tune. You can select one of the - # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). - class Model < OpenAI::Union + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). + module Model + extend OpenAI::Internal::Type::Union + variant String - # The name of the model to fine-tune. You can select one of the - # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). - variant enum: -> { OpenAI::Models::FineTuning::JobCreateParams::Model::Preset } + variant const: -> { OpenAI::Models::FineTuning::JobCreateParams::Model::BABBAGE_002 } - # @abstract - # - # The name of the model to fine-tune. You can select one of the - # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). - class Preset < OpenAI::Enum - BABBAGE_002 = :"babbage-002" - DAVINCI_002 = :"davinci-002" - GPT_3_5_TURBO = :"gpt-3.5-turbo" - GPT_4O_MINI = :"gpt-4o-mini" - - finalize! 
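Given the required/optional split documented above, only model and training_file are mandatory when creating a job. A minimal sketch built on placeholder IDs:

job = client.fine_tuning.jobs.create(
  model: "gpt-4o-mini",            # String arm of the Model union
  training_file: "file-abc123",    # JSONL upload with purpose `fine-tune`
  suffix: "custom-model-name",     # appears in the fine-tuned model name
  seed: 42,                        # fix the seed for reproducibility
  metadata: {team: "ml-platform"}  # up to 16 string key/value pairs
)
puts job.id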
+ variant const: -> { OpenAI::Models::FineTuning::JobCreateParams::Model::DAVINCI_002 } + + variant const: -> { OpenAI::Models::FineTuning::JobCreateParams::Model::GPT_3_5_TURBO } + + variant const: -> { OpenAI::Models::FineTuning::JobCreateParams::Model::GPT_4O_MINI } + + # @!method self.variants + # @return [Array(String, Symbol)] + + define_sorbet_constant!(:Variants) do + T.type_alias { T.any(String, OpenAI::FineTuning::JobCreateParams::Model::TaggedSymbol) } end + + # @!group + + BABBAGE_002 = :"babbage-002" + DAVINCI_002 = :"davinci-002" + GPT_3_5_TURBO = :"gpt-3.5-turbo" + GPT_4O_MINI = :"gpt-4o-mini" + + # @!endgroup end # @deprecated - # - class Hyperparameters < OpenAI::BaseModel - # @!attribute [r] batch_size + class Hyperparameters < OpenAI::Internal::Type::BaseModel + # @!attribute batch_size # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. + # parameters are updated less frequently, but with lower variance. # # @return [Symbol, :auto, Integer, nil] - optional :batch_size, - union: -> { OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::BatchSize } + optional :batch_size, union: -> { OpenAI::FineTuning::JobCreateParams::Hyperparameters::BatchSize } - # @!parse - # # @return [Symbol, :auto, Integer] - # attr_writer :batch_size - - # @!attribute [r] learning_rate_multiplier + # @!attribute learning_rate_multiplier # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. + # avoid overfitting. # # @return [Symbol, :auto, Float, nil] optional :learning_rate_multiplier, - union: -> { OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::LearningRateMultiplier } + union: -> { OpenAI::FineTuning::JobCreateParams::Hyperparameters::LearningRateMultiplier } - # @!parse - # # @return [Symbol, :auto, Float] - # attr_writer :learning_rate_multiplier - - # @!attribute [r] n_epochs + # @!attribute n_epochs # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. + # through the training dataset. # # @return [Symbol, :auto, Integer, nil] - optional :n_epochs, union: -> { OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::NEpochs } - - # @!parse - # # @return [Symbol, :auto, Integer] - # attr_writer :n_epochs - - # @!parse - # # The hyperparameters used for the fine-tuning job. This value is now deprecated - # # in favor of `method`, and should be passed in under the `method` parameter. - # # - # # @param batch_size [Symbol, :auto, Integer] - # # @param learning_rate_multiplier [Symbol, :auto, Float] - # # @param n_epochs [Symbol, :auto, Integer] - # # - # def initialize(batch_size: nil, learning_rate_multiplier: nil, n_epochs: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + optional :n_epochs, union: -> { OpenAI::FineTuning::JobCreateParams::Hyperparameters::NEpochs } - # @abstract + # @!method initialize(batch_size: nil, learning_rate_multiplier: nil, n_epochs: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters} for more details. # + # The hyperparameters used for the fine-tuning job. This value is now deprecated + # in favor of `method`, and should be passed in under the `method` parameter. + # + # @param batch_size [Symbol, :auto, Integer] Number of examples in each batch. 
A larger batch size means that model parameter + # + # @param learning_rate_multiplier [Symbol, :auto, Float] Scaling factor for the learning rate. A smaller learning rate may be useful to a + # + # @param n_epochs [Symbol, :auto, Integer] The number of epochs to train the model for. An epoch refers to one full cycle + # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. - class BatchSize < OpenAI::Union + # parameters are updated less frequently, but with lower variance. + # + # @see OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters#batch_size + module BatchSize + extend OpenAI::Internal::Type::Union + variant const: :auto variant Integer + + # @!method self.variants + # @return [Array(Symbol, :auto, Integer)] end - # @abstract - # # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. - class LearningRateMultiplier < OpenAI::Union + # avoid overfitting. + # + # @see OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters#learning_rate_multiplier + module LearningRateMultiplier + extend OpenAI::Internal::Type::Union + variant const: :auto variant Float + + # @!method self.variants + # @return [Array(Symbol, :auto, Float)] end - # @abstract - # # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. - class NEpochs < OpenAI::Union + # through the training dataset. + # + # @see OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters#n_epochs + module NEpochs + extend OpenAI::Internal::Type::Union + variant const: :auto variant Integer + + # @!method self.variants + # @return [Array(Symbol, :auto, Integer)] end end - class Integration < OpenAI::BaseModel + class Integration < OpenAI::Internal::Type::BaseModel # @!attribute type # The type of integration to enable. Currently, only "wandb" (Weights and Biases) - # is supported. + # is supported. # # @return [Symbol, :wandb] required :type, const: :wandb # @!attribute wandb # The settings for your integration with Weights and Biases. This payload - # specifies the project that metrics will be sent to. Optionally, you can set an - # explicit display name for your run, add tags to your run, and set a default - # entity (team, username, etc) to be associated with your run. + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. # # @return [OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb] - required :wandb, -> { OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb } - - # @!parse - # # @param wandb [OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb] - # # @param type [Symbol, :wandb] - # # - # def initialize(wandb:, type: :wandb, **) = super + required :wandb, -> { OpenAI::FineTuning::JobCreateParams::Integration::Wandb } - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(wandb:, type: :wandb) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::JobCreateParams::Integration} for more details. + # + # @param wandb [OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb] The settings for your integration with Weights and Biases. This payload specifie + # + # @param type [Symbol, :wandb] The type of integration to enable. 
Currently, only "wandb" (Weights and Biases) - class Wandb < OpenAI::BaseModel + # @see OpenAI::Models::FineTuning::JobCreateParams::Integration#wandb + class Wandb < OpenAI::Internal::Type::BaseModel # @!attribute project # The name of the project that the new run will be created under. # @@ -282,323 +282,95 @@ class Wandb < OpenAI::BaseModel # @!attribute entity # The entity to use for the run. This allows you to set the team or username of - # the WandB user that you would like associated with the run. If not set, the - # default entity for the registered WandB API key is used. + # the WandB user that you would like associated with the run. If not set, the + # default entity for the registered WandB API key is used. # # @return [String, nil] optional :entity, String, nil?: true # @!attribute name # A display name to set for the run. If not set, we will use the Job ID as the - # name. + # name. # # @return [String, nil] optional :name, String, nil?: true - # @!attribute [r] tags + # @!attribute tags # A list of tags to be attached to the newly created run. These tags are passed - # through directly to WandB. Some default tags are generated by OpenAI: - # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + # through directly to WandB. Some default tags are generated by OpenAI: + # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". # # @return [Array, nil] - optional :tags, OpenAI::ArrayOf[String] - - # @!parse - # # @return [Array] - # attr_writer :tags - - # @!parse - # # The settings for your integration with Weights and Biases. This payload - # # specifies the project that metrics will be sent to. Optionally, you can set an - # # explicit display name for your run, add tags to your run, and set a default - # # entity (team, username, etc) to be associated with your run. - # # - # # @param project [String] - # # @param entity [String, nil] - # # @param name [String, nil] - # # @param tags [Array] - # # - # def initialize(project:, entity: nil, name: nil, tags: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + optional :tags, OpenAI::Internal::Type::ArrayOf[String] + + # @!method initialize(project:, entity: nil, name: nil, tags: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb} for more + # details. + # + # The settings for your integration with Weights and Biases. This payload + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. + # + # @param project [String] The name of the project that the new run will be created under. + # + # @param entity [String, nil] The entity to use for the run. This allows you to set the team or username of th + # + # @param name [String, nil] A display name to set for the run. If not set, we will use the Job ID as the nam + # + # @param tags [Array] A list of tags to be attached to the newly created run. These tags are passed th end end - class Method < OpenAI::BaseModel - # @!attribute [r] dpo + class Method < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The type of method. Is either `supervised`, `dpo`, or `reinforcement`. 
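The Integration and Integration::Wandb models above nest the same Weights and Biases settings shape used on the job object. A sketch of enabling the integration at creation time (project, entity, and tags are illustrative):

job = client.fine_tuning.jobs.create(
  model: "gpt-4o-mini",
  training_file: "file-abc123",
  integrations: [
    OpenAI::FineTuning::JobCreateParams::Integration.new(
      # `type:` defaults to :wandb via the `const: :wandb` declaration.
      wandb: OpenAI::FineTuning::JobCreateParams::Integration::Wandb.new(
        project: "my-finetune-project",
        entity: "my-team", # optional; nil falls back to the WandB key's default entity
        tags: ["openai/finetune"]
      )
    )
  ]
)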
+ # + # @return [Symbol, OpenAI::Models::FineTuning::JobCreateParams::Method::Type] + required :type, enum: -> { OpenAI::FineTuning::JobCreateParams::Method::Type } + + # @!attribute dpo # Configuration for the DPO fine-tuning method. # - # @return [OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo, nil] - optional :dpo, -> { OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo } + # @return [OpenAI::Models::FineTuning::DpoMethod, nil] + optional :dpo, -> { OpenAI::FineTuning::DpoMethod } - # @!parse - # # @return [OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo] - # attr_writer :dpo + # @!attribute reinforcement + # Configuration for the reinforcement fine-tuning method. + # + # @return [OpenAI::Models::FineTuning::ReinforcementMethod, nil] + optional :reinforcement, -> { OpenAI::FineTuning::ReinforcementMethod } - # @!attribute [r] supervised + # @!attribute supervised # Configuration for the supervised fine-tuning method. # - # @return [OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised, nil] - optional :supervised, -> { OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised } - - # @!parse - # # @return [OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised] - # attr_writer :supervised + # @return [OpenAI::Models::FineTuning::SupervisedMethod, nil] + optional :supervised, -> { OpenAI::FineTuning::SupervisedMethod } - # @!attribute [r] type - # The type of method. Is either `supervised` or `dpo`. + # @!method initialize(type:, dpo: nil, reinforcement: nil, supervised: nil) + # The method used for fine-tuning. # - # @return [Symbol, OpenAI::Models::FineTuning::JobCreateParams::Method::Type, nil] - optional :type, enum: -> { OpenAI::Models::FineTuning::JobCreateParams::Method::Type } - - # @!parse - # # @return [Symbol, OpenAI::Models::FineTuning::JobCreateParams::Method::Type] - # attr_writer :type - - # @!parse - # # The method used for fine-tuning. - # # - # # @param dpo [OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo] - # # @param supervised [OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised] - # # @param type [Symbol, OpenAI::Models::FineTuning::JobCreateParams::Method::Type] - # # - # def initialize(dpo: nil, supervised: nil, type: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Dpo < OpenAI::BaseModel - # @!attribute [r] hyperparameters - # The hyperparameters used for the fine-tuning job. - # - # @return [OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters, nil] - optional :hyperparameters, - -> { OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters } - - # @!parse - # # @return [OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters] - # attr_writer :hyperparameters - - # @!parse - # # Configuration for the DPO fine-tuning method. - # # - # # @param hyperparameters [OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters] - # # - # def initialize(hyperparameters: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Hyperparameters < OpenAI::BaseModel - # @!attribute [r] batch_size - # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. 
- # - # @return [Symbol, :auto, Integer, nil] - optional :batch_size, - union: -> { OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::BatchSize } - - # @!parse - # # @return [Symbol, :auto, Integer] - # attr_writer :batch_size - - # @!attribute [r] beta - # The beta value for the DPO method. A higher beta value will increase the weight - # of the penalty between the policy and reference model. - # - # @return [Symbol, :auto, Float, nil] - optional :beta, - union: -> { OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::Beta } - - # @!parse - # # @return [Symbol, :auto, Float] - # attr_writer :beta - - # @!attribute [r] learning_rate_multiplier - # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. - # - # @return [Symbol, :auto, Float, nil] - optional :learning_rate_multiplier, - union: -> { OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::LearningRateMultiplier } - - # @!parse - # # @return [Symbol, :auto, Float] - # attr_writer :learning_rate_multiplier - - # @!attribute [r] n_epochs - # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. - # - # @return [Symbol, :auto, Integer, nil] - optional :n_epochs, - union: -> { OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::NEpochs } - - # @!parse - # # @return [Symbol, :auto, Integer] - # attr_writer :n_epochs - - # @!parse - # # The hyperparameters used for the fine-tuning job. - # # - # # @param batch_size [Symbol, :auto, Integer] - # # @param beta [Symbol, :auto, Float] - # # @param learning_rate_multiplier [Symbol, :auto, Float] - # # @param n_epochs [Symbol, :auto, Integer] - # # - # def initialize(batch_size: nil, beta: nil, learning_rate_multiplier: nil, n_epochs: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract - # - # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. - class BatchSize < OpenAI::Union - variant const: :auto - - variant Integer - end - - # @abstract - # - # The beta value for the DPO method. A higher beta value will increase the weight - # of the penalty between the policy and reference model. - class Beta < OpenAI::Union - variant const: :auto - - variant Float - end - - # @abstract - # - # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. - class LearningRateMultiplier < OpenAI::Union - variant const: :auto - - variant Float - end - - # @abstract - # - # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. - class NEpochs < OpenAI::Union - variant const: :auto - - variant Integer - end - end - end - - class Supervised < OpenAI::BaseModel - # @!attribute [r] hyperparameters - # The hyperparameters used for the fine-tuning job. - # - # @return [OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters, nil] - optional :hyperparameters, - -> { OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters } - - # @!parse - # # @return [OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters] - # attr_writer :hyperparameters - - # @!parse - # # Configuration for the supervised fine-tuning method. 
- # # - # # @param hyperparameters [OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters] - # # - # def initialize(hyperparameters: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Hyperparameters < OpenAI::BaseModel - # @!attribute [r] batch_size - # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. - # - # @return [Symbol, :auto, Integer, nil] - optional :batch_size, - union: -> { OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::BatchSize } - - # @!parse - # # @return [Symbol, :auto, Integer] - # attr_writer :batch_size - - # @!attribute [r] learning_rate_multiplier - # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. - # - # @return [Symbol, :auto, Float, nil] - optional :learning_rate_multiplier, - union: -> { OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::LearningRateMultiplier } - - # @!parse - # # @return [Symbol, :auto, Float] - # attr_writer :learning_rate_multiplier - - # @!attribute [r] n_epochs - # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. - # - # @return [Symbol, :auto, Integer, nil] - optional :n_epochs, - union: -> { OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::NEpochs } - - # @!parse - # # @return [Symbol, :auto, Integer] - # attr_writer :n_epochs - - # @!parse - # # The hyperparameters used for the fine-tuning job. - # # - # # @param batch_size [Symbol, :auto, Integer] - # # @param learning_rate_multiplier [Symbol, :auto, Float] - # # @param n_epochs [Symbol, :auto, Integer] - # # - # def initialize(batch_size: nil, learning_rate_multiplier: nil, n_epochs: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract - # - # Number of examples in each batch. A larger batch size means that model - # parameters are updated less frequently, but with lower variance. - class BatchSize < OpenAI::Union - variant const: :auto - - variant Integer - end - - # @abstract - # - # Scaling factor for the learning rate. A smaller learning rate may be useful to - # avoid overfitting. - class LearningRateMultiplier < OpenAI::Union - variant const: :auto - - variant Float - end - - # @abstract - # - # The number of epochs to train the model for. An epoch refers to one full cycle - # through the training dataset. - class NEpochs < OpenAI::Union - variant const: :auto - - variant Integer - end - end - end + # @param type [Symbol, OpenAI::Models::FineTuning::JobCreateParams::Method::Type] The type of method. Is either `supervised`, `dpo`, or `reinforcement`. + # + # @param dpo [OpenAI::Models::FineTuning::DpoMethod] Configuration for the DPO fine-tuning method. + # + # @param reinforcement [OpenAI::Models::FineTuning::ReinforcementMethod] Configuration for the reinforcement fine-tuning method. + # + # @param supervised [OpenAI::Models::FineTuning::SupervisedMethod] Configuration for the supervised fine-tuning method. - # @abstract + # The type of method. Is either `supervised`, `dpo`, or `reinforcement`. # - # The type of method. Is either `supervised` or `dpo`. - class Type < OpenAI::Enum + # @see OpenAI::Models::FineTuning::JobCreateParams::Method#type + module Type + extend OpenAI::Internal::Type::Enum + SUPERVISED = :supervised DPO = :dpo + REINFORCEMENT = :reinforcement - finalize! 
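          # As a sketch, the required `type` discriminator above pairs with exactly one
          # of the per-method config blocks; nested hashes are coerced into the
          # corresponding models. Whether `jobs.create` exposes this under a `method_`
          # keyword is not shown in this hunk, so the example constructs the model
          # directly.
          #
          #   method = OpenAI::Models::FineTuning::JobCreateParams::Method.new(
          #     type: :supervised, # one of :supervised, :dpo, or :reinforcement
          #     supervised: {
          #       # each hyperparameter takes :auto or a concrete number
          #       hyperparameters: {batch_size: :auto, n_epochs: 3}
          #     }
          #   )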
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/fine_tuning/job_list_events_params.rb b/lib/openai/models/fine_tuning/job_list_events_params.rb index ad4de4f5..1e911e0f 100644 --- a/lib/openai/models/fine_tuning/job_list_events_params.rb +++ b/lib/openai/models/fine_tuning/job_list_events_params.rb @@ -3,39 +3,29 @@ module OpenAI module Models module FineTuning - class JobListEventsParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::FineTuning::Jobs#list_events + class JobListEventsParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!attribute [r] after + # @!attribute after # Identifier for the last event from the previous pagination request. # # @return [String, nil] optional :after, String - # @!parse - # # @return [String] - # attr_writer :after - - # @!attribute [r] limit + # @!attribute limit # Number of events to retrieve. # # @return [Integer, nil] optional :limit, Integer - # @!parse - # # @return [Integer] - # attr_writer :limit - - # @!parse - # # @param after [String] - # # @param limit [Integer] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(after: nil, limit: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(after: nil, limit: nil, request_options: {}) + # @param after [String] Identifier for the last event from the previous pagination request. + # + # @param limit [Integer] Number of events to retrieve. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/fine_tuning/job_list_params.rb b/lib/openai/models/fine_tuning/job_list_params.rb index 6c53cafd..30c6f937 100644 --- a/lib/openai/models/fine_tuning/job_list_params.rb +++ b/lib/openai/models/fine_tuning/job_list_params.rb @@ -3,47 +3,41 @@ module OpenAI module Models module FineTuning - class JobListParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::FineTuning::Jobs#list + class JobListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!attribute [r] after + # @!attribute after # Identifier for the last job from the previous pagination request. # # @return [String, nil] optional :after, String - # @!parse - # # @return [String] - # attr_writer :after - - # @!attribute [r] limit + # @!attribute limit # Number of fine-tuning jobs to retrieve. # # @return [Integer, nil] optional :limit, Integer - # @!parse - # # @return [Integer] - # attr_writer :limit - # @!attribute metadata # Optional metadata filter. To filter, use the syntax `metadata[k]=v`. - # Alternatively, set `metadata=null` to indicate no metadata. + # Alternatively, set `metadata=null` to indicate no metadata. 
# # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true - # @!parse - # # @param after [String] - # # @param limit [Integer] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(after: nil, limit: nil, metadata: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(after: nil, limit: nil, metadata: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::JobListParams} for more details. + # + # @param after [String] Identifier for the last job from the previous pagination request. + # + # @param limit [Integer] Number of fine-tuning jobs to retrieve. + # + # @param metadata [Hash{Symbol=>String}, nil] Optional metadata filter. To filter, use the syntax `metadata[k]=v`. Alternative + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/fine_tuning/job_pause_params.rb b/lib/openai/models/fine_tuning/job_pause_params.rb new file mode 100644 index 00000000..739a90da --- /dev/null +++ b/lib/openai/models/fine_tuning/job_pause_params.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + # @see OpenAI::Resources::FineTuning::Jobs#pause + class JobPauseParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/models/fine_tuning/job_resume_params.rb b/lib/openai/models/fine_tuning/job_resume_params.rb new file mode 100644 index 00000000..97d4c317 --- /dev/null +++ b/lib/openai/models/fine_tuning/job_resume_params.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + # @see OpenAI::Resources::FineTuning::Jobs#resume + class JobResumeParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/models/fine_tuning/job_retrieve_params.rb b/lib/openai/models/fine_tuning/job_retrieve_params.rb index 5808c0a0..9d8a5b18 100644 --- a/lib/openai/models/fine_tuning/job_retrieve_params.rb +++ b/lib/openai/models/fine_tuning/job_retrieve_params.rb @@ -3,17 +3,13 @@ module OpenAI module Models module FineTuning - class JobRetrieveParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::FineTuning::Jobs#retrieve + class JobRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end 
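      # Taken together, the params classes above translate into resource calls roughly
      # like the following sketch. The `client.fine_tuning.jobs` accessor and the
      # positional job-ID argument are assumptions based on the @see references; the
      # IDs are placeholders.
      #
      #   jobs = client.fine_tuning.jobs
      #
      #   # Cursor pagination: `after` is the last job ID already seen.
      #   page = jobs.list(after: "ftjob-abc123", limit: 20)
      #
      #   # Filter on metadata with `metadata[k]=v` semantics, or pass nil to match
      #   # jobs that have no metadata at all.
      #   jobs.list(metadata: {environment: "production"})
      #
      #   # Pause, resume, and retrieve take no body params beyond request options.
      #   jobs.pause("ftjob-abc123")
      #   jobs.resume("ftjob-abc123")
      #   jobs.retrieve("ftjob-abc123")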
end end diff --git a/lib/openai/models/fine_tuning/jobs/checkpoint_list_params.rb b/lib/openai/models/fine_tuning/jobs/checkpoint_list_params.rb index ef0571df..024df578 100644 --- a/lib/openai/models/fine_tuning/jobs/checkpoint_list_params.rb +++ b/lib/openai/models/fine_tuning/jobs/checkpoint_list_params.rb @@ -4,39 +4,29 @@ module OpenAI module Models module FineTuning module Jobs - class CheckpointListParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::FineTuning::Jobs::Checkpoints#list + class CheckpointListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!attribute [r] after + # @!attribute after # Identifier for the last checkpoint ID from the previous pagination request. # # @return [String, nil] optional :after, String - # @!parse - # # @return [String] - # attr_writer :after - - # @!attribute [r] limit + # @!attribute limit # Number of checkpoints to retrieve. # # @return [Integer, nil] optional :limit, Integer - # @!parse - # # @return [Integer] - # attr_writer :limit - - # @!parse - # # @param after [String] - # # @param limit [Integer] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(after: nil, limit: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(after: nil, limit: nil, request_options: {}) + # @param after [String] Identifier for the last checkpoint ID from the previous pagination request. + # + # @param limit [Integer] Number of checkpoints to retrieve. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rb b/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rb index 11dfff02..cb4e4a9b 100644 --- a/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rb +++ b/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rb @@ -4,7 +4,8 @@ module OpenAI module Models module FineTuning module Jobs - class FineTuningJobCheckpoint < OpenAI::BaseModel + # @see OpenAI::Resources::FineTuning::Jobs::Checkpoints#list + class FineTuningJobCheckpoint < OpenAI::Internal::Type::BaseModel # @!attribute id # The checkpoint identifier, which can be referenced in the API endpoints. # @@ -33,7 +34,7 @@ class FineTuningJobCheckpoint < OpenAI::BaseModel # Metrics at the step number during the fine-tuning job. # # @return [OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics] - required :metrics, -> { OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics } + required :metrics, -> { OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics } # @!attribute object # The object type, which is always "fine_tuning.job.checkpoint". @@ -47,122 +48,71 @@ class FineTuningJobCheckpoint < OpenAI::BaseModel # @return [Integer] required :step_number, Integer - # @!parse - # # The `fine_tuning.job.checkpoint` object represents a model checkpoint for a - # # fine-tuning job that is ready to use. 
- # # - # # @param id [String] - # # @param created_at [Integer] - # # @param fine_tuned_model_checkpoint [String] - # # @param fine_tuning_job_id [String] - # # @param metrics [OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics] - # # @param step_number [Integer] - # # @param object [Symbol, :"fine_tuning.job.checkpoint"] - # # - # def initialize( - # id:, - # created_at:, - # fine_tuned_model_checkpoint:, - # fine_tuning_job_id:, - # metrics:, - # step_number:, - # object: :"fine_tuning.job.checkpoint", - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Metrics < OpenAI::BaseModel - # @!attribute [r] full_valid_loss + # @!method initialize(id:, created_at:, fine_tuned_model_checkpoint:, fine_tuning_job_id:, metrics:, step_number:, object: :"fine_tuning.job.checkpoint") + # The `fine_tuning.job.checkpoint` object represents a model checkpoint for a + # fine-tuning job that is ready to use. + # + # @param id [String] The checkpoint identifier, which can be referenced in the API endpoints. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the checkpoint was created. + # + # @param fine_tuned_model_checkpoint [String] The name of the fine-tuned checkpoint model that is created. + # + # @param fine_tuning_job_id [String] The name of the fine-tuning job that this checkpoint was created from. + # + # @param metrics [OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics] Metrics at the step number during the fine-tuning job. + # + # @param step_number [Integer] The step number that the checkpoint was created at. + # + # @param object [Symbol, :"fine_tuning.job.checkpoint"] The object type, which is always "fine_tuning.job.checkpoint". + + # @see OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint#metrics + class Metrics < OpenAI::Internal::Type::BaseModel + # @!attribute full_valid_loss # # @return [Float, nil] optional :full_valid_loss, Float - # @!parse - # # @return [Float] - # attr_writer :full_valid_loss - - # @!attribute [r] full_valid_mean_token_accuracy + # @!attribute full_valid_mean_token_accuracy # # @return [Float, nil] optional :full_valid_mean_token_accuracy, Float - # @!parse - # # @return [Float] - # attr_writer :full_valid_mean_token_accuracy - - # @!attribute [r] step + # @!attribute step # # @return [Float, nil] optional :step, Float - # @!parse - # # @return [Float] - # attr_writer :step - - # @!attribute [r] train_loss + # @!attribute train_loss # # @return [Float, nil] optional :train_loss, Float - # @!parse - # # @return [Float] - # attr_writer :train_loss - - # @!attribute [r] train_mean_token_accuracy + # @!attribute train_mean_token_accuracy # # @return [Float, nil] optional :train_mean_token_accuracy, Float - # @!parse - # # @return [Float] - # attr_writer :train_mean_token_accuracy - - # @!attribute [r] valid_loss + # @!attribute valid_loss # # @return [Float, nil] optional :valid_loss, Float - # @!parse - # # @return [Float] - # attr_writer :valid_loss - - # @!attribute [r] valid_mean_token_accuracy + # @!attribute valid_mean_token_accuracy # # @return [Float, nil] optional :valid_mean_token_accuracy, Float - # @!parse - # # @return [Float] - # attr_writer :valid_mean_token_accuracy - - # @!parse - # # Metrics at the step number during the fine-tuning job. 
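        # A sketch of reading the checkpoint metrics documented here, assuming the
        # list call returns a page object exposing `.data` and remembering that every
        # metric field is optional and may be nil.
        #
        #   page = client.fine_tuning.jobs.checkpoints.list("ftjob-abc123", limit: 10)
        #   page.data.each do |checkpoint|
        #     metrics = checkpoint.metrics
        #     puts "step=#{checkpoint.step_number} " \
        #          "train_loss=#{metrics.train_loss.inspect} " \
        #          "valid_loss=#{metrics.valid_loss.inspect}"
        #   end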
- # # - # # @param full_valid_loss [Float] - # # @param full_valid_mean_token_accuracy [Float] - # # @param step [Float] - # # @param train_loss [Float] - # # @param train_mean_token_accuracy [Float] - # # @param valid_loss [Float] - # # @param valid_mean_token_accuracy [Float] - # # - # def initialize( - # full_valid_loss: nil, - # full_valid_mean_token_accuracy: nil, - # step: nil, - # train_loss: nil, - # train_mean_token_accuracy: nil, - # valid_loss: nil, - # valid_mean_token_accuracy: nil, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(full_valid_loss: nil, full_valid_mean_token_accuracy: nil, step: nil, train_loss: nil, train_mean_token_accuracy: nil, valid_loss: nil, valid_mean_token_accuracy: nil) + # Metrics at the step number during the fine-tuning job. + # + # @param full_valid_loss [Float] + # @param full_valid_mean_token_accuracy [Float] + # @param step [Float] + # @param train_loss [Float] + # @param train_mean_token_accuracy [Float] + # @param valid_loss [Float] + # @param valid_mean_token_accuracy [Float] end end end diff --git a/lib/openai/models/fine_tuning/reinforcement_hyperparameters.rb b/lib/openai/models/fine_tuning/reinforcement_hyperparameters.rb new file mode 100644 index 00000000..160bc34c --- /dev/null +++ b/lib/openai/models/fine_tuning/reinforcement_hyperparameters.rb @@ -0,0 +1,178 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + class ReinforcementHyperparameters < OpenAI::Internal::Type::BaseModel + # @!attribute batch_size + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + # + # @return [Symbol, :auto, Integer, nil] + optional :batch_size, union: -> { OpenAI::FineTuning::ReinforcementHyperparameters::BatchSize } + + # @!attribute compute_multiplier + # Multiplier on amount of compute used for exploring search space during training. + # + # @return [Symbol, :auto, Float, nil] + optional :compute_multiplier, + union: -> { OpenAI::FineTuning::ReinforcementHyperparameters::ComputeMultiplier } + + # @!attribute eval_interval + # The number of training steps between evaluation runs. + # + # @return [Symbol, :auto, Integer, nil] + optional :eval_interval, union: -> { OpenAI::FineTuning::ReinforcementHyperparameters::EvalInterval } + + # @!attribute eval_samples + # Number of evaluation samples to generate per training step. + # + # @return [Symbol, :auto, Integer, nil] + optional :eval_samples, union: -> { OpenAI::FineTuning::ReinforcementHyperparameters::EvalSamples } + + # @!attribute learning_rate_multiplier + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + # + # @return [Symbol, :auto, Float, nil] + optional :learning_rate_multiplier, + union: -> { OpenAI::FineTuning::ReinforcementHyperparameters::LearningRateMultiplier } + + # @!attribute n_epochs + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + # + # @return [Symbol, :auto, Integer, nil] + optional :n_epochs, union: -> { OpenAI::FineTuning::ReinforcementHyperparameters::NEpochs } + + # @!attribute reasoning_effort + # Level of reasoning effort. 
+ # + # @return [Symbol, OpenAI::Models::FineTuning::ReinforcementHyperparameters::ReasoningEffort, nil] + optional :reasoning_effort, enum: -> { OpenAI::FineTuning::ReinforcementHyperparameters::ReasoningEffort } + + # @!method initialize(batch_size: nil, compute_multiplier: nil, eval_interval: nil, eval_samples: nil, learning_rate_multiplier: nil, n_epochs: nil, reasoning_effort: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::ReinforcementHyperparameters} for more details. + # + # The hyperparameters used for the reinforcement fine-tuning job. + # + # @param batch_size [Symbol, :auto, Integer] Number of examples in each batch. A larger batch size means that model parameter + # + # @param compute_multiplier [Symbol, :auto, Float] Multiplier on amount of compute used for exploring search space during training. + # + # @param eval_interval [Symbol, :auto, Integer] The number of training steps between evaluation runs. + # + # @param eval_samples [Symbol, :auto, Integer] Number of evaluation samples to generate per training step. + # + # @param learning_rate_multiplier [Symbol, :auto, Float] Scaling factor for the learning rate. A smaller learning rate may be useful to a + # + # @param n_epochs [Symbol, :auto, Integer] The number of epochs to train the model for. An epoch refers to one full cycle t + # + # @param reasoning_effort [Symbol, OpenAI::Models::FineTuning::ReinforcementHyperparameters::ReasoningEffort] Level of reasoning effort. + + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + # + # @see OpenAI::Models::FineTuning::ReinforcementHyperparameters#batch_size + module BatchSize + extend OpenAI::Internal::Type::Union + + variant const: :auto + + variant Integer + + # @!method self.variants + # @return [Array(Symbol, :auto, Integer)] + end + + # Multiplier on amount of compute used for exploring search space during training. + # + # @see OpenAI::Models::FineTuning::ReinforcementHyperparameters#compute_multiplier + module ComputeMultiplier + extend OpenAI::Internal::Type::Union + + variant const: :auto + + variant Float + + # @!method self.variants + # @return [Array(Symbol, :auto, Float)] + end + + # The number of training steps between evaluation runs. + # + # @see OpenAI::Models::FineTuning::ReinforcementHyperparameters#eval_interval + module EvalInterval + extend OpenAI::Internal::Type::Union + + variant const: :auto + + variant Integer + + # @!method self.variants + # @return [Array(Symbol, :auto, Integer)] + end + + # Number of evaluation samples to generate per training step. + # + # @see OpenAI::Models::FineTuning::ReinforcementHyperparameters#eval_samples + module EvalSamples + extend OpenAI::Internal::Type::Union + + variant const: :auto + + variant Integer + + # @!method self.variants + # @return [Array(Symbol, :auto, Integer)] + end + + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + # + # @see OpenAI::Models::FineTuning::ReinforcementHyperparameters#learning_rate_multiplier + module LearningRateMultiplier + extend OpenAI::Internal::Type::Union + + variant const: :auto + + variant Float + + # @!method self.variants + # @return [Array(Symbol, :auto, Float)] + end + + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. 
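      # Every setting in this model accepts either the literal :auto or a concrete
      # number, so a hand-tuned configuration might look like the sketch below; the
      # numeric values are illustrative only.
      #
      #   hyperparameters = OpenAI::Models::FineTuning::ReinforcementHyperparameters.new(
      #     batch_size: :auto,            # defer to the API
      #     compute_multiplier: 2.0,      # Float variant of the union
      #     eval_interval: 10,            # evaluate every 10 training steps
      #     eval_samples: :auto,
      #     learning_rate_multiplier: 0.5,
      #     n_epochs: 1,
      #     reasoning_effort: :medium     # :default, :low, :medium, or :high
      #   )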
+ # + # @see OpenAI::Models::FineTuning::ReinforcementHyperparameters#n_epochs + module NEpochs + extend OpenAI::Internal::Type::Union + + variant const: :auto + + variant Integer + + # @!method self.variants + # @return [Array(Symbol, :auto, Integer)] + end + + # Level of reasoning effort. + # + # @see OpenAI::Models::FineTuning::ReinforcementHyperparameters#reasoning_effort + module ReasoningEffort + extend OpenAI::Internal::Type::Enum + + DEFAULT = :default + LOW = :low + MEDIUM = :medium + HIGH = :high + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/fine_tuning/reinforcement_method.rb b/lib/openai/models/fine_tuning/reinforcement_method.rb new file mode 100644 index 00000000..40395ee8 --- /dev/null +++ b/lib/openai/models/fine_tuning/reinforcement_method.rb @@ -0,0 +1,53 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + class ReinforcementMethod < OpenAI::Internal::Type::BaseModel + # @!attribute grader + # The grader used for the fine-tuning job. + # + # @return [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] + required :grader, union: -> { OpenAI::FineTuning::ReinforcementMethod::Grader } + + # @!attribute hyperparameters + # The hyperparameters used for the reinforcement fine-tuning job. + # + # @return [OpenAI::Models::FineTuning::ReinforcementHyperparameters, nil] + optional :hyperparameters, -> { OpenAI::FineTuning::ReinforcementHyperparameters } + + # @!method initialize(grader:, hyperparameters: nil) + # Configuration for the reinforcement fine-tuning method. + # + # @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job. + # + # @param hyperparameters [OpenAI::Models::FineTuning::ReinforcementHyperparameters] The hyperparameters used for the reinforcement fine-tuning job. + + # The grader used for the fine-tuning job. + # + # @see OpenAI::Models::FineTuning::ReinforcementMethod#grader + module Grader + extend OpenAI::Internal::Type::Union + + # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation. + variant -> { OpenAI::Graders::StringCheckGrader } + + # A TextSimilarityGrader object which grades text based on similarity metrics. + variant -> { OpenAI::Graders::TextSimilarityGrader } + + # A PythonGrader object that runs a python script on the input. + variant -> { OpenAI::Graders::PythonGrader } + + # A ScoreModelGrader object that uses a model to assign a score to the input. + variant -> { OpenAI::Graders::ScoreModelGrader } + + # A MultiGrader object combines the output of multiple graders to produce a single score. 
+ variant -> { OpenAI::Graders::MultiGrader } + + # @!method self.variants + # @return [Array(OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader)] + end + end + end + end +end diff --git a/lib/openai/models/fine_tuning/supervised_hyperparameters.rb b/lib/openai/models/fine_tuning/supervised_hyperparameters.rb new file mode 100644 index 00000000..a7965756 --- /dev/null +++ b/lib/openai/models/fine_tuning/supervised_hyperparameters.rb @@ -0,0 +1,88 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + class SupervisedHyperparameters < OpenAI::Internal::Type::BaseModel + # @!attribute batch_size + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + # + # @return [Symbol, :auto, Integer, nil] + optional :batch_size, union: -> { OpenAI::FineTuning::SupervisedHyperparameters::BatchSize } + + # @!attribute learning_rate_multiplier + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + # + # @return [Symbol, :auto, Float, nil] + optional :learning_rate_multiplier, + union: -> { OpenAI::FineTuning::SupervisedHyperparameters::LearningRateMultiplier } + + # @!attribute n_epochs + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + # + # @return [Symbol, :auto, Integer, nil] + optional :n_epochs, union: -> { OpenAI::FineTuning::SupervisedHyperparameters::NEpochs } + + # @!method initialize(batch_size: nil, learning_rate_multiplier: nil, n_epochs: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::SupervisedHyperparameters} for more details. + # + # The hyperparameters used for the fine-tuning job. + # + # @param batch_size [Symbol, :auto, Integer] Number of examples in each batch. A larger batch size means that model parameter + # + # @param learning_rate_multiplier [Symbol, :auto, Float] Scaling factor for the learning rate. A smaller learning rate may be useful to a + # + # @param n_epochs [Symbol, :auto, Integer] The number of epochs to train the model for. An epoch refers to one full cycle t + + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + # + # @see OpenAI::Models::FineTuning::SupervisedHyperparameters#batch_size + module BatchSize + extend OpenAI::Internal::Type::Union + + variant const: :auto + + variant Integer + + # @!method self.variants + # @return [Array(Symbol, :auto, Integer)] + end + + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + # + # @see OpenAI::Models::FineTuning::SupervisedHyperparameters#learning_rate_multiplier + module LearningRateMultiplier + extend OpenAI::Internal::Type::Union + + variant const: :auto + + variant Float + + # @!method self.variants + # @return [Array(Symbol, :auto, Float)] + end + + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. 
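      # Stepping back to the ReinforcementMethod model above: its required `grader`
      # accepts any one of the grader variants, sketched here with a string_check
      # grader (defined later in this diff). The {{...}} template placeholders are
      # illustrative; the exact templating variables are not documented in this hunk.
      #
      #   method = OpenAI::Models::FineTuning::ReinforcementMethod.new(
      #     grader: {
      #       type: :string_check,
      #       name: "exact_match",
      #       input: "{{sample.output_text}}",  # assumed template variable
      #       reference: "{{item.answer}}",     # assumed template variable
      #       operation: :eq
      #     },
      #     hyperparameters: {eval_interval: :auto, reasoning_effort: :medium}
      #   )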
+ # + # @see OpenAI::Models::FineTuning::SupervisedHyperparameters#n_epochs + module NEpochs + extend OpenAI::Internal::Type::Union + + variant const: :auto + + variant Integer + + # @!method self.variants + # @return [Array(Symbol, :auto, Integer)] + end + end + end + end +end diff --git a/lib/openai/models/fine_tuning/supervised_method.rb b/lib/openai/models/fine_tuning/supervised_method.rb new file mode 100644 index 00000000..f81f3648 --- /dev/null +++ b/lib/openai/models/fine_tuning/supervised_method.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module FineTuning + class SupervisedMethod < OpenAI::Internal::Type::BaseModel + # @!attribute hyperparameters + # The hyperparameters used for the fine-tuning job. + # + # @return [OpenAI::Models::FineTuning::SupervisedHyperparameters, nil] + optional :hyperparameters, -> { OpenAI::FineTuning::SupervisedHyperparameters } + + # @!method initialize(hyperparameters: nil) + # Configuration for the supervised fine-tuning method. + # + # @param hyperparameters [OpenAI::Models::FineTuning::SupervisedHyperparameters] The hyperparameters used for the fine-tuning job. + end + end + end +end diff --git a/lib/openai/models/function_definition.rb b/lib/openai/models/function_definition.rb index 8d805ee6..6471ea8e 100644 --- a/lib/openai/models/function_definition.rb +++ b/lib/openai/models/function_definition.rb @@ -2,60 +2,54 @@ module OpenAI module Models - class FunctionDefinition < OpenAI::BaseModel + class FunctionDefinition < OpenAI::Internal::Type::BaseModel # @!attribute name # The name of the function to be called. Must be a-z, A-Z, 0-9, or contain - # underscores and dashes, with a maximum length of 64. + # underscores and dashes, with a maximum length of 64. # # @return [String] required :name, String - # @!attribute [r] description + # @!attribute description # A description of what the function does, used by the model to choose when and - # how to call the function. + # how to call the function. # # @return [String, nil] optional :description, String - # @!parse - # # @return [String] - # attr_writer :description - - # @!attribute [r] parameters + # @!attribute parameters # The parameters the functions accepts, described as a JSON Schema object. See the - # [guide](https://platform.openai.com/docs/guides/function-calling) for examples, - # and the - # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for - # documentation about the format. + # [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + # and the + # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + # documentation about the format. # - # Omitting `parameters` defines a function with an empty parameter list. + # Omitting `parameters` defines a function with an empty parameter list. # # @return [Hash{Symbol=>Object}, nil] - optional :parameters, OpenAI::HashOf[OpenAI::Unknown] - - # @!parse - # # @return [Hash{Symbol=>Object}] - # attr_writer :parameters + optional :parameters, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] # @!attribute strict # Whether to enable strict schema adherence when generating the function call. If - # set to true, the model will follow the exact schema defined in the `parameters` - # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn - # more about Structured Outputs in the - # [function calling guide](docs/guides/function-calling). 
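      # A sketch of a complete, strict function definition: the schema is an ordinary
      # Hash, and `strict: true` requires the model to follow it exactly. Note that
      # strict mode also expects `additionalProperties: false` in the schema, per the
      # linked function-calling guide.
      #
      #   fn = OpenAI::Models::FunctionDefinition.new(
      #     name: "get_weather",
      #     description: "Look up the current weather for a city.",
      #     parameters: {
      #       type: "object",
      #       properties: {city: {type: "string"}},
      #       required: ["city"],
      #       additionalProperties: false
      #     },
      #     strict: true
      #   )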
+ # set to true, the model will follow the exact schema defined in the `parameters` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn + # more about Structured Outputs in the + # [function calling guide](https://platform.openai.com/docs/guides/function-calling). # # @return [Boolean, nil] - optional :strict, OpenAI::BooleanModel, nil?: true + optional :strict, OpenAI::Internal::Type::Boolean, nil?: true - # @!parse - # # @param name [String] - # # @param description [String] - # # @param parameters [Hash{Symbol=>Object}] - # # @param strict [Boolean, nil] - # # - # def initialize(name:, description: nil, parameters: nil, strict: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(name:, description: nil, parameters: nil, strict: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FunctionDefinition} for more details. + # + # @param name [String] The name of the function to be called. Must be a-z, A-Z, 0-9, or contain undersc + # + # @param description [String] A description of what the function does, used by the model to choose when and ho + # + # @param parameters [Hash{Symbol=>Object}] The parameters the functions accepts, described as a JSON Schema object. See the + # + # @param strict [Boolean, nil] Whether to enable strict schema adherence when generating the function call. If end end end diff --git a/lib/openai/models/function_parameters.rb b/lib/openai/models/function_parameters.rb index a443f9b9..efd72125 100644 --- a/lib/openai/models/function_parameters.rb +++ b/lib/openai/models/function_parameters.rb @@ -2,6 +2,7 @@ module OpenAI module Models - FunctionParameters = OpenAI::HashOf[OpenAI::Unknown] + # @type [OpenAI::Internal::Type::Converter] + FunctionParameters = OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] end end diff --git a/lib/openai/models/graders/label_model_grader.rb b/lib/openai/models/graders/label_model_grader.rb new file mode 100644 index 00000000..bfed8c30 --- /dev/null +++ b/lib/openai/models/graders/label_model_grader.rb @@ -0,0 +1,214 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Graders + class LabelModelGrader < OpenAI::Internal::Type::BaseModel + # @!attribute input + # + # @return [Array] + required :input, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Graders::LabelModelGrader::Input] } + + # @!attribute labels + # The labels to assign to each item in the evaluation. + # + # @return [Array] + required :labels, OpenAI::Internal::Type::ArrayOf[String] + + # @!attribute model + # The model to use for the evaluation. Must support structured outputs. + # + # @return [String] + required :model, String + + # @!attribute name + # The name of the grader. + # + # @return [String] + required :name, String + + # @!attribute passing_labels + # The labels that indicate a passing result. Must be a subset of labels. + # + # @return [Array] + required :passing_labels, OpenAI::Internal::Type::ArrayOf[String] + + # @!attribute type + # The object type, which is always `label_model`. + # + # @return [Symbol, :label_model] + required :type, const: :label_model + + # @!method initialize(input:, labels:, model:, name:, passing_labels:, type: :label_model) + # A LabelModelGrader object which uses a model to assign labels to each item in + # the evaluation. + # + # @param input [Array] + # + # @param labels [Array] The labels to assign to each item in the evaluation. 
+ # + # @param model [String] The model to use for the evaluation. Must support structured outputs. + # + # @param name [String] The name of the grader. + # + # @param passing_labels [Array] The labels that indicate a passing result. Must be a subset of labels. + # + # @param type [Symbol, :label_model] The object type, which is always `label_model`. + + class Input < OpenAI::Internal::Type::BaseModel + # @!attribute content + # Inputs to the model - can contain template strings. + # + # @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage, Array] + required :content, union: -> { OpenAI::Graders::LabelModelGrader::Input::Content } + + # @!attribute role + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + # + # @return [Symbol, OpenAI::Models::Graders::LabelModelGrader::Input::Role] + required :role, enum: -> { OpenAI::Graders::LabelModelGrader::Input::Role } + + # @!attribute type + # The type of the message input. Always `message`. + # + # @return [Symbol, OpenAI::Models::Graders::LabelModelGrader::Input::Type, nil] + optional :type, enum: -> { OpenAI::Graders::LabelModelGrader::Input::Type } + + # @!method initialize(content:, role:, type: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Graders::LabelModelGrader::Input} for more details. + # + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + # + # @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage, Array] Inputs to the model - can contain template strings. + # + # @param role [Symbol, OpenAI::Models::Graders::LabelModelGrader::Input::Role] The role of the message input. One of `user`, `assistant`, `system`, or + # + # @param type [Symbol, OpenAI::Models::Graders::LabelModelGrader::Input::Type] The type of the message input. Always `message`. + + # Inputs to the model - can contain template strings. + # + # @see OpenAI::Models::Graders::LabelModelGrader::Input#content + module Content + extend OpenAI::Internal::Type::Union + + # A text input to the model. + variant String + + # A text input to the model. + variant -> { OpenAI::Responses::ResponseInputText } + + # A text output from the model. + variant -> { OpenAI::Graders::LabelModelGrader::Input::Content::OutputText } + + # An image input to the model. + variant -> { OpenAI::Graders::LabelModelGrader::Input::Content::InputImage } + + # A list of inputs, each of which may be either an input text or input image object. + variant -> { OpenAI::Models::Graders::LabelModelGrader::Input::Content::AnArrayOfInputTextAndInputImageArray } + + class OutputText < OpenAI::Internal::Type::BaseModel + # @!attribute text + # The text output from the model. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the output text. Always `output_text`. 
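      # Pulling the pieces of LabelModelGrader together in a sketch: `input` takes
      # message hashes, `labels` enumerates the possible outputs, and
      # `passing_labels` must be a subset of them. The model name and template
      # placeholder are illustrative only.
      #
      #   grader = OpenAI::Models::Graders::LabelModelGrader.new(
      #     name: "sentiment",
      #     model: "gpt-4o", # placeholder; must support structured outputs
      #     input: [
      #       {role: :user, content: "Classify the sentiment: {{item.text}}"}
      #     ],
      #     labels: %w[positive negative neutral],
      #     passing_labels: %w[positive]
      #   )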
+ # + # @return [Symbol, :output_text] + required :type, const: :output_text + + # @!method initialize(text:, type: :output_text) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText} for more + # details. + # + # A text output from the model. + # + # @param text [String] The text output from the model. + # + # @param type [Symbol, :output_text] The type of the output text. Always `output_text`. + end + + class InputImage < OpenAI::Internal::Type::BaseModel + # @!attribute image_url + # The URL of the image input. + # + # @return [String] + required :image_url, String + + # @!attribute type + # The type of the image input. Always `input_image`. + # + # @return [Symbol, :input_image] + required :type, const: :input_image + + # @!attribute detail + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + # + # @return [String, nil] + optional :detail, String + + # @!method initialize(image_url:, detail: nil, type: :input_image) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage} for more + # details. + # + # An image input to the model. + # + # @param image_url [String] The URL of the image input. + # + # @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or + # + # @param type [Symbol, :input_image] The type of the image input. Always `input_image`. + end + + # @!method self.variants + # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage, Array)] + + # @type [OpenAI::Internal::Type::Converter] + AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown] + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + # + # @see OpenAI::Models::Graders::LabelModelGrader::Input#role + module Role + extend OpenAI::Internal::Type::Enum + + USER = :user + ASSISTANT = :assistant + SYSTEM = :system + DEVELOPER = :developer + + # @!method self.values + # @return [Array] + end + + # The type of the message input. Always `message`. + # + # @see OpenAI::Models::Graders::LabelModelGrader::Input#type + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE = :message + + # @!method self.values + # @return [Array] + end + end + end + end + + LabelModelGrader = Graders::LabelModelGrader + end +end diff --git a/lib/openai/models/graders/multi_grader.rb b/lib/openai/models/graders/multi_grader.rb new file mode 100644 index 00000000..0f5bd82e --- /dev/null +++ b/lib/openai/models/graders/multi_grader.rb @@ -0,0 +1,78 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Graders + class MultiGrader < OpenAI::Internal::Type::BaseModel + # @!attribute calculate_output + # A formula to calculate the output based on grader results. + # + # @return [String] + required :calculate_output, String + + # @!attribute graders + # A StringCheckGrader object that performs a string comparison between input and + # reference using a specified operation. 
+ # + # @return [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader] + required :graders, union: -> { OpenAI::Graders::MultiGrader::Graders } + + # @!attribute name + # The name of the grader. + # + # @return [String] + required :name, String + + # @!attribute type + # The object type, which is always `multi`. + # + # @return [Symbol, :multi] + required :type, const: :multi + + # @!method initialize(calculate_output:, graders:, name:, type: :multi) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Graders::MultiGrader} for more details. + # + # A MultiGrader object combines the output of multiple graders to produce a single + # score. + # + # @param calculate_output [String] A formula to calculate the output based on grader results. + # + # @param graders [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader] A StringCheckGrader object that performs a string comparison between input and r + # + # @param name [String] The name of the grader. + # + # @param type [Symbol, :multi] The object type, which is always `multi`. + + # A StringCheckGrader object that performs a string comparison between input and + # reference using a specified operation. + # + # @see OpenAI::Models::Graders::MultiGrader#graders + module Graders + extend OpenAI::Internal::Type::Union + + # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation. + variant -> { OpenAI::Graders::StringCheckGrader } + + # A TextSimilarityGrader object which grades text based on similarity metrics. + variant -> { OpenAI::Graders::TextSimilarityGrader } + + # A PythonGrader object that runs a python script on the input. + variant -> { OpenAI::Graders::PythonGrader } + + # A ScoreModelGrader object that uses a model to assign a score to the input. + variant -> { OpenAI::Graders::ScoreModelGrader } + + # A LabelModelGrader object which uses a model to assign labels to each item + # in the evaluation. + variant -> { OpenAI::Graders::LabelModelGrader } + + # @!method self.variants + # @return [Array(OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader)] + end + end + end + + MultiGrader = Graders::MultiGrader + end +end diff --git a/lib/openai/models/graders/python_grader.rb b/lib/openai/models/graders/python_grader.rb new file mode 100644 index 00000000..f5fcae1c --- /dev/null +++ b/lib/openai/models/graders/python_grader.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Graders + class PythonGrader < OpenAI::Internal::Type::BaseModel + # @!attribute name + # The name of the grader. + # + # @return [String] + required :name, String + + # @!attribute source + # The source code of the python script. + # + # @return [String] + required :source, String + + # @!attribute type + # The object type, which is always `python`. + # + # @return [Symbol, :python] + required :type, const: :python + + # @!attribute image_tag + # The image tag to use for the python script. 
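      # A PythonGrader sketch. The script contract (entrypoint name and arguments) is
      # not documented in this hunk, so the `grade` function below is an assumed
      # shape, not a confirmed API.
      #
      #   grader = OpenAI::Models::Graders::PythonGrader.new(
      #     name: "brevity",
      #     source: <<~PYTHON,
      #       def grade(sample, item):
      #           return 1.0 if len(sample["output_text"]) < 200 else 0.0
      #     PYTHON
      #     image_tag: "latest" # optional; placeholder tag
      #   )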
+ # + # @return [String, nil] + optional :image_tag, String + + # @!method initialize(name:, source:, image_tag: nil, type: :python) + # A PythonGrader object that runs a python script on the input. + # + # @param name [String] The name of the grader. + # + # @param source [String] The source code of the python script. + # + # @param image_tag [String] The image tag to use for the python script. + # + # @param type [Symbol, :python] The object type, which is always `python`. + end + end + + PythonGrader = Graders::PythonGrader + end +end diff --git a/lib/openai/models/graders/score_model_grader.rb b/lib/openai/models/graders/score_model_grader.rb new file mode 100644 index 00000000..47417994 --- /dev/null +++ b/lib/openai/models/graders/score_model_grader.rb @@ -0,0 +1,214 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Graders + class ScoreModelGrader < OpenAI::Internal::Type::BaseModel + # @!attribute input + # The input text. This may include template strings. + # + # @return [Array] + required :input, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Graders::ScoreModelGrader::Input] } + + # @!attribute model + # The model to use for the evaluation. + # + # @return [String] + required :model, String + + # @!attribute name + # The name of the grader. + # + # @return [String] + required :name, String + + # @!attribute type + # The object type, which is always `score_model`. + # + # @return [Symbol, :score_model] + required :type, const: :score_model + + # @!attribute range + # The range of the score. Defaults to `[0, 1]`. + # + # @return [Array, nil] + optional :range, OpenAI::Internal::Type::ArrayOf[Float] + + # @!attribute sampling_params + # The sampling parameters for the model. + # + # @return [Object, nil] + optional :sampling_params, OpenAI::Internal::Type::Unknown + + # @!method initialize(input:, model:, name:, range: nil, sampling_params: nil, type: :score_model) + # A ScoreModelGrader object that uses a model to assign a score to the input. + # + # @param input [Array] The input text. This may include template strings. + # + # @param model [String] The model to use for the evaluation. + # + # @param name [String] The name of the grader. + # + # @param range [Array] The range of the score. Defaults to `[0, 1]`. + # + # @param sampling_params [Object] The sampling parameters for the model. + # + # @param type [Symbol, :score_model] The object type, which is always `score_model`. + + class Input < OpenAI::Internal::Type::BaseModel + # @!attribute content + # Inputs to the model - can contain template strings. + # + # @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage, Array] + required :content, union: -> { OpenAI::Graders::ScoreModelGrader::Input::Content } + + # @!attribute role + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + # + # @return [Symbol, OpenAI::Models::Graders::ScoreModelGrader::Input::Role] + required :role, enum: -> { OpenAI::Graders::ScoreModelGrader::Input::Role } + + # @!attribute type + # The type of the message input. Always `message`. 
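      # A ScoreModelGrader sketch: the grading model assigns each sample a score
      # within `range`. The `sampling_params` payload is untyped here, so the
      # temperature key is an assumption, as are the template placeholders.
      #
      #   grader = OpenAI::Models::Graders::ScoreModelGrader.new(
      #     name: "quality",
      #     model: "gpt-4o", # placeholder evaluation model
      #     input: [
      #       {role: :system, content: "Rate the answer between 0 and 1."},
      #       {role: :user, content: "{{sample.output_text}}"}
      #     ],
      #     range: [0.0, 1.0],
      #     sampling_params: {temperature: 0}
      #   )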
+ # + # @return [Symbol, OpenAI::Models::Graders::ScoreModelGrader::Input::Type, nil] + optional :type, enum: -> { OpenAI::Graders::ScoreModelGrader::Input::Type } + + # @!method initialize(content:, role:, type: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Graders::ScoreModelGrader::Input} for more details. + # + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + # + # @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage, Array] Inputs to the model - can contain template strings. + # + # @param role [Symbol, OpenAI::Models::Graders::ScoreModelGrader::Input::Role] The role of the message input. One of `user`, `assistant`, `system`, or + # + # @param type [Symbol, OpenAI::Models::Graders::ScoreModelGrader::Input::Type] The type of the message input. Always `message`. + + # Inputs to the model - can contain template strings. + # + # @see OpenAI::Models::Graders::ScoreModelGrader::Input#content + module Content + extend OpenAI::Internal::Type::Union + + # A text input to the model. + variant String + + # A text input to the model. + variant -> { OpenAI::Responses::ResponseInputText } + + # A text output from the model. + variant -> { OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText } + + # An image input to the model. + variant -> { OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage } + + # A list of inputs, each of which may be either an input text or input image object. + variant -> { OpenAI::Models::Graders::ScoreModelGrader::Input::Content::AnArrayOfInputTextAndInputImageArray } + + class OutputText < OpenAI::Internal::Type::BaseModel + # @!attribute text + # The text output from the model. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the output text. Always `output_text`. + # + # @return [Symbol, :output_text] + required :type, const: :output_text + + # @!method initialize(text:, type: :output_text) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText} for more + # details. + # + # A text output from the model. + # + # @param text [String] The text output from the model. + # + # @param type [Symbol, :output_text] The type of the output text. Always `output_text`. + end + + class InputImage < OpenAI::Internal::Type::BaseModel + # @!attribute image_url + # The URL of the image input. + # + # @return [String] + required :image_url, String + + # @!attribute type + # The type of the image input. Always `input_image`. + # + # @return [Symbol, :input_image] + required :type, const: :input_image + + # @!attribute detail + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + # + # @return [String, nil] + optional :detail, String + + # @!method initialize(image_url:, detail: nil, type: :input_image) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage} for more + # details. + # + # An image input to the model. 
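+            #
+            #   @example
+            #     # A hedged sketch; the URL is a placeholder. `detail` is one of
+            #     # `high`, `low`, or `auto` per the attribute docs above.
+            #     image = OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage.new(
+            #       image_url: "https://example.com/photo.png",
+            #       detail: "auto"
+            #     )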
+ # + # @param image_url [String] The URL of the image input. + # + # @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or + # + # @param type [Symbol, :input_image] The type of the image input. Always `input_image`. + end + + # @!method self.variants + # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage, Array)] + + # @type [OpenAI::Internal::Type::Converter] + AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown] + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + # + # @see OpenAI::Models::Graders::ScoreModelGrader::Input#role + module Role + extend OpenAI::Internal::Type::Enum + + USER = :user + ASSISTANT = :assistant + SYSTEM = :system + DEVELOPER = :developer + + # @!method self.values + # @return [Array] + end + + # The type of the message input. Always `message`. + # + # @see OpenAI::Models::Graders::ScoreModelGrader::Input#type + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE = :message + + # @!method self.values + # @return [Array] + end + end + end + end + + ScoreModelGrader = Graders::ScoreModelGrader + end +end diff --git a/lib/openai/models/graders/string_check_grader.rb b/lib/openai/models/graders/string_check_grader.rb new file mode 100644 index 00000000..2d8f5a30 --- /dev/null +++ b/lib/openai/models/graders/string_check_grader.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Graders + class StringCheckGrader < OpenAI::Internal::Type::BaseModel + # @!attribute input + # The input text. This may include template strings. + # + # @return [String] + required :input, String + + # @!attribute name + # The name of the grader. + # + # @return [String] + required :name, String + + # @!attribute operation + # The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`. + # + # @return [Symbol, OpenAI::Models::Graders::StringCheckGrader::Operation] + required :operation, enum: -> { OpenAI::Graders::StringCheckGrader::Operation } + + # @!attribute reference + # The reference text. This may include template strings. + # + # @return [String] + required :reference, String + + # @!attribute type + # The object type, which is always `string_check`. + # + # @return [Symbol, :string_check] + required :type, const: :string_check + + # @!method initialize(input:, name:, operation:, reference:, type: :string_check) + # A StringCheckGrader object that performs a string comparison between input and + # reference using a specified operation. + # + # @param input [String] The input text. This may include template strings. + # + # @param name [String] The name of the grader. + # + # @param operation [Symbol, OpenAI::Models::Graders::StringCheckGrader::Operation] The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`. + # + # @param reference [String] The reference text. This may include template strings. + # + # @param type [Symbol, :string_check] The object type, which is always `string_check`. + + # The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`. 
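+        #
+        #   @example
+        #     # A hedged sketch using the documented initialize; the template
+        #     # strings are illustrative placeholders, not values from the source.
+        #     grader = OpenAI::Models::Graders::StringCheckGrader.new(
+        #       name: "exact_answer",
+        #       input: "{{sample.output_text}}",
+        #       reference: "{{item.answer}}",
+        #       operation: :eq
+        #     )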
+ # + # @see OpenAI::Models::Graders::StringCheckGrader#operation + module Operation + extend OpenAI::Internal::Type::Enum + + EQ = :eq + NE = :ne + LIKE = :like + ILIKE = :ilike + + # @!method self.values + # @return [Array] + end + end + end + + StringCheckGrader = Graders::StringCheckGrader + end +end diff --git a/lib/openai/models/graders/text_similarity_grader.rb b/lib/openai/models/graders/text_similarity_grader.rb new file mode 100644 index 00000000..8cc51499 --- /dev/null +++ b/lib/openai/models/graders/text_similarity_grader.rb @@ -0,0 +1,81 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Graders + class TextSimilarityGrader < OpenAI::Internal::Type::BaseModel + # @!attribute evaluation_metric + # The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, + # `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + # + # @return [Symbol, OpenAI::Models::Graders::TextSimilarityGrader::EvaluationMetric] + required :evaluation_metric, enum: -> { OpenAI::Graders::TextSimilarityGrader::EvaluationMetric } + + # @!attribute input + # The text being graded. + # + # @return [String] + required :input, String + + # @!attribute name + # The name of the grader. + # + # @return [String] + required :name, String + + # @!attribute reference + # The text being graded against. + # + # @return [String] + required :reference, String + + # @!attribute type + # The type of grader. + # + # @return [Symbol, :text_similarity] + required :type, const: :text_similarity + + # @!method initialize(evaluation_metric:, input:, name:, reference:, type: :text_similarity) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Graders::TextSimilarityGrader} for more details. + # + # A TextSimilarityGrader object which grades text based on similarity metrics. + # + # @param evaluation_metric [Symbol, OpenAI::Models::Graders::TextSimilarityGrader::EvaluationMetric] The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, + # + # @param input [String] The text being graded. + # + # @param name [String] The name of the grader. + # + # @param reference [String] The text being graded against. + # + # @param type [Symbol, :text_similarity] The type of grader. + + # The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, + # `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + # + # @see OpenAI::Models::Graders::TextSimilarityGrader#evaluation_metric + module EvaluationMetric + extend OpenAI::Internal::Type::Enum + + COSINE = :cosine + FUZZY_MATCH = :fuzzy_match + BLEU = :bleu + GLEU = :gleu + METEOR = :meteor + ROUGE_1 = :rouge_1 + ROUGE_2 = :rouge_2 + ROUGE_3 = :rouge_3 + ROUGE_4 = :rouge_4 + ROUGE_5 = :rouge_5 + ROUGE_L = :rouge_l + + # @!method self.values + # @return [Array] + end + end + end + + TextSimilarityGrader = Graders::TextSimilarityGrader + end +end diff --git a/lib/openai/models/image.rb b/lib/openai/models/image.rb index c8ad72d4..bedd8bcf 100644 --- a/lib/openai/models/image.rb +++ b/lib/openai/models/image.rb @@ -2,49 +2,40 @@ module OpenAI module Models - class Image < OpenAI::BaseModel - # @!attribute [r] b64_json - # The base64-encoded JSON of the generated image, if `response_format` is - # `b64_json`. + class Image < OpenAI::Internal::Type::BaseModel + # @!attribute b64_json + # The base64-encoded JSON of the generated image. 
Default value for `gpt-image-1`, + # and only present if `response_format` is set to `b64_json` for `dall-e-2` and + # `dall-e-3`. # # @return [String, nil] optional :b64_json, String - # @!parse - # # @return [String] - # attr_writer :b64_json - - # @!attribute [r] revised_prompt - # The prompt that was used to generate the image, if there was any revision to the - # prompt. + # @!attribute revised_prompt + # For `dall-e-3` only, the revised prompt that was used to generate the image. # # @return [String, nil] optional :revised_prompt, String - # @!parse - # # @return [String] - # attr_writer :revised_prompt - - # @!attribute [r] url - # The URL of the generated image, if `response_format` is `url` (default). + # @!attribute url + # When using `dall-e-2` or `dall-e-3`, the URL of the generated image if + # `response_format` is set to `url` (default value). Unsupported for + # `gpt-image-1`. # # @return [String, nil] optional :url, String - # @!parse - # # @return [String] - # attr_writer :url - - # @!parse - # # Represents the url or the content of an image generated by the OpenAI API. - # # - # # @param b64_json [String] - # # @param revised_prompt [String] - # # @param url [String] - # # - # def initialize(b64_json: nil, revised_prompt: nil, url: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(b64_json: nil, revised_prompt: nil, url: nil) + # Some parameter documentations has been truncated, see {OpenAI::Models::Image} + # for more details. + # + # Represents the content or the URL of an image generated by the OpenAI API. + # + # @param b64_json [String] The base64-encoded JSON of the generated image. Default value for `gpt-image-1`, + # + # @param revised_prompt [String] For `dall-e-3` only, the revised prompt that was used to generate the image. + # + # @param url [String] When using `dall-e-2` or `dall-e-3`, the URL of the generated image if `response end end end diff --git a/lib/openai/models/image_create_variation_params.rb b/lib/openai/models/image_create_variation_params.rb index df16e7e4..29559ca4 100644 --- a/lib/openai/models/image_create_variation_params.rb +++ b/lib/openai/models/image_create_variation_params.rb @@ -2,107 +2,110 @@ module OpenAI module Models - class ImageCreateVariationParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Images#create_variation + class ImageCreateVariationParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute image # The image to use as the basis for the variation(s). Must be a valid PNG file, - # less than 4MB, and square. + # less than 4MB, and square. # - # @return [IO, StringIO] - required :image, IO + # @return [Pathname, StringIO, IO, String, OpenAI::FilePart] + required :image, OpenAI::Internal::Type::FileInput # @!attribute model # The model to use for image generation. Only `dall-e-2` is supported at this - # time. + # time. # # @return [String, Symbol, OpenAI::Models::ImageModel, nil] - optional :model, union: -> { OpenAI::Models::ImageCreateVariationParams::Model }, nil?: true + optional :model, union: -> { OpenAI::ImageCreateVariationParams::Model }, nil?: true # @!attribute n - # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - # `n=1` is supported. + # The number of images to generate. Must be between 1 and 10. 
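+      #
+      #   @example
+      #     # A hedged call sketch: `client` is assumed to be a configured
+      #     # OpenAI::Client and the file path is a placeholder; Pathname, IO,
+      #     # and StringIO inputs are all accepted for `image` per the attribute
+      #     # type above.
+      #     response = client.images.create_variation(
+      #       image: Pathname("otter.png"),
+      #       n: 2,
+      #       size: :"512x512"
+      #     )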
# # @return [Integer, nil] optional :n, Integer, nil?: true # @!attribute response_format # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. # # @return [Symbol, OpenAI::Models::ImageCreateVariationParams::ResponseFormat, nil] - optional :response_format, - enum: -> { OpenAI::Models::ImageCreateVariationParams::ResponseFormat }, - nil?: true + optional :response_format, enum: -> { OpenAI::ImageCreateVariationParams::ResponseFormat }, nil?: true # @!attribute size # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. + # `1024x1024`. # # @return [Symbol, OpenAI::Models::ImageCreateVariationParams::Size, nil] - optional :size, enum: -> { OpenAI::Models::ImageCreateVariationParams::Size }, nil?: true + optional :size, enum: -> { OpenAI::ImageCreateVariationParams::Size }, nil?: true - # @!attribute [r] user + # @!attribute user # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). # # @return [String, nil] optional :user, String - # @!parse - # # @return [String] - # attr_writer :user - - # @!parse - # # @param image [IO, StringIO] - # # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] - # # @param n [Integer, nil] - # # @param response_format [Symbol, OpenAI::Models::ImageCreateVariationParams::ResponseFormat, nil] - # # @param size [Symbol, OpenAI::Models::ImageCreateVariationParams::Size, nil] - # # @param user [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(image:, model: nil, n: nil, response_format: nil, size: nil, user: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(image:, model: nil, n: nil, response_format: nil, size: nil, user: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageCreateVariationParams} for more details. + # + # @param image [Pathname, StringIO, IO, String, OpenAI::FilePart] The image to use as the basis for the variation(s). Must be a valid PNG file, le + # + # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` is supported at this time + # + # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10. + # + # @param response_format [Symbol, OpenAI::Models::ImageCreateVariationParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or ` + # + # @param size [Symbol, OpenAI::Models::ImageCreateVariationParams::Size, nil] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x # + # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # The model to use for image generation. Only `dall-e-2` is supported at this - # time. - class Model < OpenAI::Union + # time. 
+ module Model + extend OpenAI::Internal::Type::Union + variant String # The model to use for image generation. Only `dall-e-2` is supported at this time. - variant enum: -> { OpenAI::Models::ImageModel } + variant enum: -> { OpenAI::ImageModel } + + # @!method self.variants + # @return [Array(String, Symbol, OpenAI::Models::ImageModel)] end - # @abstract - # # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. - class ResponseFormat < OpenAI::Enum + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. + module ResponseFormat + extend OpenAI::Internal::Type::Enum + URL = :url B64_JSON = :b64_json - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. - class Size < OpenAI::Enum - NUMBER_256X256 = :"256x256" - NUMBER_512X512 = :"512x512" - NUMBER_1024X1024 = :"1024x1024" + # `1024x1024`. + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_256X256 = :"256x256" + SIZE_512X512 = :"512x512" + SIZE_1024X1024 = :"1024x1024" - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/image_edit_completed_event.rb b/lib/openai/models/image_edit_completed_event.rb new file mode 100644 index 00000000..2038c5f9 --- /dev/null +++ b/lib/openai/models/image_edit_completed_event.rb @@ -0,0 +1,198 @@ +# frozen_string_literal: true + +module OpenAI + module Models + class ImageEditCompletedEvent < OpenAI::Internal::Type::BaseModel + # @!attribute b64_json + # Base64-encoded final edited image data, suitable for rendering as an image. + # + # @return [String] + required :b64_json, String + + # @!attribute background + # The background setting for the edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::Background] + required :background, enum: -> { OpenAI::ImageEditCompletedEvent::Background } + + # @!attribute created_at + # The Unix timestamp when the event was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute output_format + # The output format for the edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::OutputFormat] + required :output_format, enum: -> { OpenAI::ImageEditCompletedEvent::OutputFormat } + + # @!attribute quality + # The quality setting for the edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::Quality] + required :quality, enum: -> { OpenAI::ImageEditCompletedEvent::Quality } + + # @!attribute size + # The size of the edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::Size] + required :size, enum: -> { OpenAI::ImageEditCompletedEvent::Size } + + # @!attribute type + # The type of the event. Always `image_edit.completed`. + # + # @return [Symbol, :"image_edit.completed"] + required :type, const: :"image_edit.completed" + + # @!attribute usage + # For `gpt-image-1` only, the token usage information for the image generation. + # + # @return [OpenAI::Models::ImageEditCompletedEvent::Usage] + required :usage, -> { OpenAI::ImageEditCompletedEvent::Usage } + + # @!method initialize(b64_json:, background:, created_at:, output_format:, quality:, size:, usage:, type: :"image_edit.completed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageEditCompletedEvent} for more details. 
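+      #
+      #   @example
+      #     # A hedged sketch of handling this event from an edit stream;
+      #     # `event` is assumed to be an already-parsed stream event.
+      #     require "base64"
+      #     if event.is_a?(OpenAI::Models::ImageEditCompletedEvent)
+      #       File.binwrite("edited.#{event.output_format}", Base64.decode64(event.b64_json))
+      #     end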
+ # + # Emitted when image editing has completed and the final image is available. + # + # @param b64_json [String] Base64-encoded final edited image data, suitable for rendering as an image. + # + # @param background [Symbol, OpenAI::Models::ImageEditCompletedEvent::Background] The background setting for the edited image. + # + # @param created_at [Integer] The Unix timestamp when the event was created. + # + # @param output_format [Symbol, OpenAI::Models::ImageEditCompletedEvent::OutputFormat] The output format for the edited image. + # + # @param quality [Symbol, OpenAI::Models::ImageEditCompletedEvent::Quality] The quality setting for the edited image. + # + # @param size [Symbol, OpenAI::Models::ImageEditCompletedEvent::Size] The size of the edited image. + # + # @param usage [OpenAI::Models::ImageEditCompletedEvent::Usage] For `gpt-image-1` only, the token usage information for the image generation. + # + # @param type [Symbol, :"image_edit.completed"] The type of the event. Always `image_edit.completed`. + + # The background setting for the edited image. + # + # @see OpenAI::Models::ImageEditCompletedEvent#background + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT = :transparent + OPAQUE = :opaque + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The output format for the edited image. + # + # @see OpenAI::Models::ImageEditCompletedEvent#output_format + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG = :png + WEBP = :webp + JPEG = :jpeg + + # @!method self.values + # @return [Array] + end + + # The quality setting for the edited image. + # + # @see OpenAI::Models::ImageEditCompletedEvent#quality + module Quality + extend OpenAI::Internal::Type::Enum + + LOW = :low + MEDIUM = :medium + HIGH = :high + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The size of the edited image. + # + # @see OpenAI::Models::ImageEditCompletedEvent#size + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024 = :"1024x1024" + SIZE_1024X1536 = :"1024x1536" + SIZE_1536X1024 = :"1536x1024" + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # @see OpenAI::Models::ImageEditCompletedEvent#usage + class Usage < OpenAI::Internal::Type::BaseModel + # @!attribute input_tokens + # The number of tokens (images and text) in the input prompt. + # + # @return [Integer] + required :input_tokens, Integer + + # @!attribute input_tokens_details + # The input tokens detailed information for the image generation. + # + # @return [OpenAI::Models::ImageEditCompletedEvent::Usage::InputTokensDetails] + required :input_tokens_details, -> { OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails } + + # @!attribute output_tokens + # The number of image tokens in the output image. + # + # @return [Integer] + required :output_tokens, Integer + + # @!attribute total_tokens + # The total number of tokens (images and text) used for the image generation. + # + # @return [Integer] + required :total_tokens, Integer + + # @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, total_tokens:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageEditCompletedEvent::Usage} for more details. + # + # For `gpt-image-1` only, the token usage information for the image generation. + # + # @param input_tokens [Integer] The number of tokens (images and text) in the input prompt. 
+ # + # @param input_tokens_details [OpenAI::Models::ImageEditCompletedEvent::Usage::InputTokensDetails] The input tokens detailed information for the image generation. + # + # @param output_tokens [Integer] The number of image tokens in the output image. + # + # @param total_tokens [Integer] The total number of tokens (images and text) used for the image generation. + + # @see OpenAI::Models::ImageEditCompletedEvent::Usage#input_tokens_details + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + # @!attribute image_tokens + # The number of image tokens in the input prompt. + # + # @return [Integer] + required :image_tokens, Integer + + # @!attribute text_tokens + # The number of text tokens in the input prompt. + # + # @return [Integer] + required :text_tokens, Integer + + # @!method initialize(image_tokens:, text_tokens:) + # The input tokens detailed information for the image generation. + # + # @param image_tokens [Integer] The number of image tokens in the input prompt. + # + # @param text_tokens [Integer] The number of text tokens in the input prompt. + end + end + end + end +end diff --git a/lib/openai/models/image_edit_params.rb b/lib/openai/models/image_edit_params.rb index 87317727..aee4b491 100644 --- a/lib/openai/models/image_edit_params.rb +++ b/lib/openai/models/image_edit_params.rb @@ -2,43 +2,68 @@ module OpenAI module Models - class ImageEditParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Images#edit + # + # @see OpenAI::Resources::Images#edit_stream_raw + class ImageEditParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute image - # The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask - # is not provided, image must have transparency, which will be used as the mask. + # The image(s) to edit. Must be a supported image file or an array of images. # - # @return [IO, StringIO] - required :image, IO + # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + # 50MB. You can provide up to 16 images. + # + # For `dall-e-2`, you can only provide one image, and it should be a square `png` + # file less than 4MB. + # + # @return [Pathname, StringIO, IO, String, OpenAI::FilePart, Array] + required :image, union: -> { OpenAI::ImageEditParams::Image } # @!attribute prompt # A text description of the desired image(s). The maximum length is 1000 - # characters. + # characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. # # @return [String] required :prompt, String - # @!attribute [r] mask - # An additional image whose fully transparent areas (e.g. where alpha is zero) - # indicate where `image` should be edited. Must be a valid PNG file, less than - # 4MB, and have the same dimensions as `image`. + # @!attribute background + # Allows to set transparency for the background of the generated image(s). This + # parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + # `opaque` or `auto` (default value). When `auto` is used, the model will + # automatically determine the best background for the image. # - # @return [IO, StringIO, nil] - optional :mask, IO + # If `transparent`, the output format needs to support transparency, so it should + # be set to either `png` (default value) or `webp`. 
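+      #
+      #   @example
+      #     # A hedged call sketch: `client` is assumed, and the file path and
+      #     # prompt are placeholders; the symbol enum values mirror the
+      #     # modules defined below.
+      #     response = client.images.edit(
+      #       image: Pathname("living_room.png"),
+      #       prompt: "Add a sunlit window",
+      #       model: "gpt-image-1",
+      #       background: :transparent,
+      #       output_format: :png
+      #     )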
+ # + # @return [Symbol, OpenAI::Models::ImageEditParams::Background, nil] + optional :background, enum: -> { OpenAI::ImageEditParams::Background }, nil?: true - # @!parse - # # @return [IO, StringIO] - # attr_writer :mask + # @!attribute input_fidelity + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + # + # @return [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] + optional :input_fidelity, enum: -> { OpenAI::ImageEditParams::InputFidelity }, nil?: true + + # @!attribute mask + # An additional image whose fully transparent areas (e.g. where alpha is zero) + # indicate where `image` should be edited. If there are multiple images provided, + # the mask will be applied on the first image. Must be a valid PNG file, less than + # 4MB, and have the same dimensions as `image`. + # + # @return [Pathname, StringIO, IO, String, OpenAI::FilePart, nil] + optional :mask, OpenAI::Internal::Type::FileInput # @!attribute model - # The model to use for image generation. Only `dall-e-2` is supported at this - # time. + # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + # is used. # # @return [String, Symbol, OpenAI::Models::ImageModel, nil] - optional :model, union: -> { OpenAI::Models::ImageEditParams::Model }, nil?: true + optional :model, union: -> { OpenAI::ImageEditParams::Model }, nil?: true # @!attribute n # The number of images to generate. Must be between 1 and 10. @@ -46,94 +71,226 @@ class ImageEditParams < OpenAI::BaseModel # @return [Integer, nil] optional :n, Integer, nil?: true + # @!attribute output_compression + # The compression level (0-100%) for the generated images. This parameter is only + # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + # defaults to 100. + # + # @return [Integer, nil] + optional :output_compression, Integer, nil?: true + + # @!attribute output_format + # The format in which the generated images are returned. This parameter is only + # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + # default value is `png`. + # + # @return [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] + optional :output_format, enum: -> { OpenAI::ImageEditParams::OutputFormat }, nil?: true + + # @!attribute partial_images + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + # + # Note that the final image may be sent before the full number of partial images + # are generated if the full image is generated more quickly. + # + # @return [Integer, nil] + optional :partial_images, Integer, nil?: true + + # @!attribute quality + # The quality of the image that will be generated. `high`, `medium` and `low` are + # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + # Defaults to `auto`. + # + # @return [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] + optional :quality, enum: -> { OpenAI::ImageEditParams::Quality }, nil?: true + # @!attribute response_format # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. 
URLs are only valid for 60 minutes after the image has been - # generated. + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + # will always return base64-encoded images. # # @return [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] - optional :response_format, enum: -> { OpenAI::Models::ImageEditParams::ResponseFormat }, nil?: true + optional :response_format, enum: -> { OpenAI::ImageEditParams::ResponseFormat }, nil?: true # @!attribute size - # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. # # @return [Symbol, OpenAI::Models::ImageEditParams::Size, nil] - optional :size, enum: -> { OpenAI::Models::ImageEditParams::Size }, nil?: true + optional :size, enum: -> { OpenAI::ImageEditParams::Size }, nil?: true - # @!attribute [r] user + # @!attribute user # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). # # @return [String, nil] optional :user, String - # @!parse - # # @return [String] - # attr_writer :user - - # @!parse - # # @param image [IO, StringIO] - # # @param prompt [String] - # # @param mask [IO, StringIO] - # # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] - # # @param n [Integer, nil] - # # @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] - # # @param size [Symbol, OpenAI::Models::ImageEditParams::Size, nil] - # # @param user [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize( - # image:, - # prompt:, - # mask: nil, - # model: nil, - # n: nil, - # response_format: nil, - # size: nil, - # user: nil, - # request_options: {}, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract - # - # The model to use for image generation. Only `dall-e-2` is supported at this - # time. - class Model < OpenAI::Union - variant String + # @!method initialize(image:, prompt:, background: nil, input_fidelity: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageEditParams} for more details. + # + # @param image [Pathname, StringIO, IO, String, OpenAI::FilePart, Array] The image(s) to edit. Must be a supported image file or an array of images. + # + # @param prompt [String] A text description of the desired image(s). The maximum length is 1000 character + # + # @param background [Symbol, OpenAI::Models::ImageEditParams::Background, nil] Allows to set transparency for the background of the generated image(s). 
+ # + # @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] Control how much effort the model will exert to match the style and features, + # + # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind + # + # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are sup + # + # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10. + # + # @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter + # + # @param output_format [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is + # + # @param partial_images [Integer, nil] The number of partial images to generate. This parameter is used for + # + # @param quality [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] The quality of the image that will be generated. `high`, `medium` and `low` are + # + # @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or ` + # + # @param size [Symbol, OpenAI::Models::ImageEditParams::Size, nil] The size of the generated images. Must be one of `1024x1024`, `1536x1024` (lands + # + # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + # The image(s) to edit. Must be a supported image file or an array of images. + # + # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + # 50MB. You can provide up to 16 images. + # + # For `dall-e-2`, you can only provide one image, and it should be a square `png` + # file less than 4MB. + module Image + extend OpenAI::Internal::Type::Union - # The model to use for image generation. Only `dall-e-2` is supported at this time. - variant enum: -> { OpenAI::Models::ImageModel } + variant OpenAI::Internal::Type::FileInput + + variant -> { OpenAI::Models::ImageEditParams::Image::StringArray } + + # @!method self.variants + # @return [Array(StringIO, Array)] + + # @type [OpenAI::Internal::Type::Converter] + StringArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::FileInput] end - # @abstract + # Allows to set transparency for the background of the generated image(s). This + # parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + # `opaque` or `auto` (default value). When `auto` is used, the model will + # automatically determine the best background for the image. # + # If `transparent`, the output format needs to support transparency, so it should + # be set to either `png` (default value) or `webp`. + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT = :transparent + OPAQUE = :opaque + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + module InputFidelity + extend OpenAI::Internal::Type::Enum + + HIGH = :high + LOW = :low + + # @!method self.values + # @return [Array] + end + + # The model to use for image generation. 
Only `dall-e-2` and `gpt-image-1` are + # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + # is used. + module Model + extend OpenAI::Internal::Type::Union + + variant String + + # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used. + variant enum: -> { OpenAI::ImageModel } + + # @!method self.variants + # @return [Array(String, Symbol, OpenAI::Models::ImageModel)] + end + + # The format in which the generated images are returned. This parameter is only + # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + # default value is `png`. + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG = :png + JPEG = :jpeg + WEBP = :webp + + # @!method self.values + # @return [Array] + end + + # The quality of the image that will be generated. `high`, `medium` and `low` are + # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + # Defaults to `auto`. + module Quality + extend OpenAI::Internal::Type::Enum + + STANDARD = :standard + LOW = :low + MEDIUM = :medium + HIGH = :high + AUTO = :auto + + # @!method self.values + # @return [Array] + end + # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. - class ResponseFormat < OpenAI::Enum + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + # will always return base64-encoded images. + module ResponseFormat + extend OpenAI::Internal::Type::Enum + URL = :url B64_JSON = :b64_json - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # - # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. - class Size < OpenAI::Enum - NUMBER_256X256 = :"256x256" - NUMBER_512X512 = :"512x512" - NUMBER_1024X1024 = :"1024x1024" + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_256X256 = :"256x256" + SIZE_512X512 = :"512x512" + SIZE_1024X1024 = :"1024x1024" + SIZE_1536X1024 = :"1536x1024" + SIZE_1024X1536 = :"1024x1536" + AUTO = :auto - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/image_edit_partial_image_event.rb b/lib/openai/models/image_edit_partial_image_event.rb new file mode 100644 index 00000000..95d5bd96 --- /dev/null +++ b/lib/openai/models/image_edit_partial_image_event.rb @@ -0,0 +1,135 @@ +# frozen_string_literal: true + +module OpenAI + module Models + class ImageEditPartialImageEvent < OpenAI::Internal::Type::BaseModel + # @!attribute b64_json + # Base64-encoded partial image data, suitable for rendering as an image. + # + # @return [String] + required :b64_json, String + + # @!attribute background + # The background setting for the requested edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Background] + required :background, enum: -> { OpenAI::ImageEditPartialImageEvent::Background } + + # @!attribute created_at + # The Unix timestamp when the event was created. 
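+      #
+      #   @example
+      #     # A hedged sketch: writing each partial frame to disk as it
+      #     # arrives; `event` is assumed to be a parsed partial-image event.
+      #     require "base64"
+      #     path = format("partial_%d.png", event.partial_image_index)
+      #     File.binwrite(path, Base64.decode64(event.b64_json))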
+ # + # @return [Integer] + required :created_at, Integer + + # @!attribute output_format + # The output format for the requested edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::OutputFormat] + required :output_format, enum: -> { OpenAI::ImageEditPartialImageEvent::OutputFormat } + + # @!attribute partial_image_index + # 0-based index for the partial image (streaming). + # + # @return [Integer] + required :partial_image_index, Integer + + # @!attribute quality + # The quality setting for the requested edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Quality] + required :quality, enum: -> { OpenAI::ImageEditPartialImageEvent::Quality } + + # @!attribute size + # The size of the requested edited image. + # + # @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Size] + required :size, enum: -> { OpenAI::ImageEditPartialImageEvent::Size } + + # @!attribute type + # The type of the event. Always `image_edit.partial_image`. + # + # @return [Symbol, :"image_edit.partial_image"] + required :type, const: :"image_edit.partial_image" + + # @!method initialize(b64_json:, background:, created_at:, output_format:, partial_image_index:, quality:, size:, type: :"image_edit.partial_image") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageEditPartialImageEvent} for more details. + # + # Emitted when a partial image is available during image editing streaming. + # + # @param b64_json [String] Base64-encoded partial image data, suitable for rendering as an image. + # + # @param background [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Background] The background setting for the requested edited image. + # + # @param created_at [Integer] The Unix timestamp when the event was created. + # + # @param output_format [Symbol, OpenAI::Models::ImageEditPartialImageEvent::OutputFormat] The output format for the requested edited image. + # + # @param partial_image_index [Integer] 0-based index for the partial image (streaming). + # + # @param quality [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Quality] The quality setting for the requested edited image. + # + # @param size [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Size] The size of the requested edited image. + # + # @param type [Symbol, :"image_edit.partial_image"] The type of the event. Always `image_edit.partial_image`. + + # The background setting for the requested edited image. + # + # @see OpenAI::Models::ImageEditPartialImageEvent#background + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT = :transparent + OPAQUE = :opaque + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The output format for the requested edited image. + # + # @see OpenAI::Models::ImageEditPartialImageEvent#output_format + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG = :png + WEBP = :webp + JPEG = :jpeg + + # @!method self.values + # @return [Array] + end + + # The quality setting for the requested edited image. + # + # @see OpenAI::Models::ImageEditPartialImageEvent#quality + module Quality + extend OpenAI::Internal::Type::Enum + + LOW = :low + MEDIUM = :medium + HIGH = :high + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The size of the requested edited image. 
+ # + # @see OpenAI::Models::ImageEditPartialImageEvent#size + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024 = :"1024x1024" + SIZE_1024X1536 = :"1024x1536" + SIZE_1536X1024 = :"1536x1024" + AUTO = :auto + + # @!method self.values + # @return [Array] + end + end + end +end diff --git a/lib/openai/models/image_edit_stream_event.rb b/lib/openai/models/image_edit_stream_event.rb new file mode 100644 index 00000000..b72d2c27 --- /dev/null +++ b/lib/openai/models/image_edit_stream_event.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # Emitted when a partial image is available during image editing streaming. + module ImageEditStreamEvent + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Emitted when a partial image is available during image editing streaming. + variant :"image_edit.partial_image", -> { OpenAI::ImageEditPartialImageEvent } + + # Emitted when image editing has completed and the final image is available. + variant :"image_edit.completed", -> { OpenAI::ImageEditCompletedEvent } + + # @!method self.variants + # @return [Array(OpenAI::Models::ImageEditPartialImageEvent, OpenAI::Models::ImageEditCompletedEvent)] + end + end +end diff --git a/lib/openai/models/image_gen_completed_event.rb b/lib/openai/models/image_gen_completed_event.rb new file mode 100644 index 00000000..8a730653 --- /dev/null +++ b/lib/openai/models/image_gen_completed_event.rb @@ -0,0 +1,198 @@ +# frozen_string_literal: true + +module OpenAI + module Models + class ImageGenCompletedEvent < OpenAI::Internal::Type::BaseModel + # @!attribute b64_json + # Base64-encoded image data, suitable for rendering as an image. + # + # @return [String] + required :b64_json, String + + # @!attribute background + # The background setting for the generated image. + # + # @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::Background] + required :background, enum: -> { OpenAI::ImageGenCompletedEvent::Background } + + # @!attribute created_at + # The Unix timestamp when the event was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute output_format + # The output format for the generated image. + # + # @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::OutputFormat] + required :output_format, enum: -> { OpenAI::ImageGenCompletedEvent::OutputFormat } + + # @!attribute quality + # The quality setting for the generated image. + # + # @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::Quality] + required :quality, enum: -> { OpenAI::ImageGenCompletedEvent::Quality } + + # @!attribute size + # The size of the generated image. + # + # @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::Size] + required :size, enum: -> { OpenAI::ImageGenCompletedEvent::Size } + + # @!attribute type + # The type of the event. Always `image_generation.completed`. + # + # @return [Symbol, :"image_generation.completed"] + required :type, const: :"image_generation.completed" + + # @!attribute usage + # For `gpt-image-1` only, the token usage information for the image generation. + # + # @return [OpenAI::Models::ImageGenCompletedEvent::Usage] + required :usage, -> { OpenAI::ImageGenCompletedEvent::Usage } + + # @!method initialize(b64_json:, background:, created_at:, output_format:, quality:, size:, usage:, type: :"image_generation.completed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageGenCompletedEvent} for more details. 
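+      #
+      #   @example
+      #     # A hedged sketch of dispatching on the `type` discriminator shared
+      #     # by the generation stream events; `event` is assumed parsed.
+      #     case event.type
+      #     in :"image_generation.partial_image"
+      #       puts "partial frame #{event.partial_image_index}"
+      #     in :"image_generation.completed"
+      #       puts "done (#{event.usage.total_tokens} tokens)"
+      #     end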
+ # + # Emitted when image generation has completed and the final image is available. + # + # @param b64_json [String] Base64-encoded image data, suitable for rendering as an image. + # + # @param background [Symbol, OpenAI::Models::ImageGenCompletedEvent::Background] The background setting for the generated image. + # + # @param created_at [Integer] The Unix timestamp when the event was created. + # + # @param output_format [Symbol, OpenAI::Models::ImageGenCompletedEvent::OutputFormat] The output format for the generated image. + # + # @param quality [Symbol, OpenAI::Models::ImageGenCompletedEvent::Quality] The quality setting for the generated image. + # + # @param size [Symbol, OpenAI::Models::ImageGenCompletedEvent::Size] The size of the generated image. + # + # @param usage [OpenAI::Models::ImageGenCompletedEvent::Usage] For `gpt-image-1` only, the token usage information for the image generation. + # + # @param type [Symbol, :"image_generation.completed"] The type of the event. Always `image_generation.completed`. + + # The background setting for the generated image. + # + # @see OpenAI::Models::ImageGenCompletedEvent#background + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT = :transparent + OPAQUE = :opaque + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The output format for the generated image. + # + # @see OpenAI::Models::ImageGenCompletedEvent#output_format + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG = :png + WEBP = :webp + JPEG = :jpeg + + # @!method self.values + # @return [Array] + end + + # The quality setting for the generated image. + # + # @see OpenAI::Models::ImageGenCompletedEvent#quality + module Quality + extend OpenAI::Internal::Type::Enum + + LOW = :low + MEDIUM = :medium + HIGH = :high + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The size of the generated image. + # + # @see OpenAI::Models::ImageGenCompletedEvent#size + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024 = :"1024x1024" + SIZE_1024X1536 = :"1024x1536" + SIZE_1536X1024 = :"1536x1024" + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # @see OpenAI::Models::ImageGenCompletedEvent#usage + class Usage < OpenAI::Internal::Type::BaseModel + # @!attribute input_tokens + # The number of tokens (images and text) in the input prompt. + # + # @return [Integer] + required :input_tokens, Integer + + # @!attribute input_tokens_details + # The input tokens detailed information for the image generation. + # + # @return [OpenAI::Models::ImageGenCompletedEvent::Usage::InputTokensDetails] + required :input_tokens_details, -> { OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails } + + # @!attribute output_tokens + # The number of image tokens in the output image. + # + # @return [Integer] + required :output_tokens, Integer + + # @!attribute total_tokens + # The total number of tokens (images and text) used for the image generation. + # + # @return [Integer] + required :total_tokens, Integer + + # @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, total_tokens:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageGenCompletedEvent::Usage} for more details. + # + # For `gpt-image-1` only, the token usage information for the image generation. + # + # @param input_tokens [Integer] The number of tokens (images and text) in the input prompt. 
+ # + # @param input_tokens_details [OpenAI::Models::ImageGenCompletedEvent::Usage::InputTokensDetails] The input tokens detailed information for the image generation. + # + # @param output_tokens [Integer] The number of image tokens in the output image. + # + # @param total_tokens [Integer] The total number of tokens (images and text) used for the image generation. + + # @see OpenAI::Models::ImageGenCompletedEvent::Usage#input_tokens_details + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + # @!attribute image_tokens + # The number of image tokens in the input prompt. + # + # @return [Integer] + required :image_tokens, Integer + + # @!attribute text_tokens + # The number of text tokens in the input prompt. + # + # @return [Integer] + required :text_tokens, Integer + + # @!method initialize(image_tokens:, text_tokens:) + # The input tokens detailed information for the image generation. + # + # @param image_tokens [Integer] The number of image tokens in the input prompt. + # + # @param text_tokens [Integer] The number of text tokens in the input prompt. + end + end + end + end +end diff --git a/lib/openai/models/image_gen_partial_image_event.rb b/lib/openai/models/image_gen_partial_image_event.rb new file mode 100644 index 00000000..33601e7c --- /dev/null +++ b/lib/openai/models/image_gen_partial_image_event.rb @@ -0,0 +1,135 @@ +# frozen_string_literal: true + +module OpenAI + module Models + class ImageGenPartialImageEvent < OpenAI::Internal::Type::BaseModel + # @!attribute b64_json + # Base64-encoded partial image data, suitable for rendering as an image. + # + # @return [String] + required :b64_json, String + + # @!attribute background + # The background setting for the requested image. + # + # @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Background] + required :background, enum: -> { OpenAI::ImageGenPartialImageEvent::Background } + + # @!attribute created_at + # The Unix timestamp when the event was created. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute output_format + # The output format for the requested image. + # + # @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::OutputFormat] + required :output_format, enum: -> { OpenAI::ImageGenPartialImageEvent::OutputFormat } + + # @!attribute partial_image_index + # 0-based index for the partial image (streaming). + # + # @return [Integer] + required :partial_image_index, Integer + + # @!attribute quality + # The quality setting for the requested image. + # + # @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Quality] + required :quality, enum: -> { OpenAI::ImageGenPartialImageEvent::Quality } + + # @!attribute size + # The size of the requested image. + # + # @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Size] + required :size, enum: -> { OpenAI::ImageGenPartialImageEvent::Size } + + # @!attribute type + # The type of the event. Always `image_generation.partial_image`. + # + # @return [Symbol, :"image_generation.partial_image"] + required :type, const: :"image_generation.partial_image" + + # @!method initialize(b64_json:, background:, created_at:, output_format:, partial_image_index:, quality:, size:, type: :"image_generation.partial_image") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageGenPartialImageEvent} for more details. + # + # Emitted when a partial image is available during image generation streaming. 
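+      #
+      #   @example
+      #     # A hedged sketch: collecting partial frames in index order;
+      #     # `events` is an assumed enumerable of parsed stream events.
+      #     require "base64"
+      #     frames = events
+      #       .select { |e| e.type == :"image_generation.partial_image" }
+      #       .sort_by(&:partial_image_index)
+      #       .map { |e| Base64.decode64(e.b64_json) }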
+ # + # @param b64_json [String] Base64-encoded partial image data, suitable for rendering as an image. + # + # @param background [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Background] The background setting for the requested image. + # + # @param created_at [Integer] The Unix timestamp when the event was created. + # + # @param output_format [Symbol, OpenAI::Models::ImageGenPartialImageEvent::OutputFormat] The output format for the requested image. + # + # @param partial_image_index [Integer] 0-based index for the partial image (streaming). + # + # @param quality [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Quality] The quality setting for the requested image. + # + # @param size [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Size] The size of the requested image. + # + # @param type [Symbol, :"image_generation.partial_image"] The type of the event. Always `image_generation.partial_image`. + + # The background setting for the requested image. + # + # @see OpenAI::Models::ImageGenPartialImageEvent#background + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT = :transparent + OPAQUE = :opaque + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The output format for the requested image. + # + # @see OpenAI::Models::ImageGenPartialImageEvent#output_format + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG = :png + WEBP = :webp + JPEG = :jpeg + + # @!method self.values + # @return [Array] + end + + # The quality setting for the requested image. + # + # @see OpenAI::Models::ImageGenPartialImageEvent#quality + module Quality + extend OpenAI::Internal::Type::Enum + + LOW = :low + MEDIUM = :medium + HIGH = :high + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The size of the requested image. + # + # @see OpenAI::Models::ImageGenPartialImageEvent#size + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024 = :"1024x1024" + SIZE_1024X1536 = :"1024x1536" + SIZE_1536X1024 = :"1536x1024" + AUTO = :auto + + # @!method self.values + # @return [Array] + end + end + end +end diff --git a/lib/openai/models/image_gen_stream_event.rb b/lib/openai/models/image_gen_stream_event.rb new file mode 100644 index 00000000..91af7984 --- /dev/null +++ b/lib/openai/models/image_gen_stream_event.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # Emitted when a partial image is available during image generation streaming. + module ImageGenStreamEvent + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Emitted when a partial image is available during image generation streaming. + variant :"image_generation.partial_image", -> { OpenAI::ImageGenPartialImageEvent } + + # Emitted when image generation has completed and the final image is available. 
+ variant :"image_generation.completed", -> { OpenAI::ImageGenCompletedEvent }
+
+ # @!method self.variants
+ # @return [Array(OpenAI::Models::ImageGenPartialImageEvent, OpenAI::Models::ImageGenCompletedEvent)]
+ end
+ end
+end
diff --git a/lib/openai/models/image_generate_params.rb b/lib/openai/models/image_generate_params.rb
index 9f1f00e2..a438755e 100644
--- a/lib/openai/models/image_generate_params.rb
+++ b/lib/openai/models/image_generate_params.rb
@@ -2,168 +2,285 @@
module OpenAI
module Models
- class ImageGenerateParams < OpenAI::BaseModel
- # @!parse
- # extend OpenAI::RequestParameters::Converter
- include OpenAI::RequestParameters
+ # @see OpenAI::Resources::Images#generate
+ #
+ # @see OpenAI::Resources::Images#generate_stream_raw
+ class ImageGenerateParams < OpenAI::Internal::Type::BaseModel
+ extend OpenAI::Internal::Type::RequestParameters::Converter
+ include OpenAI::Internal::Type::RequestParameters
# @!attribute prompt
- # A text description of the desired image(s). The maximum length is 1000
- # characters for `dall-e-2` and 4000 characters for `dall-e-3`.
+ # A text description of the desired image(s). The maximum length is 32000
+ # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
+ # for `dall-e-3`.
#
# @return [String]
required :prompt, String
+ # @!attribute background
+ # Allows you to set transparency for the background of the generated image(s). This
+ # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ # `opaque` or `auto` (default value). When `auto` is used, the model will
+ # automatically determine the best background for the image.
+ #
+ # If `transparent`, the output format needs to support transparency, so it should
+ # be set to either `png` (default value) or `webp`.
+ #
+ # @return [Symbol, OpenAI::Models::ImageGenerateParams::Background, nil]
+ optional :background, enum: -> { OpenAI::ImageGenerateParams::Background }, nil?: true
+
# @!attribute model
- # The model to use for image generation.
+ # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+ # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
+ # `gpt-image-1` is used.
#
# @return [String, Symbol, OpenAI::Models::ImageModel, nil]
- optional :model, union: -> { OpenAI::Models::ImageGenerateParams::Model }, nil?: true
+ optional :model, union: -> { OpenAI::ImageGenerateParams::Model }, nil?: true
+
+ # @!attribute moderation
+ # Control the content-moderation level for images generated by `gpt-image-1`. Must
+ # be either `low` for less restrictive filtering or `auto` (default value).
+ #
+ # @return [Symbol, OpenAI::Models::ImageGenerateParams::Moderation, nil]
+ optional :moderation, enum: -> { OpenAI::ImageGenerateParams::Moderation }, nil?: true
# @!attribute n
# The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
- # `n=1` is supported.
+ # `n=1` is supported.
#
# @return [Integer, nil]
optional :n, Integer, nil?: true
- # @!attribute [r] quality
- # The quality of the image that will be generated. `hd` creates images with finer
- # details and greater consistency across the image. This param is only supported
- # for `dall-e-3`.
+ # @!attribute output_compression
+ # The compression level (0-100%) for the generated images. This parameter is only
+ # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ # defaults to 100.
# - # @return [Symbol, OpenAI::Models::ImageGenerateParams::Quality, nil] - optional :quality, enum: -> { OpenAI::Models::ImageGenerateParams::Quality } + # @return [Integer, nil] + optional :output_compression, Integer, nil?: true + + # @!attribute output_format + # The format in which the generated images are returned. This parameter is only + # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + # + # @return [Symbol, OpenAI::Models::ImageGenerateParams::OutputFormat, nil] + optional :output_format, enum: -> { OpenAI::ImageGenerateParams::OutputFormat }, nil?: true + + # @!attribute partial_images + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + # + # Note that the final image may be sent before the full number of partial images + # are generated if the full image is generated more quickly. + # + # @return [Integer, nil] + optional :partial_images, Integer, nil?: true - # @!parse - # # @return [Symbol, OpenAI::Models::ImageGenerateParams::Quality] - # attr_writer :quality + # @!attribute quality + # The quality of the image that will be generated. + # + # - `auto` (default value) will automatically select the best quality for the + # given model. + # - `high`, `medium` and `low` are supported for `gpt-image-1`. + # - `hd` and `standard` are supported for `dall-e-3`. + # - `standard` is the only option for `dall-e-2`. + # + # @return [Symbol, OpenAI::Models::ImageGenerateParams::Quality, nil] + optional :quality, enum: -> { OpenAI::ImageGenerateParams::Quality }, nil?: true # @!attribute response_format - # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # The format in which generated images with `dall-e-2` and `dall-e-3` are + # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + # after the image has been generated. This parameter isn't supported for + # `gpt-image-1` which will always return base64-encoded images. # # @return [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil] - optional :response_format, enum: -> { OpenAI::Models::ImageGenerateParams::ResponseFormat }, nil?: true + optional :response_format, enum: -> { OpenAI::ImageGenerateParams::ResponseFormat }, nil?: true # @!attribute size - # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or - # `1024x1792` for `dall-e-3` models. + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. # # @return [Symbol, OpenAI::Models::ImageGenerateParams::Size, nil] - optional :size, enum: -> { OpenAI::Models::ImageGenerateParams::Size }, nil?: true + optional :size, enum: -> { OpenAI::ImageGenerateParams::Size }, nil?: true # @!attribute style - # The style of the generated images. Must be one of `vivid` or `natural`. Vivid - # causes the model to lean towards generating hyper-real and dramatic images. - # Natural causes the model to produce more natural, less hyper-real looking - # images. 
This param is only supported for `dall-e-3`.
+ # The style of the generated images. This parameter is only supported for
+ # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
+ # towards generating hyper-real and dramatic images. Natural causes the model to
+ # produce more natural, less hyper-real looking images.
#
# @return [Symbol, OpenAI::Models::ImageGenerateParams::Style, nil]
- optional :style, enum: -> { OpenAI::Models::ImageGenerateParams::Style }, nil?: true
+ optional :style, enum: -> { OpenAI::ImageGenerateParams::Style }, nil?: true
- # @!attribute [r] user
+ # @!attribute user
# A unique identifier representing your end-user, which can help OpenAI to monitor
- # and detect abuse.
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ # and detect abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
#
# @return [String, nil]
optional :user, String
- # @!parse
- # # @return [String]
- # attr_writer :user
-
- # @!parse
- # # @param prompt [String]
- # # @param model [String, Symbol, OpenAI::Models::ImageModel, nil]
- # # @param n [Integer, nil]
- # # @param quality [Symbol, OpenAI::Models::ImageGenerateParams::Quality]
- # # @param response_format [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil]
- # # @param size [Symbol, OpenAI::Models::ImageGenerateParams::Size, nil]
- # # @param style [Symbol, OpenAI::Models::ImageGenerateParams::Style, nil]
- # # @param user [String]
- # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
- # #
- # def initialize(
- # prompt:,
- # model: nil,
- # n: nil,
- # quality: nil,
- # response_format: nil,
- # size: nil,
- # style: nil,
- # user: nil,
- # request_options: {},
- # **
- # )
- # super
- # end
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
-
- # @abstract
- #
- # The model to use for image generation.
- class Model < OpenAI::Union
+ # @!method initialize(prompt:, background: nil, model: nil, moderation: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, style: nil, user: nil, request_options: {})
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::ImageGenerateParams} for more details.
+ #
+ # @param prompt [String] A text description of the desired image(s). The maximum length is 32000 characte
+ #
+ # @param background [Symbol, OpenAI::Models::ImageGenerateParams::Background, nil] Allows you to set transparency for the background of the generated image(s).
+ #
+ # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or `gpt-im
+ #
+ # @param moderation [Symbol, OpenAI::Models::ImageGenerateParams::Moderation, nil] Control the content-moderation level for images generated by `gpt-image-1`. Must
+ #
+ # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+ #
+ # @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter is only
+ #
+ # @param output_format [Symbol, OpenAI::Models::ImageGenerateParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is only su
+ #
+ # @param partial_images [Integer, nil] The number of partial images to generate.
This parameter is used for
+ #
+ # @param quality [Symbol, OpenAI::Models::ImageGenerateParams::Quality, nil] The quality of the image that will be generated.
+ #
+ # @param response_format [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil] The format in which generated images with `dall-e-2` and `dall-e-3` are returned
+ #
+ # @param size [Symbol, OpenAI::Models::ImageGenerateParams::Size, nil] The size of the generated images. Must be one of `1024x1024`, `1536x1024` (lands
+ #
+ # @param style [Symbol, OpenAI::Models::ImageGenerateParams::Style, nil] The style of the generated images. This parameter is only supported for `dall-e-
+ #
+ # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor
+ #
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
+
+ # Allows you to set transparency for the background of the generated image(s). This
+ # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ # `opaque` or `auto` (default value). When `auto` is used, the model will
+ # automatically determine the best background for the image.
+ #
+ # If `transparent`, the output format needs to support transparency, so it should
+ # be set to either `png` (default value) or `webp`.
+ module Background
+ extend OpenAI::Internal::Type::Enum
+
+ TRANSPARENT = :transparent
+ OPAQUE = :opaque
+ AUTO = :auto
+
+ # @!method self.values
+ # @return [Array]
+ end
+
+ # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+ # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
+ # `gpt-image-1` is used.
+ module Model
+ extend OpenAI::Internal::Type::Union
+
variant String
- # The model to use for image generation.
- variant enum: -> { OpenAI::Models::ImageModel }
+ # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.
+ variant enum: -> { OpenAI::ImageModel }
+
+ # @!method self.variants
+ # @return [Array(String, Symbol, OpenAI::Models::ImageModel)]
end
- # @abstract
+ # Control the content-moderation level for images generated by `gpt-image-1`. Must
+ # be either `low` for less restrictive filtering or `auto` (default value).
+ module Moderation
+ extend OpenAI::Internal::Type::Enum
+
+ LOW = :low
+ AUTO = :auto
+
+ # @!method self.values
+ # @return [Array]
+ end
+
+ # The format in which the generated images are returned. This parameter is only
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+ module OutputFormat
+ extend OpenAI::Internal::Type::Enum
+
+ PNG = :png
+ JPEG = :jpeg
+ WEBP = :webp
+
+ # @!method self.values
+ # @return [Array]
+ end
+
+ # The quality of the image that will be generated.
#
- # The quality of the image that will be generated. `hd` creates images with finer
- # details and greater consistency across the image. This param is only supported
- # for `dall-e-3`.
- class Quality < OpenAI::Enum
+ # - `auto` (default value) will automatically select the best quality for the
+ # given model.
+ # - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ # - `hd` and `standard` are supported for `dall-e-3`.
+ # - `standard` is the only option for `dall-e-2`.
+ module Quality
+ extend OpenAI::Internal::Type::Enum
+
STANDARD = :standard
HD = :hd
+ LOW = :low
+ MEDIUM = :medium
+ HIGH = :high
+ AUTO = :auto
- finalize!
+ # @!method self.values + # @return [Array] end - # @abstract - # - # The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. - class ResponseFormat < OpenAI::Enum + # The format in which generated images with `dall-e-2` and `dall-e-3` are + # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + # after the image has been generated. This parameter isn't supported for + # `gpt-image-1` which will always return base64-encoded images. + module ResponseFormat + extend OpenAI::Internal::Type::Enum + URL = :url B64_JSON = :b64_json - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # - # The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or - # `1024x1792` for `dall-e-3` models. - class Size < OpenAI::Enum - NUMBER_256X256 = :"256x256" - NUMBER_512X512 = :"512x512" - NUMBER_1024X1024 = :"1024x1024" - NUMBER_1792X1024 = :"1792x1024" - NUMBER_1024X1792 = :"1024x1792" + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + module Size + extend OpenAI::Internal::Type::Enum - finalize! + AUTO = :auto + SIZE_1024X1024 = :"1024x1024" + SIZE_1536X1024 = :"1536x1024" + SIZE_1024X1536 = :"1024x1536" + SIZE_256X256 = :"256x256" + SIZE_512X512 = :"512x512" + SIZE_1792X1024 = :"1792x1024" + SIZE_1024X1792 = :"1024x1792" + + # @!method self.values + # @return [Array] end - # @abstract - # - # The style of the generated images. Must be one of `vivid` or `natural`. Vivid - # causes the model to lean towards generating hyper-real and dramatic images. - # Natural causes the model to produce more natural, less hyper-real looking - # images. This param is only supported for `dall-e-3`. - class Style < OpenAI::Enum + # The style of the generated images. This parameter is only supported for + # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + # towards generating hyper-real and dramatic images. Natural causes the model to + # produce more natural, less hyper-real looking images. + module Style + extend OpenAI::Internal::Type::Enum + VIVID = :vivid NATURAL = :natural - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/image_model.rb b/lib/openai/models/image_model.rb index c9c62780..0bd2690e 100644 --- a/lib/openai/models/image_model.rb +++ b/lib/openai/models/image_model.rb @@ -2,13 +2,15 @@ module OpenAI module Models - # @abstract - # - class ImageModel < OpenAI::Enum + module ImageModel + extend OpenAI::Internal::Type::Enum + DALL_E_2 = :"dall-e-2" DALL_E_3 = :"dall-e-3" + GPT_IMAGE_1 = :"gpt-image-1" - finalize! 
+ # @!method self.values
+ # @return [Array]
end
end
end
diff --git a/lib/openai/models/images_response.rb b/lib/openai/models/images_response.rb
index 3f3f3b00..d1aee9f5 100644
--- a/lib/openai/models/images_response.rb
+++ b/lib/openai/models/images_response.rb
@@ -2,24 +2,188 @@
module OpenAI
module Models
- class ImagesResponse < OpenAI::BaseModel
+ # @see OpenAI::Resources::Images#create_variation
+ class ImagesResponse < OpenAI::Internal::Type::BaseModel
# @!attribute created
+ # The Unix timestamp (in seconds) of when the image was created.
#
# @return [Integer]
required :created, Integer
+ # @!attribute background
+ # The background parameter used for the image generation. Either `transparent` or
+ # `opaque`.
+ #
+ # @return [Symbol, OpenAI::Models::ImagesResponse::Background, nil]
+ optional :background, enum: -> { OpenAI::ImagesResponse::Background }
+
# @!attribute data
+ # The list of generated images.
+ #
+ # @return [Array, nil]
+ optional :data, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Image] }
+
+ # @!attribute output_format
+ # The output format of the image generation. Either `png`, `webp`, or `jpeg`.
+ #
+ # @return [Symbol, OpenAI::Models::ImagesResponse::OutputFormat, nil]
+ optional :output_format, enum: -> { OpenAI::ImagesResponse::OutputFormat }
+
+ # @!attribute quality
+ # The quality of the image generated. Either `low`, `medium`, or `high`.
+ #
+ # @return [Symbol, OpenAI::Models::ImagesResponse::Quality, nil]
+ optional :quality, enum: -> { OpenAI::ImagesResponse::Quality }
+
+ # @!attribute size
+ # The size of the image generated. Either `1024x1024`, `1024x1536`, or
+ # `1536x1024`.
+ #
+ # @return [Symbol, OpenAI::Models::ImagesResponse::Size, nil]
+ optional :size, enum: -> { OpenAI::ImagesResponse::Size }
+
+ # @!attribute usage
+ # For `gpt-image-1` only, the token usage information for the image generation.
+ #
+ # @return [OpenAI::Models::ImagesResponse::Usage, nil]
+ optional :usage, -> { OpenAI::ImagesResponse::Usage }
+
+ # @!method initialize(created:, background: nil, data: nil, output_format: nil, quality: nil, size: nil, usage: nil)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::ImagesResponse} for more details.
+ #
+ # The response from the image generation endpoint.
+ #
+ # @param created [Integer] The Unix timestamp (in seconds) of when the image was created.
+ #
+ # @param background [Symbol, OpenAI::Models::ImagesResponse::Background] The background parameter used for the image generation. Either `transparent` or
+ #
+ # @param data [Array] The list of generated images.
+ #
+ # @param output_format [Symbol, OpenAI::Models::ImagesResponse::OutputFormat] The output format of the image generation. Either `png`, `webp`, or `jpeg`.
+ #
+ # @param quality [Symbol, OpenAI::Models::ImagesResponse::Quality] The quality of the image generated. Either `low`, `medium`, or `high`.
+ #
+ # @param size [Symbol, OpenAI::Models::ImagesResponse::Size] The size of the image generated. Either `1024x1024`, `1024x1536`, or `1536x1024`
+ #
+ # @param usage [OpenAI::Models::ImagesResponse::Usage] For `gpt-image-1` only, the token usage information for the image generation.
+
+ # The background parameter used for the image generation. Either `transparent` or
+ # `opaque`.
# - # @return [Array] - required :data, -> { OpenAI::ArrayOf[OpenAI::Models::Image] } + # @see OpenAI::Models::ImagesResponse#background + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT = :transparent + OPAQUE = :opaque + + # @!method self.values + # @return [Array] + end + + # The output format of the image generation. Either `png`, `webp`, or `jpeg`. + # + # @see OpenAI::Models::ImagesResponse#output_format + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG = :png + WEBP = :webp + JPEG = :jpeg + + # @!method self.values + # @return [Array] + end + + # The quality of the image generated. Either `low`, `medium`, or `high`. + # + # @see OpenAI::Models::ImagesResponse#quality + module Quality + extend OpenAI::Internal::Type::Enum + + LOW = :low + MEDIUM = :medium + HIGH = :high + + # @!method self.values + # @return [Array] + end + + # The size of the image generated. Either `1024x1024`, `1024x1536`, or + # `1536x1024`. + # + # @see OpenAI::Models::ImagesResponse#size + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024 = :"1024x1024" + SIZE_1024X1536 = :"1024x1536" + SIZE_1536X1024 = :"1536x1024" + + # @!method self.values + # @return [Array] + end + + # @see OpenAI::Models::ImagesResponse#usage + class Usage < OpenAI::Internal::Type::BaseModel + # @!attribute input_tokens + # The number of tokens (images and text) in the input prompt. + # + # @return [Integer] + required :input_tokens, Integer + + # @!attribute input_tokens_details + # The input tokens detailed information for the image generation. + # + # @return [OpenAI::Models::ImagesResponse::Usage::InputTokensDetails] + required :input_tokens_details, -> { OpenAI::ImagesResponse::Usage::InputTokensDetails } + + # @!attribute output_tokens + # The number of output tokens generated by the model. + # + # @return [Integer] + required :output_tokens, Integer + + # @!attribute total_tokens + # The total number of tokens (images and text) used for the image generation. + # + # @return [Integer] + required :total_tokens, Integer + + # @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, total_tokens:) + # For `gpt-image-1` only, the token usage information for the image generation. + # + # @param input_tokens [Integer] The number of tokens (images and text) in the input prompt. + # + # @param input_tokens_details [OpenAI::Models::ImagesResponse::Usage::InputTokensDetails] The input tokens detailed information for the image generation. + # + # @param output_tokens [Integer] The number of output tokens generated by the model. + # + # @param total_tokens [Integer] The total number of tokens (images and text) used for the image generation. + + # @see OpenAI::Models::ImagesResponse::Usage#input_tokens_details + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + # @!attribute image_tokens + # The number of image tokens in the input prompt. + # + # @return [Integer] + required :image_tokens, Integer - # @!parse - # # @param created [Integer] - # # @param data [Array] - # # - # def initialize(created:, data:, **) = super + # @!attribute text_tokens + # The number of text tokens in the input prompt. + # + # @return [Integer] + required :text_tokens, Integer - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(image_tokens:, text_tokens:) + # The input tokens detailed information for the image generation. + # + # @param image_tokens [Integer] The number of image tokens in the input prompt. 
+ # + # @param text_tokens [Integer] The number of text tokens in the input prompt. + end + end end end end diff --git a/lib/openai/models/metadata.rb b/lib/openai/models/metadata.rb index fea222e0..7930ffac 100644 --- a/lib/openai/models/metadata.rb +++ b/lib/openai/models/metadata.rb @@ -2,6 +2,7 @@ module OpenAI module Models - Metadata = OpenAI::HashOf[String] + # @type [OpenAI::Internal::Type::Converter] + Metadata = OpenAI::Internal::Type::HashOf[String] end end diff --git a/lib/openai/models/model.rb b/lib/openai/models/model.rb index 1827ed7b..06721bf5 100644 --- a/lib/openai/models/model.rb +++ b/lib/openai/models/model.rb @@ -2,7 +2,8 @@ module OpenAI module Models - class Model < OpenAI::BaseModel + # @see OpenAI::Resources::Models#retrieve + class Model < OpenAI::Internal::Type::BaseModel # @!attribute id # The model identifier, which can be referenced in the API endpoints. # @@ -27,17 +28,16 @@ class Model < OpenAI::BaseModel # @return [String] required :owned_by, String - # @!parse - # # Describes an OpenAI model offering that can be used with the API. - # # - # # @param id [String] - # # @param created [Integer] - # # @param owned_by [String] - # # @param object [Symbol, :model] - # # - # def initialize(id:, created:, owned_by:, object: :model, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, created:, owned_by:, object: :model) + # Describes an OpenAI model offering that can be used with the API. + # + # @param id [String] The model identifier, which can be referenced in the API endpoints. + # + # @param created [Integer] The Unix timestamp (in seconds) when the model was created. + # + # @param owned_by [String] The organization that owns the model. + # + # @param object [Symbol, :model] The object type, which is always "model". 
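+ #
+ # @example Reading a retrieved model (illustrative sketch)
+ #   # Assumes a configured `client`; the model name is a placeholder.
+ #   model = client.models.retrieve("gpt-4o")
+ #   puts "#{model.id} (owned by #{model.owned_by})"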
end end end diff --git a/lib/openai/models/model_delete_params.rb b/lib/openai/models/model_delete_params.rb index c5414ed7..758a1682 100644 --- a/lib/openai/models/model_delete_params.rb +++ b/lib/openai/models/model_delete_params.rb @@ -2,17 +2,13 @@ module OpenAI module Models - class ModelDeleteParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Models#delete + class ModelDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/model_deleted.rb b/lib/openai/models/model_deleted.rb index 72d0b807..612bc76c 100644 --- a/lib/openai/models/model_deleted.rb +++ b/lib/openai/models/model_deleted.rb @@ -2,7 +2,8 @@ module OpenAI module Models - class ModelDeleted < OpenAI::BaseModel + # @see OpenAI::Resources::Models#delete + class ModelDeleted < OpenAI::Internal::Type::BaseModel # @!attribute id # # @return [String] @@ -11,21 +12,17 @@ class ModelDeleted < OpenAI::BaseModel # @!attribute deleted # # @return [Boolean] - required :deleted, OpenAI::BooleanModel + required :deleted, OpenAI::Internal::Type::Boolean # @!attribute object # # @return [String] required :object, String - # @!parse - # # @param id [String] - # # @param deleted [Boolean] - # # @param object [String] - # # - # def initialize(id:, deleted:, object:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, deleted:, object:) + # @param id [String] + # @param deleted [Boolean] + # @param object [String] end end end diff --git a/lib/openai/models/model_list_params.rb b/lib/openai/models/model_list_params.rb index 37bd5c67..77d83f84 100644 --- a/lib/openai/models/model_list_params.rb +++ b/lib/openai/models/model_list_params.rb @@ -2,17 +2,13 @@ module OpenAI module Models - class ModelListParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Models#list + class ModelListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/model_retrieve_params.rb b/lib/openai/models/model_retrieve_params.rb index a8779854..deec29e7 100644 --- a/lib/openai/models/model_retrieve_params.rb +++ b/lib/openai/models/model_retrieve_params.rb @@ -2,17 +2,13 @@ module OpenAI module Models - class ModelRetrieveParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Models#retrieve + class ModelRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter 
+ include OpenAI::Internal::Type::RequestParameters
- # @!parse
- # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
- # #
- # def initialize(request_options: {}, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
+ # @!method initialize(request_options: {})
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
end
end
end
diff --git a/lib/openai/models/moderation.rb b/lib/openai/models/moderation.rb
index 98a25176..ccc71730 100644
--- a/lib/openai/models/moderation.rb
+++ b/lib/openai/models/moderation.rb
@@ -2,204 +2,207 @@
module OpenAI
module Models
- class Moderation < OpenAI::BaseModel
+ class Moderation < OpenAI::Internal::Type::BaseModel
# @!attribute categories
# A list of the categories, and whether they are flagged or not.
#
# @return [OpenAI::Models::Moderation::Categories]
- required :categories, -> { OpenAI::Models::Moderation::Categories }
+ required :categories, -> { OpenAI::Moderation::Categories }
# @!attribute category_applied_input_types
# A list of the categories along with the input type(s) that the score applies to.
#
# @return [OpenAI::Models::Moderation::CategoryAppliedInputTypes]
- required :category_applied_input_types, -> { OpenAI::Models::Moderation::CategoryAppliedInputTypes }
+ required :category_applied_input_types, -> { OpenAI::Moderation::CategoryAppliedInputTypes }
# @!attribute category_scores
# A list of the categories along with their scores as predicted by model.
#
# @return [OpenAI::Models::Moderation::CategoryScores]
- required :category_scores, -> { OpenAI::Models::Moderation::CategoryScores }
+ required :category_scores, -> { OpenAI::Moderation::CategoryScores }
# @!attribute flagged
# Whether any of the below categories are flagged.
#
# @return [Boolean]
- required :flagged, OpenAI::BooleanModel
+ required :flagged, OpenAI::Internal::Type::Boolean
- # @!parse
- # # @param categories [OpenAI::Models::Moderation::Categories]
- # # @param category_applied_input_types [OpenAI::Models::Moderation::CategoryAppliedInputTypes]
- # # @param category_scores [OpenAI::Models::Moderation::CategoryScores]
- # # @param flagged [Boolean]
- # #
- # def initialize(categories:, category_applied_input_types:, category_scores:, flagged:, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
+ # @!method initialize(categories:, category_applied_input_types:, category_scores:, flagged:)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Moderation} for more details.
+ #
+ # @param categories [OpenAI::Models::Moderation::Categories] A list of the categories, and whether they are flagged or not.
+ #
+ # @param category_applied_input_types [OpenAI::Models::Moderation::CategoryAppliedInputTypes] A list of the categories along with the input type(s) that the score applies to.
+ #
+ # @param category_scores [OpenAI::Models::Moderation::CategoryScores] A list of the categories along with their scores as predicted by the model.
+ #
+ # @param flagged [Boolean] Whether any of the below categories are flagged.
- class Categories < OpenAI::BaseModel
+ # @see OpenAI::Models::Moderation#categories
+ class Categories < OpenAI::Internal::Type::BaseModel
# @!attribute harassment
# Content that expresses, incites, or promotes harassing language towards any
- # target.
+ # target.
# # @return [Boolean] - required :harassment, OpenAI::BooleanModel + required :harassment, OpenAI::Internal::Type::Boolean # @!attribute harassment_threatening # Harassment content that also includes violence or serious harm towards any - # target. + # target. # # @return [Boolean] - required :harassment_threatening, OpenAI::BooleanModel, api_name: :"harassment/threatening" + required :harassment_threatening, OpenAI::Internal::Type::Boolean, api_name: :"harassment/threatening" # @!attribute hate # Content that expresses, incites, or promotes hate based on race, gender, - # ethnicity, religion, nationality, sexual orientation, disability status, or - # caste. Hateful content aimed at non-protected groups (e.g., chess players) is - # harassment. + # ethnicity, religion, nationality, sexual orientation, disability status, or + # caste. Hateful content aimed at non-protected groups (e.g., chess players) is + # harassment. # # @return [Boolean] - required :hate, OpenAI::BooleanModel + required :hate, OpenAI::Internal::Type::Boolean # @!attribute hate_threatening # Hateful content that also includes violence or serious harm towards the targeted - # group based on race, gender, ethnicity, religion, nationality, sexual - # orientation, disability status, or caste. + # group based on race, gender, ethnicity, religion, nationality, sexual + # orientation, disability status, or caste. # # @return [Boolean] - required :hate_threatening, OpenAI::BooleanModel, api_name: :"hate/threatening" + required :hate_threatening, OpenAI::Internal::Type::Boolean, api_name: :"hate/threatening" # @!attribute illicit # Content that includes instructions or advice that facilitate the planning or - # execution of wrongdoing, or that gives advice or instruction on how to commit - # illicit acts. For example, "how to shoplift" would fit this category. + # execution of wrongdoing, or that gives advice or instruction on how to commit + # illicit acts. For example, "how to shoplift" would fit this category. # # @return [Boolean, nil] - required :illicit, OpenAI::BooleanModel, nil?: true + required :illicit, OpenAI::Internal::Type::Boolean, nil?: true # @!attribute illicit_violent # Content that includes instructions or advice that facilitate the planning or - # execution of wrongdoing that also includes violence, or that gives advice or - # instruction on the procurement of any weapon. + # execution of wrongdoing that also includes violence, or that gives advice or + # instruction on the procurement of any weapon. # # @return [Boolean, nil] - required :illicit_violent, OpenAI::BooleanModel, api_name: :"illicit/violent", nil?: true + required :illicit_violent, OpenAI::Internal::Type::Boolean, api_name: :"illicit/violent", nil?: true # @!attribute self_harm # Content that promotes, encourages, or depicts acts of self-harm, such as - # suicide, cutting, and eating disorders. + # suicide, cutting, and eating disorders. # # @return [Boolean] - required :self_harm, OpenAI::BooleanModel, api_name: :"self-harm" + required :self_harm, OpenAI::Internal::Type::Boolean, api_name: :"self-harm" # @!attribute self_harm_instructions # Content that encourages performing acts of self-harm, such as suicide, cutting, - # and eating disorders, or that gives instructions or advice on how to commit such - # acts. + # and eating disorders, or that gives instructions or advice on how to commit such + # acts. 
# # @return [Boolean] - required :self_harm_instructions, OpenAI::BooleanModel, api_name: :"self-harm/instructions" + required :self_harm_instructions, OpenAI::Internal::Type::Boolean, api_name: :"self-harm/instructions" # @!attribute self_harm_intent # Content where the speaker expresses that they are engaging or intend to engage - # in acts of self-harm, such as suicide, cutting, and eating disorders. + # in acts of self-harm, such as suicide, cutting, and eating disorders. # # @return [Boolean] - required :self_harm_intent, OpenAI::BooleanModel, api_name: :"self-harm/intent" + required :self_harm_intent, OpenAI::Internal::Type::Boolean, api_name: :"self-harm/intent" # @!attribute sexual # Content meant to arouse sexual excitement, such as the description of sexual - # activity, or that promotes sexual services (excluding sex education and - # wellness). + # activity, or that promotes sexual services (excluding sex education and + # wellness). # # @return [Boolean] - required :sexual, OpenAI::BooleanModel + required :sexual, OpenAI::Internal::Type::Boolean # @!attribute sexual_minors # Sexual content that includes an individual who is under 18 years old. # # @return [Boolean] - required :sexual_minors, OpenAI::BooleanModel, api_name: :"sexual/minors" + required :sexual_minors, OpenAI::Internal::Type::Boolean, api_name: :"sexual/minors" # @!attribute violence # Content that depicts death, violence, or physical injury. # # @return [Boolean] - required :violence, OpenAI::BooleanModel + required :violence, OpenAI::Internal::Type::Boolean # @!attribute violence_graphic # Content that depicts death, violence, or physical injury in graphic detail. # # @return [Boolean] - required :violence_graphic, OpenAI::BooleanModel, api_name: :"violence/graphic" - - # @!parse - # # A list of the categories, and whether they are flagged or not. - # # - # # @param harassment [Boolean] - # # @param harassment_threatening [Boolean] - # # @param hate [Boolean] - # # @param hate_threatening [Boolean] - # # @param illicit [Boolean, nil] - # # @param illicit_violent [Boolean, nil] - # # @param self_harm [Boolean] - # # @param self_harm_instructions [Boolean] - # # @param self_harm_intent [Boolean] - # # @param sexual [Boolean] - # # @param sexual_minors [Boolean] - # # @param violence [Boolean] - # # @param violence_graphic [Boolean] - # # - # def initialize( - # harassment:, - # harassment_threatening:, - # hate:, - # hate_threatening:, - # illicit:, - # illicit_violent:, - # self_harm:, - # self_harm_instructions:, - # self_harm_intent:, - # sexual:, - # sexual_minors:, - # violence:, - # violence_graphic:, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void + required :violence_graphic, OpenAI::Internal::Type::Boolean, api_name: :"violence/graphic" + + # @!method initialize(harassment:, harassment_threatening:, hate:, hate_threatening:, illicit:, illicit_violent:, self_harm:, self_harm_instructions:, self_harm_intent:, sexual:, sexual_minors:, violence:, violence_graphic:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Moderation::Categories} for more details. + # + # A list of the categories, and whether they are flagged or not. 
+ # + # @param harassment [Boolean] Content that expresses, incites, or promotes harassing language towards any targ + # + # @param harassment_threatening [Boolean] Harassment content that also includes violence or serious harm towards any targe + # + # @param hate [Boolean] Content that expresses, incites, or promotes hate based on race, gender, ethnici + # + # @param hate_threatening [Boolean] Hateful content that also includes violence or serious harm towards the targeted + # + # @param illicit [Boolean, nil] Content that includes instructions or advice that facilitate the planning or exe + # + # @param illicit_violent [Boolean, nil] Content that includes instructions or advice that facilitate the planning or exe + # + # @param self_harm [Boolean] Content that promotes, encourages, or depicts acts of self-harm, such as suicide + # + # @param self_harm_instructions [Boolean] Content that encourages performing acts of self-harm, such as suicide, cutting, + # + # @param self_harm_intent [Boolean] Content where the speaker expresses that they are engaging or intend to engage i + # + # @param sexual [Boolean] Content meant to arouse sexual excitement, such as the description of sexual act + # + # @param sexual_minors [Boolean] Sexual content that includes an individual who is under 18 years old. + # + # @param violence [Boolean] Content that depicts death, violence, or physical injury. + # + # @param violence_graphic [Boolean] Content that depicts death, violence, or physical injury in graphic detail. end - class CategoryAppliedInputTypes < OpenAI::BaseModel + # @see OpenAI::Models::Moderation#category_applied_input_types + class CategoryAppliedInputTypes < OpenAI::Internal::Type::BaseModel # @!attribute harassment # The applied input type(s) for the category 'harassment'. # # @return [Array] required :harassment, - -> { OpenAI::ArrayOf[enum: OpenAI::Models::Moderation::CategoryAppliedInputTypes::Harassment] } + -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Harassment] } # @!attribute harassment_threatening # The applied input type(s) for the category 'harassment/threatening'. # # @return [Array] required :harassment_threatening, - -> { OpenAI::ArrayOf[enum: OpenAI::Models::Moderation::CategoryAppliedInputTypes::HarassmentThreatening] }, + -> { + OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::HarassmentThreatening] + }, api_name: :"harassment/threatening" # @!attribute hate # The applied input type(s) for the category 'hate'. # # @return [Array] - required :hate, -> { OpenAI::ArrayOf[enum: OpenAI::Models::Moderation::CategoryAppliedInputTypes::Hate] } + required :hate, + -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Hate] } # @!attribute hate_threatening # The applied input type(s) for the category 'hate/threatening'. 
# # @return [Array] required :hate_threatening, - -> { OpenAI::ArrayOf[enum: OpenAI::Models::Moderation::CategoryAppliedInputTypes::HateThreatening] }, + -> { + OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::HateThreatening] + }, api_name: :"hate/threatening" # @!attribute illicit @@ -207,14 +210,16 @@ class CategoryAppliedInputTypes < OpenAI::BaseModel # # @return [Array] required :illicit, - -> { OpenAI::ArrayOf[enum: OpenAI::Models::Moderation::CategoryAppliedInputTypes::Illicit] } + -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Illicit] } # @!attribute illicit_violent # The applied input type(s) for the category 'illicit/violent'. # # @return [Array] required :illicit_violent, - -> { OpenAI::ArrayOf[enum: OpenAI::Models::Moderation::CategoryAppliedInputTypes::IllicitViolent] }, + -> { + OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::IllicitViolent] + }, api_name: :"illicit/violent" # @!attribute self_harm @@ -222,7 +227,9 @@ class CategoryAppliedInputTypes < OpenAI::BaseModel # # @return [Array] required :self_harm, - -> { OpenAI::ArrayOf[enum: OpenAI::Models::Moderation::CategoryAppliedInputTypes::SelfHarm] }, + -> { + OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarm] + }, api_name: :"self-harm" # @!attribute self_harm_instructions @@ -230,7 +237,9 @@ class CategoryAppliedInputTypes < OpenAI::BaseModel # # @return [Array] required :self_harm_instructions, - -> { OpenAI::ArrayOf[enum: OpenAI::Models::Moderation::CategoryAppliedInputTypes::SelfHarmInstruction] }, + -> { + OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmInstruction] + }, api_name: :"self-harm/instructions" # @!attribute self_harm_intent @@ -238,7 +247,9 @@ class CategoryAppliedInputTypes < OpenAI::BaseModel # # @return [Array] required :self_harm_intent, - -> { OpenAI::ArrayOf[enum: OpenAI::Models::Moderation::CategoryAppliedInputTypes::SelfHarmIntent] }, + -> { + OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmIntent] + }, api_name: :"self-harm/intent" # @!attribute sexual @@ -246,14 +257,16 @@ class CategoryAppliedInputTypes < OpenAI::BaseModel # # @return [Array] required :sexual, - -> { OpenAI::ArrayOf[enum: OpenAI::Models::Moderation::CategoryAppliedInputTypes::Sexual] } + -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Sexual] } # @!attribute sexual_minors # The applied input type(s) for the category 'sexual/minors'. # # @return [Array] required :sexual_minors, - -> { OpenAI::ArrayOf[enum: OpenAI::Models::Moderation::CategoryAppliedInputTypes::SexualMinor] }, + -> { + OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::SexualMinor] + }, api_name: :"sexual/minors" # @!attribute violence @@ -261,166 +274,173 @@ class CategoryAppliedInputTypes < OpenAI::BaseModel # # @return [Array] required :violence, - -> { OpenAI::ArrayOf[enum: OpenAI::Models::Moderation::CategoryAppliedInputTypes::Violence] } + -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Violence] } # @!attribute violence_graphic # The applied input type(s) for the category 'violence/graphic'. 
# # @return [Array] required :violence_graphic, - -> { OpenAI::ArrayOf[enum: OpenAI::Models::Moderation::CategoryAppliedInputTypes::ViolenceGraphic] }, + -> { + OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::ViolenceGraphic] + }, api_name: :"violence/graphic" - # @!parse - # # A list of the categories along with the input type(s) that the score applies to. - # # - # # @param harassment [Array] - # # @param harassment_threatening [Array] - # # @param hate [Array] - # # @param hate_threatening [Array] - # # @param illicit [Array] - # # @param illicit_violent [Array] - # # @param self_harm [Array] - # # @param self_harm_instructions [Array] - # # @param self_harm_intent [Array] - # # @param sexual [Array] - # # @param sexual_minors [Array] - # # @param violence [Array] - # # @param violence_graphic [Array] - # # - # def initialize( - # harassment:, - # harassment_threatening:, - # hate:, - # hate_threatening:, - # illicit:, - # illicit_violent:, - # self_harm:, - # self_harm_instructions:, - # self_harm_intent:, - # sexual:, - # sexual_minors:, - # violence:, - # violence_graphic:, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract - # - class Harassment < OpenAI::Enum + # @!method initialize(harassment:, harassment_threatening:, hate:, hate_threatening:, illicit:, illicit_violent:, self_harm:, self_harm_instructions:, self_harm_intent:, sexual:, sexual_minors:, violence:, violence_graphic:) + # A list of the categories along with the input type(s) that the score applies to. + # + # @param harassment [Array] The applied input type(s) for the category 'harassment'. + # + # @param harassment_threatening [Array] The applied input type(s) for the category 'harassment/threatening'. + # + # @param hate [Array] The applied input type(s) for the category 'hate'. + # + # @param hate_threatening [Array] The applied input type(s) for the category 'hate/threatening'. + # + # @param illicit [Array] The applied input type(s) for the category 'illicit'. + # + # @param illicit_violent [Array] The applied input type(s) for the category 'illicit/violent'. + # + # @param self_harm [Array] The applied input type(s) for the category 'self-harm'. + # + # @param self_harm_instructions [Array] The applied input type(s) for the category 'self-harm/instructions'. + # + # @param self_harm_intent [Array] The applied input type(s) for the category 'self-harm/intent'. + # + # @param sexual [Array] The applied input type(s) for the category 'sexual'. + # + # @param sexual_minors [Array] The applied input type(s) for the category 'sexual/minors'. + # + # @param violence [Array] The applied input type(s) for the category 'violence'. + # + # @param violence_graphic [Array] The applied input type(s) for the category 'violence/graphic'. + + module Harassment + extend OpenAI::Internal::Type::Enum + TEXT = :text - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # - class HarassmentThreatening < OpenAI::Enum + module HarassmentThreatening + extend OpenAI::Internal::Type::Enum + TEXT = :text - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # - class Hate < OpenAI::Enum + module Hate + extend OpenAI::Internal::Type::Enum + TEXT = :text - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # - class HateThreatening < OpenAI::Enum + module HateThreatening + extend OpenAI::Internal::Type::Enum + TEXT = :text - finalize! 
+ # @!method self.values
+ # @return [Array]
end
- # @abstract
- #
- class Illicit < OpenAI::Enum
+ module Illicit
+ extend OpenAI::Internal::Type::Enum
+
TEXT = :text
- finalize!
+ # @!method self.values
+ # @return [Array]
end
- # @abstract
- #
- class IllicitViolent < OpenAI::Enum
+ module IllicitViolent
+ extend OpenAI::Internal::Type::Enum
+
TEXT = :text
- finalize!
+ # @!method self.values
+ # @return [Array]
end
- # @abstract
- #
- class SelfHarm < OpenAI::Enum
+ module SelfHarm
+ extend OpenAI::Internal::Type::Enum
+
TEXT = :text
IMAGE = :image
- finalize!
+ # @!method self.values
+ # @return [Array]
end
- # @abstract
- #
- class SelfHarmInstruction < OpenAI::Enum
+ module SelfHarmInstruction
+ extend OpenAI::Internal::Type::Enum
+
TEXT = :text
IMAGE = :image
- finalize!
+ # @!method self.values
+ # @return [Array]
end
- # @abstract
- #
- class SelfHarmIntent < OpenAI::Enum
+ module SelfHarmIntent
+ extend OpenAI::Internal::Type::Enum
+
TEXT = :text
IMAGE = :image
- finalize!
+ # @!method self.values
+ # @return [Array]
end
- # @abstract
- #
- class Sexual < OpenAI::Enum
+ module Sexual
+ extend OpenAI::Internal::Type::Enum
+
TEXT = :text
IMAGE = :image
- finalize!
+ # @!method self.values
+ # @return [Array]
end
- # @abstract
- #
- class SexualMinor < OpenAI::Enum
+ module SexualMinor
+ extend OpenAI::Internal::Type::Enum
+
TEXT = :text
- finalize!
+ # @!method self.values
+ # @return [Array]
end
- # @abstract
- #
- class Violence < OpenAI::Enum
+ module Violence
+ extend OpenAI::Internal::Type::Enum
+
TEXT = :text
IMAGE = :image
- finalize!
+ # @!method self.values
+ # @return [Array]
end
- # @abstract
- #
- class ViolenceGraphic < OpenAI::Enum
+ module ViolenceGraphic
+ extend OpenAI::Internal::Type::Enum
+
TEXT = :text
IMAGE = :image
- finalize!
+ # @!method self.values
+ # @return [Array]
end
end
- class CategoryScores < OpenAI::BaseModel
+ # @see OpenAI::Models::Moderation#category_scores
+ class CategoryScores < OpenAI::Internal::Type::BaseModel
# @!attribute harassment
# The score for the category 'harassment'.
#
@@ -499,43 +519,34 @@ class CategoryScores < OpenAI::BaseModel
# @return [Float]
required :violence_graphic, Float, api_name: :"violence/graphic"
- # @!parse
- # # A list of the categories along with their scores as predicted by model.
- # #
- # # @param harassment [Float]
- # # @param harassment_threatening [Float]
- # # @param hate [Float]
- # # @param hate_threatening [Float]
- # # @param illicit [Float]
- # # @param illicit_violent [Float]
- # # @param self_harm [Float]
- # # @param self_harm_instructions [Float]
- # # @param self_harm_intent [Float]
- # # @param sexual [Float]
- # # @param sexual_minors [Float]
- # # @param violence [Float]
- # # @param violence_graphic [Float]
- # #
- # def initialize(
- # harassment:,
- # harassment_threatening:,
- # hate:,
- # hate_threatening:,
- # illicit:,
- # illicit_violent:,
- # self_harm:,
- # self_harm_instructions:,
- # self_harm_intent:,
- # sexual:,
- # sexual_minors:,
- # violence:,
- # violence_graphic:,
- # **
- # )
- # super
- # end
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
+ # @!method initialize(harassment:, harassment_threatening:, hate:, hate_threatening:, illicit:, illicit_violent:, self_harm:, self_harm_instructions:, self_harm_intent:, sexual:, sexual_minors:, violence:, violence_graphic:)
+ # A list of the categories along with their scores as predicted by the model.
+ #
+ # @param harassment [Float] The score for the category 'harassment'.
+ #
+ # @param harassment_threatening [Float] The score for the category 'harassment/threatening'.
+ #
+ # @param hate [Float] The score for the category 'hate'.
+ #
+ # @param hate_threatening [Float] The score for the category 'hate/threatening'.
+ #
+ # @param illicit [Float] The score for the category 'illicit'.
+ #
+ # @param illicit_violent [Float] The score for the category 'illicit/violent'.
+ #
+ # @param self_harm [Float] The score for the category 'self-harm'.
+ #
+ # @param self_harm_instructions [Float] The score for the category 'self-harm/instructions'.
+ #
+ # @param self_harm_intent [Float] The score for the category 'self-harm/intent'.
+ #
+ # @param sexual [Float] The score for the category 'sexual'.
+ #
+ # @param sexual_minors [Float] The score for the category 'sexual/minors'.
+ #
+ # @param violence [Float] The score for the category 'violence'.
+ #
+ # @param violence_graphic [Float] The score for the category 'violence/graphic'.
end
end
end
diff --git a/lib/openai/models/moderation_create_params.rb b/lib/openai/models/moderation_create_params.rb
index 17a49eb2..008c8b28 100644
--- a/lib/openai/models/moderation_create_params.rb
+++ b/lib/openai/models/moderation_create_params.rb
@@ -2,72 +2,78 @@
module OpenAI
module Models
- class ModerationCreateParams < OpenAI::BaseModel
- # @!parse
- # extend OpenAI::RequestParameters::Converter
- include OpenAI::RequestParameters
+ # @see OpenAI::Resources::Moderations#create
+ class ModerationCreateParams < OpenAI::Internal::Type::BaseModel
+ extend OpenAI::Internal::Type::RequestParameters::Converter
+ include OpenAI::Internal::Type::RequestParameters
# @!attribute input
# Input (or inputs) to classify. Can be a single string, an array of strings, or
- # an array of multi-modal input objects similar to other models.
+ # an array of multi-modal input objects similar to other models.
#
# @return [String, Array, Array]
- required :input, union: -> { OpenAI::Models::ModerationCreateParams::Input }
+ required :input, union: -> { OpenAI::ModerationCreateParams::Input }
- # @!attribute [r] model
+ # @!attribute model
# The content moderation model you would like to use. Learn more in
- # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and
- # learn about available models
- # [here](https://platform.openai.com/docs/models#moderation).
+ # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and
+ # learn about available models
+ # [here](https://platform.openai.com/docs/models#moderation).
#
# @return [String, Symbol, OpenAI::Models::ModerationModel, nil]
- optional :model, union: -> { OpenAI::Models::ModerationCreateParams::Model }
-
- # @!parse
- # # @return [String, Symbol, OpenAI::Models::ModerationModel]
- # attr_writer :model
-
- # @!parse
- # # @param input [String, Array, Array]
- # # @param model [String, Symbol, OpenAI::Models::ModerationModel]
- # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
- # #
- # def initialize(input:, model: nil, request_options: {}, **) = super
+ optional :model, union: -> { OpenAI::ModerationCreateParams::Model }
- # def initialize: (Hash | OpenAI::BaseModel) -> void
-
- # @abstract
+ # @!method initialize(input:, model: nil, request_options: {})
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::ModerationCreateParams} for more details.
#
- # Input (or inputs) to classify. Can be a single string, an array of strings, or
- # an array of multi-modal input objects similar to other models.
- class Input < OpenAI::Union - StringArray = OpenAI::ArrayOf[String] + # @param input [String, Array, Array] Input (or inputs) to classify. Can be a single string, an array of strings, or + # + # @param model [String, Symbol, OpenAI::Models::ModerationModel] The content moderation model you would like to use. Learn more in + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - ModerationMultiModalInputArray = OpenAI::ArrayOf[union: -> { OpenAI::Models::ModerationMultiModalInput }] + # Input (or inputs) to classify. Can be a single string, an array of strings, or + # an array of multi-modal input objects similar to other models. + module Input + extend OpenAI::Internal::Type::Union # A string of text to classify for moderation. variant String # An array of strings to classify for moderation. - variant OpenAI::Models::ModerationCreateParams::Input::StringArray + variant -> { OpenAI::Models::ModerationCreateParams::Input::StringArray } # An array of multi-modal inputs to the moderation model. - variant OpenAI::Models::ModerationCreateParams::Input::ModerationMultiModalInputArray + variant -> { OpenAI::Models::ModerationCreateParams::Input::ModerationMultiModalInputArray } + + # @!method self.variants + # @return [Array(String, Array, Array)] + + # @type [OpenAI::Internal::Type::Converter] + StringArray = OpenAI::Internal::Type::ArrayOf[String] + + # @type [OpenAI::Internal::Type::Converter] + ModerationMultiModalInputArray = + OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::ModerationMultiModalInput }] end - # @abstract - # # The content moderation model you would like to use. Learn more in - # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and - # learn about available models - # [here](https://platform.openai.com/docs/models#moderation). - class Model < OpenAI::Union + # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + # learn about available models + # [here](https://platform.openai.com/docs/models#moderation). + module Model + extend OpenAI::Internal::Type::Union + variant String # The content moderation model you would like to use. Learn more in # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and learn about # available models [here](https://platform.openai.com/docs/models#moderation). - variant enum: -> { OpenAI::Models::ModerationModel } + variant enum: -> { OpenAI::ModerationModel } + + # @!method self.variants + # @return [Array(String, Symbol, OpenAI::Models::ModerationModel)] end end end diff --git a/lib/openai/models/moderation_create_response.rb b/lib/openai/models/moderation_create_response.rb index 2e9e3828..0085e8a8 100644 --- a/lib/openai/models/moderation_create_response.rb +++ b/lib/openai/models/moderation_create_response.rb @@ -2,7 +2,8 @@ module OpenAI module Models - class ModerationCreateResponse < OpenAI::BaseModel + # @see OpenAI::Resources::Moderations#create + class ModerationCreateResponse < OpenAI::Internal::Type::BaseModel # @!attribute id # The unique identifier for the moderation request. # @@ -19,18 +20,16 @@ class ModerationCreateResponse < OpenAI::BaseModel # A list of moderation objects. # # @return [Array] - required :results, -> { OpenAI::ArrayOf[OpenAI::Models::Moderation] } + required :results, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Moderation] } - # @!parse - # # Represents if a given text input is potentially harmful. 
- # # - # # @param id [String] - # # @param model [String] - # # @param results [Array] - # # - # def initialize(id:, model:, results:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, model:, results:) + # Represents if a given text input is potentially harmful. + # + # @param id [String] The unique identifier for the moderation request. + # + # @param model [String] The model used to generate the moderation results. + # + # @param results [Array] A list of moderation objects. end end end diff --git a/lib/openai/models/moderation_image_url_input.rb b/lib/openai/models/moderation_image_url_input.rb index de867149..dca658e1 100644 --- a/lib/openai/models/moderation_image_url_input.rb +++ b/lib/openai/models/moderation_image_url_input.rb @@ -2,12 +2,12 @@ module OpenAI module Models - class ModerationImageURLInput < OpenAI::BaseModel + class ModerationImageURLInput < OpenAI::Internal::Type::BaseModel # @!attribute image_url # Contains either an image URL or a data URL for a base64 encoded image. # # @return [OpenAI::Models::ModerationImageURLInput::ImageURL] - required :image_url, -> { OpenAI::Models::ModerationImageURLInput::ImageURL } + required :image_url, -> { OpenAI::ModerationImageURLInput::ImageURL } # @!attribute type # Always `image_url`. @@ -15,31 +15,25 @@ class ModerationImageURLInput < OpenAI::BaseModel # @return [Symbol, :image_url] required :type, const: :image_url - # @!parse - # # An object describing an image to classify. - # # - # # @param image_url [OpenAI::Models::ModerationImageURLInput::ImageURL] - # # @param type [Symbol, :image_url] - # # - # def initialize(image_url:, type: :image_url, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(image_url:, type: :image_url) + # An object describing an image to classify. + # + # @param image_url [OpenAI::Models::ModerationImageURLInput::ImageURL] Contains either an image URL or a data URL for a base64 encoded image. + # + # @param type [Symbol, :image_url] Always `image_url`. - class ImageURL < OpenAI::BaseModel + # @see OpenAI::Models::ModerationImageURLInput#image_url + class ImageURL < OpenAI::Internal::Type::BaseModel # @!attribute url # Either a URL of the image or the base64 encoded image data. # # @return [String] required :url, String - # @!parse - # # Contains either an image URL or a data URL for a base64 encoded image. - # # - # # @param url [String] - # # - # def initialize(url:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(url:) + # Contains either an image URL or a data URL for a base64 encoded image. + # + # @param url [String] Either a URL of the image or the base64 encoded image data. end end end diff --git a/lib/openai/models/moderation_model.rb b/lib/openai/models/moderation_model.rb index 2abe4a13..02d78035 100644 --- a/lib/openai/models/moderation_model.rb +++ b/lib/openai/models/moderation_model.rb @@ -2,15 +2,16 @@ module OpenAI module Models - # @abstract - # - class ModerationModel < OpenAI::Enum + module ModerationModel + extend OpenAI::Internal::Type::Enum + OMNI_MODERATION_LATEST = :"omni-moderation-latest" OMNI_MODERATION_2024_09_26 = :"omni-moderation-2024-09-26" TEXT_MODERATION_LATEST = :"text-moderation-latest" TEXT_MODERATION_STABLE = :"text-moderation-stable" - finalize! 
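The symbols above can be passed directly as the `model` parameter; the `@!method self.values` stub documented just below (and on every enum in this change) also lets you enumerate them. A small sketch, using the `OpenAI::ModerationModel` alias this diff itself references:

    OpenAI::ModerationModel.values
    # => [:"omni-moderation-latest", :"omni-moderation-2024-09-26",
    #     :"text-moderation-latest", :"text-moderation-stable"]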
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/moderation_multi_modal_input.rb b/lib/openai/models/moderation_multi_modal_input.rb index d5f91171..8b89503d 100644 --- a/lib/openai/models/moderation_multi_modal_input.rb +++ b/lib/openai/models/moderation_multi_modal_input.rb @@ -2,17 +2,20 @@ module OpenAI module Models - # @abstract - # # An object describing an image to classify. - class ModerationMultiModalInput < OpenAI::Union + module ModerationMultiModalInput + extend OpenAI::Internal::Type::Union + discriminator :type # An object describing an image to classify. - variant :image_url, -> { OpenAI::Models::ModerationImageURLInput } + variant :image_url, -> { OpenAI::ModerationImageURLInput } # An object describing text to classify. - variant :text, -> { OpenAI::Models::ModerationTextInput } + variant :text, -> { OpenAI::ModerationTextInput } + + # @!method self.variants + # @return [Array(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)] end end end diff --git a/lib/openai/models/moderation_text_input.rb b/lib/openai/models/moderation_text_input.rb index fbc192e8..087178f8 100644 --- a/lib/openai/models/moderation_text_input.rb +++ b/lib/openai/models/moderation_text_input.rb @@ -2,7 +2,7 @@ module OpenAI module Models - class ModerationTextInput < OpenAI::BaseModel + class ModerationTextInput < OpenAI::Internal::Type::BaseModel # @!attribute text # A string of text to classify. # @@ -15,15 +15,12 @@ class ModerationTextInput < OpenAI::BaseModel # @return [Symbol, :text] required :type, const: :text - # @!parse - # # An object describing text to classify. - # # - # # @param text [String] - # # @param type [Symbol, :text] - # # - # def initialize(text:, type: :text, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(text:, type: :text) + # An object describing text to classify. + # + # @param text [String] A string of text to classify. + # + # @param type [Symbol, :text] Always `text`. end end end diff --git a/lib/openai/models/other_file_chunking_strategy_object.rb b/lib/openai/models/other_file_chunking_strategy_object.rb index 0f7c6224..72fc9de8 100644 --- a/lib/openai/models/other_file_chunking_strategy_object.rb +++ b/lib/openai/models/other_file_chunking_strategy_object.rb @@ -2,23 +2,19 @@ module OpenAI module Models - class OtherFileChunkingStrategyObject < OpenAI::BaseModel + class OtherFileChunkingStrategyObject < OpenAI::Internal::Type::BaseModel # @!attribute type # Always `other`. # # @return [Symbol, :other] required :type, const: :other - # @!parse - # # This is returned when the chunking strategy is unknown. Typically, this is - # # because the file was indexed before the `chunking_strategy` concept was - # # introduced in the API. - # # - # # @param type [Symbol, :other] - # # - # def initialize(type: :other, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(type: :other) + # This is returned when the chunking strategy is unknown. Typically, this is + # because the file was indexed before the `chunking_strategy` concept was + # introduced in the API. + # + # @param type [Symbol, :other] Always `other`. 
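Putting the moderation pieces together; a hedged end-to-end sketch (the `moderations` accessor follows the `@see OpenAI::Resources::Moderations#create` references above, while the client construction and API-key handling are assumptions, not part of this change):

    require "openai"

    client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

    # Maps onto ModerationCreateParams: `input` is the union sketched earlier,
    # `model` a string or a ModerationModel symbol.
    response = client.moderations.create(
      input: "I want to kill them.",
      model: :"omni-moderation-latest"
    )

    # ModerationCreateResponse#results is an array of Moderation objects.
    puts response.results.first.flagged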
end end end diff --git a/lib/openai/models/reasoning.rb b/lib/openai/models/reasoning.rb index 28b7129f..f2718df3 100644 --- a/lib/openai/models/reasoning.rb +++ b/lib/openai/models/reasoning.rb @@ -2,53 +2,86 @@ module OpenAI module Models - class Reasoning < OpenAI::BaseModel + class Reasoning < OpenAI::Internal::Type::BaseModel # @!attribute effort - # **o-series models only** - # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] - required :effort, enum: -> { OpenAI::Models::ReasoningEffort }, nil?: true + optional :effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true # @!attribute generate_summary - # **o-series models only** + # @deprecated + # + # **Deprecated:** use `summary` instead. # - # A summary of the reasoning performed by the model. This can be useful for - # debugging and understanding the model's reasoning process. One of `concise` or - # `detailed`. + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `auto`, + # `concise`, or `detailed`. # # @return [Symbol, OpenAI::Models::Reasoning::GenerateSummary, nil] - optional :generate_summary, enum: -> { OpenAI::Models::Reasoning::GenerateSummary }, nil?: true + optional :generate_summary, enum: -> { OpenAI::Reasoning::GenerateSummary }, nil?: true - # @!parse - # # **o-series models only** - # # - # # Configuration options for - # # [reasoning models](https://platform.openai.com/docs/guides/reasoning). - # # - # # @param effort [Symbol, OpenAI::Models::ReasoningEffort, nil] - # # @param generate_summary [Symbol, OpenAI::Models::Reasoning::GenerateSummary, nil] - # # - # def initialize(effort:, generate_summary: nil, **) = super + # @!attribute summary + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `auto`, + # `concise`, or `detailed`. + # + # @return [Symbol, OpenAI::Models::Reasoning::Summary, nil] + optional :summary, enum: -> { OpenAI::Reasoning::Summary }, nil?: true - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(effort: nil, generate_summary: nil, summary: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Reasoning} for more details. + # + # **gpt-5 and o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + # + # @param effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for + # + # @param generate_summary [Symbol, OpenAI::Models::Reasoning::GenerateSummary, nil] **Deprecated:** use `summary` instead. + # + # @param summary [Symbol, OpenAI::Models::Reasoning::Summary, nil] A summary of the reasoning performed by the model. This can be - # @abstract + # @deprecated # - # **o-series models only** + # **Deprecated:** use `summary` instead. 
# - # A summary of the reasoning performed by the model. This can be useful for - # debugging and understanding the model's reasoning process. One of `concise` or - # `detailed`. - class GenerateSummary < OpenAI::Enum + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `auto`, + # `concise`, or `detailed`. + # + # @see OpenAI::Models::Reasoning#generate_summary + module GenerateSummary + extend OpenAI::Internal::Type::Enum + + AUTO = :auto + CONCISE = :concise + DETAILED = :detailed + + # @!method self.values + # @return [Array] + end + + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `auto`, + # `concise`, or `detailed`. + # + # @see OpenAI::Models::Reasoning#summary + module Summary + extend OpenAI::Internal::Type::Enum + + AUTO = :auto CONCISE = :concise DETAILED = :detailed - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/reasoning_effort.rb b/lib/openai/models/reasoning_effort.rb index f1fee21d..486b6d31 100644 --- a/lib/openai/models/reasoning_effort.rb +++ b/lib/openai/models/reasoning_effort.rb @@ -2,20 +2,21 @@ module OpenAI module Models - # @abstract - # - # **o-series models only** - # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. - class ReasoningEffort < OpenAI::Enum + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + module ReasoningEffort + extend OpenAI::Internal::Type::Enum + + MINIMAL = :minimal LOW = :low MEDIUM = :medium HIGH = :high - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/response_format_json_object.rb b/lib/openai/models/response_format_json_object.rb index 2996332d..13a620f2 100644 --- a/lib/openai/models/response_format_json_object.rb +++ b/lib/openai/models/response_format_json_object.rb @@ -2,23 +2,19 @@ module OpenAI module Models - class ResponseFormatJSONObject < OpenAI::BaseModel + class ResponseFormatJSONObject < OpenAI::Internal::Type::BaseModel # @!attribute type # The type of response format being defined. Always `json_object`. # # @return [Symbol, :json_object] required :type, const: :json_object - # @!parse - # # JSON object response format. An older method of generating JSON responses. Using - # # `json_schema` is recommended for models that support it. Note that the model - # # will not generate JSON without a system or user message instructing it to do so. - # # - # # @param type [Symbol, :json_object] - # # - # def initialize(type: :json_object, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(type: :json_object) + # JSON object response format. An older method of generating JSON responses. Using + # `json_schema` is recommended for models that support it. Note that the model + # will not generate JSON without a system or user message instructing it to do so. 
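Stepping back to the reasoning options above: `effort` gains `:minimal` and becomes optional, and `summary` supersedes the deprecated `generate_summary`. A short sketch, using the `OpenAI::Reasoning` alias this diff's `Response` model references:

    # Both fields are optional and nilable.
    reasoning = OpenAI::Reasoning.new(effort: :minimal, summary: :auto)

    # Deprecated spelling, still accepted for now:
    legacy = OpenAI::Reasoning.new(generate_summary: :concise)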
+ # + # @param type [Symbol, :json_object] The type of response format being defined. Always `json_object`. end end end diff --git a/lib/openai/models/response_format_json_schema.rb b/lib/openai/models/response_format_json_schema.rb index 907a4666..d8a941a3 100644 --- a/lib/openai/models/response_format_json_schema.rb +++ b/lib/openai/models/response_format_json_schema.rb @@ -2,12 +2,12 @@ module OpenAI module Models - class ResponseFormatJSONSchema < OpenAI::BaseModel + class ResponseFormatJSONSchema < OpenAI::Internal::Type::BaseModel # @!attribute json_schema # Structured Outputs configuration options, including a JSON Schema. # # @return [OpenAI::Models::ResponseFormatJSONSchema::JSONSchema] - required :json_schema, -> { OpenAI::Models::ResponseFormatJSONSchema::JSONSchema } + required :json_schema, -> { OpenAI::ResponseFormatJSONSchema::JSONSchema } # @!attribute type # The type of response format being defined. Always `json_schema`. @@ -15,69 +15,64 @@ class ResponseFormatJSONSchema < OpenAI::BaseModel # @return [Symbol, :json_schema] required :type, const: :json_schema - # @!parse - # # JSON Schema response format. Used to generate structured JSON responses. Learn - # # more about - # # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). - # # - # # @param json_schema [OpenAI::Models::ResponseFormatJSONSchema::JSONSchema] - # # @param type [Symbol, :json_schema] - # # - # def initialize(json_schema:, type: :json_schema, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(json_schema:, type: :json_schema) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ResponseFormatJSONSchema} for more details. + # + # JSON Schema response format. Used to generate structured JSON responses. Learn + # more about + # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + # + # @param json_schema [OpenAI::Models::ResponseFormatJSONSchema::JSONSchema] Structured Outputs configuration options, including a JSON Schema. + # + # @param type [Symbol, :json_schema] The type of response format being defined. Always `json_schema`. - class JSONSchema < OpenAI::BaseModel + # @see OpenAI::Models::ResponseFormatJSONSchema#json_schema + class JSONSchema < OpenAI::Internal::Type::BaseModel # @!attribute name # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores - # and dashes, with a maximum length of 64. + # and dashes, with a maximum length of 64. # # @return [String] required :name, String - # @!attribute [r] description + # @!attribute description # A description of what the response format is for, used by the model to determine - # how to respond in the format. + # how to respond in the format. # # @return [String, nil] optional :description, String - # @!parse - # # @return [String] - # attr_writer :description - - # @!attribute [r] schema + # @!attribute schema # The schema for the response format, described as a JSON Schema object. Learn how - # to build JSON schemas [here](https://json-schema.org/). + # to build JSON schemas [here](https://json-schema.org/). # # @return [Hash{Symbol=>Object}, nil] - optional :schema, OpenAI::HashOf[OpenAI::Unknown] - - # @!parse - # # @return [Hash{Symbol=>Object}] - # attr_writer :schema + optional :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] # @!attribute strict # Whether to enable strict schema adherence when generating the output. 
If set to - # true, the model will always follow the exact schema defined in the `schema` - # field. Only a subset of JSON Schema is supported when `strict` is `true`. To - # learn more, read the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # true, the model will always follow the exact schema defined in the `schema` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. To + # learn more, read the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # # @return [Boolean, nil] - optional :strict, OpenAI::BooleanModel, nil?: true + optional :strict, OpenAI::Internal::Type::Boolean, nil?: true - # @!parse - # # Structured Outputs configuration options, including a JSON Schema. - # # - # # @param name [String] - # # @param description [String] - # # @param schema [Hash{Symbol=>Object}] - # # @param strict [Boolean, nil] - # # - # def initialize(name:, description: nil, schema: nil, strict: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(name:, description: nil, schema: nil, strict: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ResponseFormatJSONSchema::JSONSchema} for more details. + # + # Structured Outputs configuration options, including a JSON Schema. + # + # @param name [String] The name of the response format. Must be a-z, A-Z, 0-9, or contain + # + # @param description [String] A description of what the response format is for, used by the model to + # + # @param schema [Hash{Symbol=>Object}] The schema for the response format, described as a JSON Schema object. + # + # @param strict [Boolean, nil] Whether to enable strict schema adherence when generating the output. end end end diff --git a/lib/openai/models/response_format_text.rb b/lib/openai/models/response_format_text.rb index 3821c9a0..a7ac56b0 100644 --- a/lib/openai/models/response_format_text.rb +++ b/lib/openai/models/response_format_text.rb @@ -2,21 +2,17 @@ module OpenAI module Models - class ResponseFormatText < OpenAI::BaseModel + class ResponseFormatText < OpenAI::Internal::Type::BaseModel # @!attribute type # The type of response format being defined. Always `text`. # # @return [Symbol, :text] required :type, const: :text - # @!parse - # # Default response format. Used to generate text responses. - # # - # # @param type [Symbol, :text] - # # - # def initialize(type: :text, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(type: :text) + # Default response format. Used to generate text responses. + # + # @param type [Symbol, :text] The type of response format being defined. Always `text`. end end end diff --git a/lib/openai/models/response_format_text_grammar.rb b/lib/openai/models/response_format_text_grammar.rb new file mode 100644 index 00000000..8d43e38f --- /dev/null +++ b/lib/openai/models/response_format_text_grammar.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +module OpenAI + module Models + class ResponseFormatTextGrammar < OpenAI::Internal::Type::BaseModel + # @!attribute grammar + # The custom grammar for the model to follow. + # + # @return [String] + required :grammar, String + + # @!attribute type + # The type of response format being defined. Always `grammar`. + # + # @return [Symbol, :grammar] + required :type, const: :grammar + + # @!method initialize(grammar:, type: :grammar) + # A custom grammar for the model to follow when generating text. 
Learn more in the + # [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars). + # + # @param grammar [String] The custom grammar for the model to follow. + # + # @param type [Symbol, :grammar] The type of response format being defined. Always `grammar`. + end + end +end diff --git a/lib/openai/models/response_format_text_python.rb b/lib/openai/models/response_format_text_python.rb new file mode 100644 index 00000000..9e12a904 --- /dev/null +++ b/lib/openai/models/response_format_text_python.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module OpenAI + module Models + class ResponseFormatTextPython < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The type of response format being defined. Always `python`. + # + # @return [Symbol, :python] + required :type, const: :python + + # @!method initialize(type: :python) + # Configure the model to generate valid Python code. See the + # [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars) + # for more details. + # + # @param type [Symbol, :python] The type of response format being defined. Always `python`. + end + end +end diff --git a/lib/openai/models/responses/computer_tool.rb b/lib/openai/models/responses/computer_tool.rb index 65a0097a..b883865c 100644 --- a/lib/openai/models/responses/computer_tool.rb +++ b/lib/openai/models/responses/computer_tool.rb @@ -3,24 +3,24 @@ module OpenAI module Models module Responses - class ComputerTool < OpenAI::BaseModel + class ComputerTool < OpenAI::Internal::Type::BaseModel # @!attribute display_height # The height of the computer display. # - # @return [Float] - required :display_height, Float + # @return [Integer] + required :display_height, Integer # @!attribute display_width # The width of the computer display. # - # @return [Float] - required :display_width, Float + # @return [Integer] + required :display_width, Integer # @!attribute environment # The type of computer environment to control. # # @return [Symbol, OpenAI::Models::Responses::ComputerTool::Environment] - required :environment, enum: -> { OpenAI::Models::Responses::ComputerTool::Environment } + required :environment, enum: -> { OpenAI::Responses::ComputerTool::Environment } # @!attribute type # The type of the computer use tool. Always `computer_use_preview`. @@ -28,29 +28,32 @@ class ComputerTool < OpenAI::BaseModel # @return [Symbol, :computer_use_preview] required :type, const: :computer_use_preview - # @!parse - # # A tool that controls a virtual computer. Learn more about the - # # [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). - # # - # # @param display_height [Float] - # # @param display_width [Float] - # # @param environment [Symbol, OpenAI::Models::Responses::ComputerTool::Environment] - # # @param type [Symbol, :computer_use_preview] - # # - # def initialize(display_height:, display_width:, environment:, type: :computer_use_preview, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(display_height:, display_width:, environment:, type: :computer_use_preview) + # A tool that controls a virtual computer. Learn more about the + # [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). + # + # @param display_height [Integer] The height of the computer display. + # + # @param display_width [Integer] The width of the computer display. 
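Looking back at the response formats defined above, a hedged sketch of constructing them (the schema and grammar bodies are placeholders; `OpenAI::ResponseFormatJSONSchema` is the alias this diff itself uses):

    # Structured Outputs via JSON Schema; `strict: true` enforces exact adherence.
    json_format = OpenAI::ResponseFormatJSONSchema.new(
      json_schema: {
        name: "math_answer",
        schema: {
          type: "object",
          properties: {answer: {type: "number"}},
          required: ["answer"],
          additionalProperties: false
        },
        strict: true
      }
    )

    # The new grammar-constrained format added by this change; the grammar
    # text is a stand-in, not a real grammar.
    grammar_format = OpenAI::Models::ResponseFormatTextGrammar.new(
      grammar: "start: \"yes\" | \"no\""
    )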
+ # + # @param environment [Symbol, OpenAI::Models::Responses::ComputerTool::Environment] The type of computer environment to control. # + # @param type [Symbol, :computer_use_preview] The type of the computer use tool. Always `computer_use_preview`. + # The type of computer environment to control. - class Environment < OpenAI::Enum - MAC = :mac + # + # @see OpenAI::Models::Responses::ComputerTool#environment + module Environment + extend OpenAI::Internal::Type::Enum + WINDOWS = :windows + MAC = :mac + LINUX = :linux UBUNTU = :ubuntu BROWSER = :browser - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/responses/custom_tool.rb b/lib/openai/models/responses/custom_tool.rb new file mode 100644 index 00000000..05c3665f --- /dev/null +++ b/lib/openai/models/responses/custom_tool.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class CustomTool < OpenAI::Internal::Type::BaseModel + # @!attribute name + # The name of the custom tool, used to identify it in tool calls. + # + # @return [String] + required :name, String + + # @!attribute type + # The type of the custom tool. Always `custom`. + # + # @return [Symbol, :custom] + required :type, const: :custom + + # @!attribute description + # Optional description of the custom tool, used to provide more context. + # + # @return [String, nil] + optional :description, String + + # @!attribute format_ + # The input format for the custom tool. Default is unconstrained text. + # + # @return [OpenAI::Models::CustomToolInputFormat::Text, OpenAI::Models::CustomToolInputFormat::Grammar, nil] + optional :format_, union: -> { OpenAI::CustomToolInputFormat }, api_name: :format + + # @!method initialize(name:, description: nil, format_: nil, type: :custom) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::CustomTool} for more details. + # + # A custom tool that processes input using a specified format. Learn more about + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools). + # + # @param name [String] The name of the custom tool, used to identify it in tool calls. + # + # @param description [String] Optional description of the custom tool, used to provide more context. + # + # @param format_ [OpenAI::Models::CustomToolInputFormat::Text, OpenAI::Models::CustomToolInputFormat::Grammar] The input format for the custom tool. Default is unconstrained text. + # + # @param type [Symbol, :custom] The type of the custom tool. Always `custom`. + end + end + end +end diff --git a/lib/openai/models/responses/easy_input_message.rb b/lib/openai/models/responses/easy_input_message.rb index 280bc258..64f37584 100644 --- a/lib/openai/models/responses/easy_input_message.rb +++ b/lib/openai/models/responses/easy_input_message.rb @@ -3,79 +3,87 @@ module OpenAI module Models module Responses - class EasyInputMessage < OpenAI::BaseModel + class EasyInputMessage < OpenAI::Internal::Type::BaseModel # @!attribute content # Text, image, or audio input to the model, used to generate a response. Can also - # contain previous assistant responses. + # contain previous assistant responses. # # @return [String, Array] - required :content, union: -> { OpenAI::Models::Responses::EasyInputMessage::Content } + required :content, union: -> { OpenAI::Responses::EasyInputMessage::Content } # @!attribute role # The role of the message input. One of `user`, `assistant`, `system`, or - # `developer`. + # `developer`. 
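The two tool shapes just defined can be instantiated directly; a sketch with placeholder values (`format_` is omitted on the custom tool, since unconstrained text is the documented default):

    # A custom tool (new in this change).
    custom_tool = OpenAI::Responses::CustomTool.new(
      name: "code_exec",
      description: "Executes snippets the model produces."
    )

    # A computer-use tool; note the display dimensions are now Integers.
    computer_tool = OpenAI::Responses::ComputerTool.new(
      display_width: 1024,
      display_height: 768,
      environment: :browser
    )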
# # @return [Symbol, OpenAI::Models::Responses::EasyInputMessage::Role] - required :role, enum: -> { OpenAI::Models::Responses::EasyInputMessage::Role } + required :role, enum: -> { OpenAI::Responses::EasyInputMessage::Role } - # @!attribute [r] type + # @!attribute type # The type of the message input. Always `message`. # # @return [Symbol, OpenAI::Models::Responses::EasyInputMessage::Type, nil] - optional :type, enum: -> { OpenAI::Models::Responses::EasyInputMessage::Type } - - # @!parse - # # @return [Symbol, OpenAI::Models::Responses::EasyInputMessage::Type] - # attr_writer :type - - # @!parse - # # A message input to the model with a role indicating instruction following - # # hierarchy. Instructions given with the `developer` or `system` role take - # # precedence over instructions given with the `user` role. Messages with the - # # `assistant` role are presumed to have been generated by the model in previous - # # interactions. - # # - # # @param content [String, Array] - # # @param role [Symbol, OpenAI::Models::Responses::EasyInputMessage::Role] - # # @param type [Symbol, OpenAI::Models::Responses::EasyInputMessage::Type] - # # - # def initialize(content:, role:, type: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + optional :type, enum: -> { OpenAI::Responses::EasyInputMessage::Type } - # @abstract + # @!method initialize(content:, role:, type: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::EasyInputMessage} for more details. # + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + # + # @param content [String, Array] Text, image, or audio input to the model, used to generate a response. + # + # @param role [Symbol, OpenAI::Models::Responses::EasyInputMessage::Role] The role of the message input. One of `user`, `assistant`, `system`, or + # + # @param type [Symbol, OpenAI::Models::Responses::EasyInputMessage::Type] The type of the message input. Always `message`. + # Text, image, or audio input to the model, used to generate a response. Can also - # contain previous assistant responses. - class Content < OpenAI::Union + # contain previous assistant responses. + # + # @see OpenAI::Models::Responses::EasyInputMessage#content + module Content + extend OpenAI::Internal::Type::Union + # A text input to the model. variant String # A list of one or many input items to the model, containing different content # types. - variant -> { OpenAI::Models::Responses::ResponseInputMessageContentList } + variant -> { OpenAI::Responses::ResponseInputMessageContentList } + + # @!method self.variants + # @return [Array(String, Array)] end - # @abstract - # # The role of the message input. One of `user`, `assistant`, `system`, or - # `developer`. - class Role < OpenAI::Enum + # `developer`. + # + # @see OpenAI::Models::Responses::EasyInputMessage#role + module Role + extend OpenAI::Internal::Type::Enum + USER = :user ASSISTANT = :assistant SYSTEM = :system DEVELOPER = :developer - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # # The type of the message input. Always `message`. 
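A sketch of the message model above with its defaults (`type` can be omitted; `content` also accepts the input-item list variant of the union):

    message = OpenAI::Responses::EasyInputMessage.new(
      role: :user,
      content: "What is the capital of France?"
    )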
- class Type < OpenAI::Enum + # + # @see OpenAI::Models::Responses::EasyInputMessage#type + module Type + extend OpenAI::Internal::Type::Enum + MESSAGE = :message - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/responses/file_search_tool.rb b/lib/openai/models/responses/file_search_tool.rb index 5c138fa3..aead0521 100644 --- a/lib/openai/models/responses/file_search_tool.rb +++ b/lib/openai/models/responses/file_search_tool.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class FileSearchTool < OpenAI::BaseModel + class FileSearchTool < OpenAI::Internal::Type::BaseModel # @!attribute type # The type of the file search tool. Always `file_search`. # @@ -14,106 +14,98 @@ class FileSearchTool < OpenAI::BaseModel # The IDs of the vector stores to search. # # @return [Array] - required :vector_store_ids, OpenAI::ArrayOf[String] + required :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String] - # @!attribute [r] filters - # A filter to apply based on file attributes. + # @!attribute filters + # A filter to apply. # # @return [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter, nil] - optional :filters, union: -> { OpenAI::Models::Responses::FileSearchTool::Filters } + optional :filters, union: -> { OpenAI::Responses::FileSearchTool::Filters }, nil?: true - # @!parse - # # @return [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter] - # attr_writer :filters - - # @!attribute [r] max_num_results + # @!attribute max_num_results # The maximum number of results to return. This number should be between 1 and 50 - # inclusive. + # inclusive. # # @return [Integer, nil] optional :max_num_results, Integer - # @!parse - # # @return [Integer] - # attr_writer :max_num_results - - # @!attribute [r] ranking_options + # @!attribute ranking_options # Ranking options for search. # # @return [OpenAI::Models::Responses::FileSearchTool::RankingOptions, nil] - optional :ranking_options, -> { OpenAI::Models::Responses::FileSearchTool::RankingOptions } - - # @!parse - # # @return [OpenAI::Models::Responses::FileSearchTool::RankingOptions] - # attr_writer :ranking_options - - # @!parse - # # A tool that searches for relevant content from uploaded files. Learn more about - # # the - # # [file search tool](https://platform.openai.com/docs/guides/tools-file-search). - # # - # # @param vector_store_ids [Array] - # # @param filters [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter] - # # @param max_num_results [Integer] - # # @param ranking_options [OpenAI::Models::Responses::FileSearchTool::RankingOptions] - # # @param type [Symbol, :file_search] - # # - # def initialize(vector_store_ids:, filters: nil, max_num_results: nil, ranking_options: nil, type: :file_search, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + optional :ranking_options, -> { OpenAI::Responses::FileSearchTool::RankingOptions } + + # @!method initialize(vector_store_ids:, filters: nil, max_num_results: nil, ranking_options: nil, type: :file_search) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::FileSearchTool} for more details. + # + # A tool that searches for relevant content from uploaded files. Learn more about + # the + # [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + # + # @param vector_store_ids [Array] The IDs of the vector stores to search. 
+ # + # @param filters [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter, nil] A filter to apply. # - # A filter to apply based on file attributes. - class Filters < OpenAI::Union + # @param max_num_results [Integer] The maximum number of results to return. This number should be between 1 and 50 + # + # @param ranking_options [OpenAI::Models::Responses::FileSearchTool::RankingOptions] Ranking options for search. + # + # @param type [Symbol, :file_search] The type of the file search tool. Always `file_search`. + + # A filter to apply. + # + # @see OpenAI::Models::Responses::FileSearchTool#filters + module Filters + extend OpenAI::Internal::Type::Union + # A filter used to compare a specified attribute key to a given value using a defined comparison operation. - variant -> { OpenAI::Models::ComparisonFilter } + variant -> { OpenAI::ComparisonFilter } # Combine multiple filters using `and` or `or`. - variant -> { OpenAI::Models::CompoundFilter } + variant -> { OpenAI::CompoundFilter } + + # @!method self.variants + # @return [Array(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter)] end - class RankingOptions < OpenAI::BaseModel - # @!attribute [r] ranker + # @see OpenAI::Models::Responses::FileSearchTool#ranking_options + class RankingOptions < OpenAI::Internal::Type::BaseModel + # @!attribute ranker # The ranker to use for the file search. # # @return [Symbol, OpenAI::Models::Responses::FileSearchTool::RankingOptions::Ranker, nil] - optional :ranker, enum: -> { OpenAI::Models::Responses::FileSearchTool::RankingOptions::Ranker } - - # @!parse - # # @return [Symbol, OpenAI::Models::Responses::FileSearchTool::RankingOptions::Ranker] - # attr_writer :ranker + optional :ranker, enum: -> { OpenAI::Responses::FileSearchTool::RankingOptions::Ranker } - # @!attribute [r] score_threshold + # @!attribute score_threshold # The score threshold for the file search, a number between 0 and 1. Numbers - # closer to 1 will attempt to return only the most relevant results, but may - # return fewer results. + # closer to 1 will attempt to return only the most relevant results, but may + # return fewer results. # # @return [Float, nil] optional :score_threshold, Float - # @!parse - # # @return [Float] - # attr_writer :score_threshold - - # @!parse - # # Ranking options for search. - # # - # # @param ranker [Symbol, OpenAI::Models::Responses::FileSearchTool::RankingOptions::Ranker] - # # @param score_threshold [Float] - # # - # def initialize(ranker: nil, score_threshold: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(ranker: nil, score_threshold: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::FileSearchTool::RankingOptions} for more details. # + # Ranking options for search. + # + # @param ranker [Symbol, OpenAI::Models::Responses::FileSearchTool::RankingOptions::Ranker] The ranker to use for the file search. + # + # @param score_threshold [Float] The score threshold for the file search, a number between 0 and 1. Numbers close + # The ranker to use for the file search. - class Ranker < OpenAI::Enum + # + # @see OpenAI::Models::Responses::FileSearchTool::RankingOptions#ranker + module Ranker + extend OpenAI::Internal::Type::Enum + AUTO = :auto DEFAULT_2024_11_15 = :"default-2024-11-15" - finalize! 
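A sketch of the file search tool as reshaped above (`filters` is now explicitly nilable; the vector store ID is a placeholder):

    file_search = OpenAI::Responses::FileSearchTool.new(
      vector_store_ids: ["vs_123"],
      max_num_results: 8,
      ranking_options: {ranker: :auto, score_threshold: 0.5}
    )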
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/responses/function_tool.rb b/lib/openai/models/responses/function_tool.rb index 5e1906c6..f4db7602 100644 --- a/lib/openai/models/responses/function_tool.rb +++ b/lib/openai/models/responses/function_tool.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class FunctionTool < OpenAI::BaseModel + class FunctionTool < OpenAI::Internal::Type::BaseModel # @!attribute name # The name of the function to call. # @@ -13,14 +13,14 @@ class FunctionTool < OpenAI::BaseModel # @!attribute parameters # A JSON schema object describing the parameters of the function. # - # @return [Hash{Symbol=>Object}] - required :parameters, OpenAI::HashOf[OpenAI::Unknown] + # @return [Hash{Symbol=>Object}, nil] + required :parameters, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown], nil?: true # @!attribute strict # Whether to enforce strict parameter validation. Default `true`. # - # @return [Boolean] - required :strict, OpenAI::BooleanModel + # @return [Boolean, nil] + required :strict, OpenAI::Internal::Type::Boolean, nil?: true # @!attribute type # The type of the function tool. Always `function`. @@ -30,25 +30,28 @@ class FunctionTool < OpenAI::BaseModel # @!attribute description # A description of the function. Used by the model to determine whether or not to - # call the function. + # call the function. # # @return [String, nil] optional :description, String, nil?: true - # @!parse - # # Defines a function in your own code the model can choose to call. Learn more - # # about - # # [function calling](https://platform.openai.com/docs/guides/function-calling). - # # - # # @param name [String] - # # @param parameters [Hash{Symbol=>Object}] - # # @param strict [Boolean] - # # @param description [String, nil] - # # @param type [Symbol, :function] - # # - # def initialize(name:, parameters:, strict:, description: nil, type: :function, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(name:, parameters:, strict:, description: nil, type: :function) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::FunctionTool} for more details. + # + # Defines a function in your own code the model can choose to call. Learn more + # about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + # + # @param name [String] The name of the function to call. + # + # @param parameters [Hash{Symbol=>Object}, nil] A JSON schema object describing the parameters of the function. + # + # @param strict [Boolean, nil] Whether to enforce strict parameter validation. Default `true`. + # + # @param description [String, nil] A description of the function. Used by the model to determine whether or not to + # + # @param type [Symbol, :function] The type of the function tool. Always `function`. 
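Similarly for the function tool, where `parameters` and `strict` are now nilable; a sketch with a placeholder JSON-schema body:

    get_weather = OpenAI::Responses::FunctionTool.new(
      name: "get_weather",
      description: "Look up the current weather for a city.",
      parameters: {
        type: "object",
        properties: {city: {type: "string"}},
        required: ["city"]
      },
      strict: true
    )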
end end end diff --git a/lib/openai/models/responses/input_item_list_params.rb b/lib/openai/models/responses/input_item_list_params.rb index e0cb2854..b627afea 100644 --- a/lib/openai/models/responses/input_item_list_params.rb +++ b/lib/openai/models/responses/input_item_list_params.rb @@ -3,77 +3,66 @@ module OpenAI module Models module Responses - class InputItemListParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Responses::InputItems#list + class InputItemListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!attribute [r] after + # @!attribute after # An item ID to list items after, used in pagination. # # @return [String, nil] optional :after, String - # @!parse - # # @return [String] - # attr_writer :after - - # @!attribute [r] before - # An item ID to list items before, used in pagination. + # @!attribute include + # Additional fields to include in the response. See the `include` parameter for + # Response creation above for more information. # - # @return [String, nil] - optional :before, String - - # @!parse - # # @return [String] - # attr_writer :before + # @return [Array, nil] + optional :include, -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Responses::ResponseIncludable] } - # @!attribute [r] limit + # @!attribute limit # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. # # @return [Integer, nil] optional :limit, Integer - # @!parse - # # @return [Integer] - # attr_writer :limit - - # @!attribute [r] order - # The order to return the input items in. Default is `asc`. + # @!attribute order + # The order to return the input items in. Default is `desc`. # - # - `asc`: Return the input items in ascending order. - # - `desc`: Return the input items in descending order. + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. # # @return [Symbol, OpenAI::Models::Responses::InputItemListParams::Order, nil] - optional :order, enum: -> { OpenAI::Models::Responses::InputItemListParams::Order } - - # @!parse - # # @return [Symbol, OpenAI::Models::Responses::InputItemListParams::Order] - # attr_writer :order - - # @!parse - # # @param after [String] - # # @param before [String] - # # @param limit [Integer] - # # @param order [Symbol, OpenAI::Models::Responses::InputItemListParams::Order] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(after: nil, before: nil, limit: nil, order: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + optional :order, enum: -> { OpenAI::Responses::InputItemListParams::Order } - # @abstract + # @!method initialize(after: nil, include: nil, limit: nil, order: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::InputItemListParams} for more details. # - # The order to return the input items in. Default is `asc`. + # @param after [String] An item ID to list items after, used in pagination. # - # - `asc`: Return the input items in ascending order. - # - `desc`: Return the input items in descending order. - class Order < OpenAI::Enum + # @param include [Array] Additional fields to include in the response. 
See the `include` + # + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between + # + # @param order [Symbol, OpenAI::Models::Responses::InputItemListParams::Order] The order to return the input items in. Default is `desc`. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + # The order to return the input items in. Default is `desc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. + module Order + extend OpenAI::Internal::Type::Enum + ASC = :asc DESC = :desc - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/responses/response.rb b/lib/openai/models/responses/response.rb index 4ff08aa8..ec153ad5 100644 --- a/lib/openai/models/responses/response.rb +++ b/lib/openai/models/responses/response.rb @@ -3,7 +3,10 @@ module OpenAI module Models module Responses - class Response < OpenAI::BaseModel + # @see OpenAI::Resources::Responses#create + # + # @see OpenAI::Resources::Responses#stream_raw + class Response < OpenAI::Internal::Type::BaseModel # @!attribute id # Unique identifier for this Response. # @@ -20,45 +23,44 @@ class Response < OpenAI::BaseModel # An error object returned when the model fails to generate a Response. # # @return [OpenAI::Models::Responses::ResponseError, nil] - required :error, -> { OpenAI::Models::Responses::ResponseError }, nil?: true + required :error, -> { OpenAI::Responses::ResponseError }, nil?: true # @!attribute incomplete_details # Details about why the response is incomplete. # # @return [OpenAI::Models::Responses::Response::IncompleteDetails, nil] - required :incomplete_details, -> { OpenAI::Models::Responses::Response::IncompleteDetails }, nil?: true + required :incomplete_details, -> { OpenAI::Responses::Response::IncompleteDetails }, nil?: true # @!attribute instructions - # Inserts a system (or developer) message as the first item in the model's - # context. + # A system (or developer) message inserted into the model's context. # - # When using along with `previous_response_id`, the instructions from a previous - # response will be not be carried over to the next response. This makes it simple - # to swap out system (or developer) messages in new responses. + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. # - # @return [String, nil] - required :instructions, String, nil?: true + # @return [String, Array, nil] + required :instructions, union: -> { OpenAI::Responses::Response::Instructions }, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. 
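Returning to the input-item listing parameters above (`before` removed, `include` added, default order now `desc`); a sketch assuming the accessor path from the `@see OpenAI::Resources::Responses::InputItems#list` reference, with a placeholder response ID:

    items = client.responses.input_items.list(
      "resp_123",
      limit: 20,
      order: :asc # override the new `desc` default
    )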
# # @return [Hash{Symbol=>String}, nil] - required :metadata, OpenAI::HashOf[String], nil?: true + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute model - # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. # - # @return [String, Symbol, OpenAI::Models::ChatModel] - required :model, union: -> { OpenAI::Models::Responses::Response::Model } + # @return [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] + required :model, union: -> { OpenAI::ResponsesModel } # @!attribute object # The object type of this resource - always set to `response`. @@ -69,260 +71,344 @@ class Response < OpenAI::BaseModel # @!attribute output # An array of content items generated by the model. # - # - The length and order of items in the `output` array is dependent on the - # model's response. - # - Rather than accessing the first item in the `output` array and assuming it's - # an `assistant` message with the content generated by the model, you might - # consider using the `output_text` property where supported in SDKs. + # - The length and order of items in the `output` array is dependent on the + # model's response. + # - Rather than accessing the first item in the `output` array and assuming it's + # an `assistant` message with the content generated by the model, you might + # consider using the `output_text` property where supported in SDKs. # - # @return [Array] - required :output, -> { OpenAI::ArrayOf[union: OpenAI::Models::Responses::ResponseOutputItem] } + # @return [Array] + required :output, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputItem] } # @!attribute parallel_tool_calls # Whether to allow the model to run tool calls in parallel. # # @return [Boolean] - required :parallel_tool_calls, OpenAI::BooleanModel + required :parallel_tool_calls, OpenAI::Internal::Type::Boolean # @!attribute temperature # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. We generally recommend altering this or `top_p` but - # not both. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. # # @return [Float, nil] required :temperature, Float, nil?: true # @!attribute tool_choice # How the model should select which tool (or tools) to use when generating a - # response. See the `tools` parameter to see how to specify which tools the model - # can call. + # response. See the `tools` parameter to see how to specify which tools the model + # can call. 
# - # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] - required :tool_choice, union: -> { OpenAI::Models::Responses::Response::ToolChoice } + # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] + required :tool_choice, union: -> { OpenAI::Responses::Response::ToolChoice } # @!attribute tools # An array of tools the model may call while generating a response. You can - # specify which tool to use by setting the `tool_choice` parameter. + # specify which tool to use by setting the `tool_choice` parameter. # - # The two categories of tools you can provide the model are: + # The two categories of tools you can provide the model are: # - # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's - # capabilities, like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search). - # Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). - # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about - # [function calling](https://platform.openai.com/docs/guides/function-calling). + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use custom tools to call your own code. # - # @return [Array] - required :tools, -> { OpenAI::ArrayOf[union: OpenAI::Models::Responses::Tool] } + # @return [Array] + required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } # @!attribute top_p # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or `temperature` but not both. + # We generally recommend altering this or `temperature` but not both. # # @return [Float, nil] required :top_p, Float, nil?: true + # @!attribute background + # Whether to run the model response in the background. + # [Learn more](https://platform.openai.com/docs/guides/background). + # + # @return [Boolean, nil] + optional :background, OpenAI::Internal::Type::Boolean, nil?: true + + # @!attribute conversation + # The conversation that this response belongs to. Input items and output items + # from this response are automatically added to this conversation. 
+ # + # @return [OpenAI::Models::Responses::Response::Conversation, nil] + optional :conversation, -> { OpenAI::Responses::Response::Conversation }, nil?: true + # @!attribute max_output_tokens # An upper bound for the number of tokens that can be generated for a response, - # including visible output tokens and - # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). # # @return [Integer, nil] optional :max_output_tokens, Integer, nil?: true + # @!attribute max_tool_calls + # The maximum number of total calls to built-in tools that can be processed in a + # response. This maximum number applies across all built-in tool calls, not per + # individual tool. Any further attempts to call a tool by the model will be + # ignored. + # + # @return [Integer, nil] + optional :max_tool_calls, Integer, nil?: true + # @!attribute previous_response_id # The unique ID of the previous response to the model. Use this to create - # multi-turn conversations. Learn more about - # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # Cannot be used in conjunction with `conversation`. # # @return [String, nil] optional :previous_response_id, String, nil?: true + # @!attribute prompt + # Reference to a prompt template and its variables. + # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + # + # @return [OpenAI::Models::Responses::ResponsePrompt, nil] + optional :prompt, -> { OpenAI::Responses::ResponsePrompt }, nil?: true + + # @!attribute prompt_cache_key + # Used by OpenAI to cache responses for similar requests to optimize your cache + # hit rates. Replaces the `user` field. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + # + # @return [String, nil] + optional :prompt_cache_key, String + # @!attribute reasoning - # **o-series models only** + # **gpt-5 and o-series models only** # - # Configuration options for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). # # @return [OpenAI::Models::Reasoning, nil] - optional :reasoning, -> { OpenAI::Models::Reasoning }, nil?: true + optional :reasoning, -> { OpenAI::Reasoning }, nil?: true - # @!attribute [r] status + # @!attribute safety_identifier + # A stable identifier used to help detect users of your application that may be + # violating OpenAI's usage policies. The IDs should be a string that uniquely + # identifies each user. We recommend hashing their username or email address, in + # order to avoid sending us any identifying information. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + # + # @return [String, nil] + optional :safety_identifier, String + + # @!attribute service_tier + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. 
+      # @!attribute service_tier
+      #   Specifies the processing type used for serving the request.
+      #
+      #   - If set to 'auto', then the request will be processed with the service tier
+      #     configured in the Project settings. Unless otherwise configured, the Project
+      #     will use 'default'.
+      #   - If set to 'default', then the request will be processed with the standard
+      #     pricing and performance for the selected model.
+      #   - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+      #     '[priority](https://openai.com/api-priority-processing/)', then the request
+      #     will be processed with the corresponding service tier.
+      #   - When not set, the default behavior is 'auto'.
+      #
+      #   When the `service_tier` parameter is set, the response body will include the
+      #   `service_tier` value based on the processing mode actually used to serve the
+      #   request. This response value may be different from the value set in the
+      #   parameter.
+      #
+      #   @return [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil]
+      optional :service_tier, enum: -> { OpenAI::Responses::Response::ServiceTier }, nil?: true
+
+      # @!attribute status
       #   The status of the response generation. One of `completed`, `failed`,
-      #     `in_progress`, or `incomplete`.
+      #   `in_progress`, `cancelled`, `queued`, or `incomplete`.
       #
       #   @return [Symbol, OpenAI::Models::Responses::ResponseStatus, nil]
-      optional :status, enum: -> { OpenAI::Models::Responses::ResponseStatus }
-
-      # @!parse
-      #   # @return [Symbol, OpenAI::Models::Responses::ResponseStatus]
-      #   attr_writer :status
+      optional :status, enum: -> { OpenAI::Responses::ResponseStatus }

-      # @!attribute [r] text
+      # @!attribute text
       #   Configuration options for a text response from the model. Can be plain text or
-      #     structured JSON data. Learn more:
+      #   structured JSON data. Learn more:
       #
-      #     - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-      #     - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+      #   - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+      #   - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
       #
       #   @return [OpenAI::Models::Responses::ResponseTextConfig, nil]
-      optional :text, -> { OpenAI::Models::Responses::ResponseTextConfig }
+      optional :text, -> { OpenAI::Responses::ResponseTextConfig }

-      # @!parse
-      #   # @return [OpenAI::Models::Responses::ResponseTextConfig]
-      #   attr_writer :text
+      # @!attribute top_logprobs
+      #   An integer between 0 and 20 specifying the number of most likely tokens to
+      #   return at each token position, each with an associated log probability.
+      #
+      #   @return [Integer, nil]
+      optional :top_logprobs, Integer, nil?: true
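Because the tier that actually serves a request may differ from the one requested, callers generally read `service_tier` (and `status`) back off the response rather than trusting the request value. Illustrative only, with enum values as defined later in this diff:

```ruby
response = client.responses.create(
  model: "gpt-4o",
  input: "Hello!",
  service_tier: "auto"
)

# The response echoes the tier that actually served the request, which may
# differ from what was requested.
puts response.service_tier # => :default, :flex, :priority, ...
puts response.status       # => :completed, :in_progress, :queued, ...
```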
       # @!attribute truncation
       #   The truncation strategy to use for the model response.
       #
-      #     - `auto`: If the context of this response and previous ones exceeds the model's
-      #       context window size, the model will truncate the response to fit the context
-      #       window by dropping input items in the middle of the conversation.
-      #     - `disabled` (default): If a model response will exceed the context window size
-      #       for a model, the request will fail with a 400 error.
+      #   - `auto`: If the context of this response and previous ones exceeds the model's
+      #     context window size, the model will truncate the response to fit the context
+      #     window by dropping input items in the middle of the conversation.
+      #   - `disabled` (default): If a model response will exceed the context window size
+      #     for a model, the request will fail with a 400 error.
       #
       #   @return [Symbol, OpenAI::Models::Responses::Response::Truncation, nil]
-      optional :truncation, enum: -> { OpenAI::Models::Responses::Response::Truncation }, nil?: true
+      optional :truncation, enum: -> { OpenAI::Responses::Response::Truncation }, nil?: true

-      # @!attribute [r] usage
+      # @!attribute usage
       #   Represents token usage details including input tokens, output tokens, a
-      #     breakdown of output tokens, and the total tokens used.
+      #   breakdown of output tokens, and the total tokens used.
       #
       #   @return [OpenAI::Models::Responses::ResponseUsage, nil]
-      optional :usage, -> { OpenAI::Models::Responses::ResponseUsage }
-
-      # @!parse
-      #   # @return [OpenAI::Models::Responses::ResponseUsage]
-      #   attr_writer :usage
+      optional :usage, -> { OpenAI::Responses::ResponseUsage }

-      # @!attribute [r] user
-      #   A unique identifier representing your end-user, which can help OpenAI to monitor
-      #     and detect abuse.
-      #     [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+      # @!attribute user
+      #   @deprecated
+      #
+      #   This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+      #   `prompt_cache_key` instead to maintain caching optimizations. A stable
+      #   identifier for your end-users. Used to boost cache hit rates by better bucketing
+      #   similar requests and to help OpenAI detect and prevent abuse.
+      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
       #
       #   @return [String, nil]
       optional :user, String

-      # @!parse
-      #   # @return [String]
-      #   attr_writer :user
-
-      # @!parse
-      #   # @param id [String]
-      #   # @param created_at [Float]
-      #   # @param error [OpenAI::Models::Responses::ResponseError, nil]
-      #   # @param incomplete_details [OpenAI::Models::Responses::Response::IncompleteDetails, nil]
-      #   # @param instructions [String, nil]
-      #   # @param metadata [Hash{Symbol=>String}, nil]
-      #   # @param model [String, Symbol, OpenAI::Models::ChatModel]
-      #   # @param output [Array]
-      #   # @param parallel_tool_calls [Boolean]
-      #   # @param temperature [Float, nil]
-      #   # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction]
-      #   # @param tools [Array]
-      #   # @param top_p [Float, nil]
-      #   # @param max_output_tokens [Integer, nil]
-      #   # @param previous_response_id [String, nil]
-      #   # @param reasoning [OpenAI::Models::Reasoning, nil]
-      #   # @param status [Symbol, OpenAI::Models::Responses::ResponseStatus]
-      #   # @param text [OpenAI::Models::Responses::ResponseTextConfig]
-      #   # @param truncation [Symbol, OpenAI::Models::Responses::Response::Truncation, nil]
-      #   # @param usage [OpenAI::Models::Responses::ResponseUsage]
-      #   # @param user [String]
-      #   # @param object [Symbol, :response]
-      #   #
-      #   def initialize(
-      #     id:,
-      #     created_at:,
-      #     error:,
-      #     incomplete_details:,
-      #     instructions:,
-      #     metadata:,
-      #     model:,
-      #     output:,
-      #     parallel_tool_calls:,
-      #     temperature:,
-      #     tool_choice:,
-      #     tools:,
-      #     top_p:,
-      #     max_output_tokens: nil,
-      #     previous_response_id: nil,
-      #     reasoning: nil,
-      #     status: nil,
-      #     text: nil,
-      #     truncation: nil,
-      #     usage: nil,
-      #     user: nil,
-      #     object: :response,
-      #     **
-      #   )
-      #     super
-      #   end
-
-      # def initialize: (Hash | OpenAI::BaseModel) -> void
-
-      class IncompleteDetails < OpenAI::BaseModel
-        # @!attribute [r] reason
+      # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, conversation: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
+      #   Some parameter documentation has been truncated, see
+      #   {OpenAI::Models::Responses::Response} for more details.
+      #
+      #   @param id [String] Unique identifier for this Response.
+      #
+      #   @param created_at [Float] Unix timestamp (in seconds) of when this Response was created.
+      #
+      #   @param error [OpenAI::Models::Responses::ResponseError, nil] An error object returned when the model fails to generate a Response.
+      #
+      #   @param incomplete_details [OpenAI::Models::Responses::Response::IncompleteDetails, nil] Details about why the response is incomplete.
+      #
+      #   @param instructions [String, Array, nil] A system (or developer) message inserted into the model's context.
+      #
+      #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
+      #
+      #   @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
+      #
+      #   @param output [Array] An array of content items generated by the model.
+      #
+      #   @param parallel_tool_calls [Boolean] Whether to allow the model to run tool calls in parallel.
+      #
+      #   @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
+      #
+      #   @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating
+      #
+      #   @param tools [Array] An array of tools the model may call while generating a response. You
+      #
+      #   @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
+      #
+      #   @param background [Boolean, nil] Whether to run the model response in the background.
+      #
+      #   @param conversation [OpenAI::Models::Responses::Response::Conversation, nil] The conversation that this response belongs to. Input items and output items fro
+      #
+      #   @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
+      #
+      #   @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
+      #
+      #   @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
+      #
+      #   @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
+      #
+      #   @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+      #
+      #   @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only**
+      #
+      #   @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+      #
+      #   @param service_tier [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil] Specifies the processing type used for serving the request.
+      #
+      #   @param status [Symbol, OpenAI::Models::Responses::ResponseStatus] The status of the response generation. One of `completed`, `failed`,
+      #
+      #   @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
+      #
+      #   @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
+      #
+      #   @param truncation [Symbol, OpenAI::Models::Responses::Response::Truncation, nil] The truncation strategy to use for the model response.
+      #
+      #   @param usage [OpenAI::Models::Responses::ResponseUsage] Represents token usage details including input tokens, output tokens,
+      #
+      #   @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+      #
+      #   @param object [Symbol, :response] The object type of this resource - always set to `response`.
+
+      # @see OpenAI::Models::Responses::Response#incomplete_details
+      class IncompleteDetails < OpenAI::Internal::Type::BaseModel
+        # @!attribute reason
         #   The reason why the response is incomplete.
         #
         #   @return [Symbol, OpenAI::Models::Responses::Response::IncompleteDetails::Reason, nil]
-        optional :reason, enum: -> { OpenAI::Models::Responses::Response::IncompleteDetails::Reason }
-
-        # @!parse
-        #   # @return [Symbol, OpenAI::Models::Responses::Response::IncompleteDetails::Reason]
-        #   attr_writer :reason
-
-        # @!parse
-        #   # Details about why the response is incomplete.
-        #   #
-        #   # @param reason [Symbol, OpenAI::Models::Responses::Response::IncompleteDetails::Reason]
-        #   #
-        #   def initialize(reason: nil, **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        optional :reason, enum: -> { OpenAI::Responses::Response::IncompleteDetails::Reason }

-        # @abstract
+        # @!method initialize(reason: nil)
+        #   Details about why the response is incomplete.
         #
+        #   @param reason [Symbol, OpenAI::Models::Responses::Response::IncompleteDetails::Reason] The reason why the response is incomplete.
+
         # The reason why the response is incomplete.
-        class Reason < OpenAI::Enum
+        #
+        # @see OpenAI::Models::Responses::Response::IncompleteDetails#reason
+        module Reason
+          extend OpenAI::Internal::Type::Enum
+
           MAX_OUTPUT_TOKENS = :max_output_tokens
           CONTENT_FILTER = :content_filter

-          finalize!
+          # @!method self.values
+          #   @return [Array]
         end
       end
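The `IncompleteDetails::Reason` enum above is what consuming code would branch on after a capped generation. A hedged sketch (accessor names follow the attribute declarations in this diff):

```ruby
response = client.responses.create(
  model: "gpt-4o",
  input: "Write a very long story.",
  max_output_tokens: 16
)

if response.status == :incomplete
  case response.incomplete_details&.reason
  in :max_output_tokens then puts "Hit the cap; raise max_output_tokens and retry."
  in :content_filter then puts "Output was filtered."
  else puts "Incomplete for an unreported reason."
  end
end
```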
-      # @abstract
+      # A system (or developer) message inserted into the model's context.
       #
-      #   Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
-      #   wide range of models with different capabilities, performance characteristics,
-      #   and price points. Refer to the
-      #   [model guide](https://platform.openai.com/docs/models) to browse and compare
-      #   available models.
-      class Model < OpenAI::Union
+      # When using along with `previous_response_id`, the instructions from a previous
+      # response will not be carried over to the next response. This makes it simple to
+      # swap out system (or developer) messages in new responses.
+      #
+      # @see OpenAI::Models::Responses::Response#instructions
+      module Instructions
+        extend OpenAI::Internal::Type::Union
+
+        # A text input to the model, equivalent to a text input with the
+        # `developer` role.
         variant String

-        # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI
-        # offers a wide range of models with different capabilities, performance
-        # characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
-        # to browse and compare available models.
-        variant enum: -> { OpenAI::Models::ChatModel }
+        # A list of one or many input items to the model, containing
+        # different content types.
+        variant -> { OpenAI::Models::Responses::Response::Instructions::ResponseInputItemArray }
+
+        # @!method self.variants
+        #   @return [Array(String, Array)]
+
+        # @type [OpenAI::Internal::Type::Converter]
+        ResponseInputItemArray =
+          OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Responses::ResponseInputItem }]
       end

-      # @abstract
-      #
       # How the model should select which tool (or tools) to use when generating a
-      #   response. See the `tools` parameter to see how to specify which tools the model
-      #   can call.
-      class ToolChoice < OpenAI::Union
+      # response. See the `tools` parameter to see how to specify which tools the model
+      # can call.
+      #
+      # @see OpenAI::Models::Responses::Response#tool_choice
+      module ToolChoice
+        extend OpenAI::Internal::Type::Union
+
         # Controls which (if any) tool is called by the model.
         #
         # `none` means the model will not call any tool and instead generates a message.
@@ -331,30 +417,91 @@ class ToolChoice < OpenAI::Union
         # more tools.
         #
         # `required` means the model must call one or more tools.
-        variant enum: -> { OpenAI::Models::Responses::ToolChoiceOptions }
+        variant enum: -> { OpenAI::Responses::ToolChoiceOptions }
+
+        # Constrains the tools available to the model to a pre-defined set.
+        variant -> { OpenAI::Responses::ToolChoiceAllowed }

         # Indicates that the model should use a built-in tool to generate a response.
         # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
-        variant -> { OpenAI::Models::Responses::ToolChoiceTypes }
+        variant -> { OpenAI::Responses::ToolChoiceTypes }

         # Use this option to force the model to call a specific function.
-        variant -> { OpenAI::Models::Responses::ToolChoiceFunction }
+        variant -> { OpenAI::Responses::ToolChoiceFunction }
+
+        # Use this option to force the model to call a specific tool on a remote MCP server.
+        variant -> { OpenAI::Responses::ToolChoiceMcp }
+
+        # Use this option to force the model to call a specific custom tool.
+        variant -> { OpenAI::Responses::ToolChoiceCustom }
+
+        # @!method self.variants
+        #   @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom)]
       end

-      # @abstract
+      # @see OpenAI::Models::Responses::Response#conversation
+      class Conversation < OpenAI::Internal::Type::BaseModel
+        # @!attribute id
+        #   The unique ID of the conversation.
+        #
+        #   @return [String]
+        required :id, String
+
+        # @!method initialize(id:)
+        #   The conversation that this response belongs to. Input items and output items
+        #   from this response are automatically added to this conversation.
+        #
+        #   @param id [String] The unique ID of the conversation.
+      end
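The `ToolChoice` union means `tool_choice` can be a plain option symbol/string or a typed object; forcing one specific function might look like the following sketch (`get_weather_tool` is the hypothetical definition from earlier, and the hash mirrors the `ToolChoiceFunction` shape named above):

```ruby
response = client.responses.create(
  model: "gpt-4o",
  input: "Get the weather for Oslo.",
  tools: [get_weather_tool],
  tool_choice: {type: "function", name: "get_weather"} # ToolChoiceFunction shape
)
```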
+      # Specifies the processing type used for serving the request.
+      #
+      # - If set to 'auto', then the request will be processed with the service tier
+      #   configured in the Project settings. Unless otherwise configured, the Project
+      #   will use 'default'.
+      # - If set to 'default', then the request will be processed with the standard
+      #   pricing and performance for the selected model.
+      # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+      #   '[priority](https://openai.com/api-priority-processing/)', then the request
+      #   will be processed with the corresponding service tier.
+      # - When not set, the default behavior is 'auto'.
+      #
+      # When the `service_tier` parameter is set, the response body will include the
+      # `service_tier` value based on the processing mode actually used to serve the
+      # request. This response value may be different from the value set in the
+      # parameter.
+      #
+      # @see OpenAI::Models::Responses::Response#service_tier
+      module ServiceTier
+        extend OpenAI::Internal::Type::Enum
+
+        AUTO = :auto
+        DEFAULT = :default
+        FLEX = :flex
+        SCALE = :scale
+        PRIORITY = :priority
+
+        # @!method self.values
+        #   @return [Array]
+      end
+
       # The truncation strategy to use for the model response.
       #
-      #   - `auto`: If the context of this response and previous ones exceeds the model's
-      #     context window size, the model will truncate the response to fit the context
-      #     window by dropping input items in the middle of the conversation.
-      #   - `disabled` (default): If a model response will exceed the context window size
-      #     for a model, the request will fail with a 400 error.
-      class Truncation < OpenAI::Enum
+      # - `auto`: If the context of this response and previous ones exceeds the model's
+      #   context window size, the model will truncate the response to fit the context
+      #   window by dropping input items in the middle of the conversation.
+      # - `disabled` (default): If a model response will exceed the context window size
+      #   for a model, the request will fail with a 400 error.
+      #
+      # @see OpenAI::Models::Responses::Response#truncation
+      module Truncation
+        extend OpenAI::Internal::Type::Enum
+
        AUTO = :auto
        DISABLED = :disabled

-        finalize!
+        # @!method self.values
+        #   @return [Array]
      end
    end
  end
end
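The `Truncation` enum closes out the model: with `truncation: "auto"` a long chain degrades gracefully instead of failing with a 400. A small sketch under the same assumed client:

```ruby
# Opt in to automatic context truncation; the default `disabled` raises a
# 400 once the context window would overflow.
response = client.responses.create(
  model: "gpt-4o",
  input: "Continue the story.",
  previous_response_id: followup.id,
  truncation: "auto"
)
```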
diff --git a/lib/openai/models/responses/response_audio_delta_event.rb b/lib/openai/models/responses/response_audio_delta_event.rb
index 8c690aef..07ecdb3d 100644
--- a/lib/openai/models/responses/response_audio_delta_event.rb
+++ b/lib/openai/models/responses/response_audio_delta_event.rb
@@ -3,28 +3,36 @@
 module OpenAI
   module Models
     module Responses
-      class ResponseAudioDeltaEvent < OpenAI::BaseModel
+      class ResponseAudioDeltaEvent < OpenAI::Internal::Type::BaseModel
         # @!attribute delta
         #   A chunk of Base64 encoded response audio bytes.
         #
         #   @return [String]
         required :delta, String

+        # @!attribute sequence_number
+        #   A sequence number for this chunk of the stream response.
+        #
+        #   @return [Integer]
+        required :sequence_number, Integer
+
         # @!attribute type
         #   The type of the event. Always `response.audio.delta`.
         #
         #   @return [Symbol, :"response.audio.delta"]
         required :type, const: :"response.audio.delta"

-        # @!parse
-        #   # Emitted when there is a partial audio response.
-        #   #
-        #   # @param delta [String]
-        #   # @param type [Symbol, :"response.audio.delta"]
-        #   #
-        #   def initialize(delta:, type: :"response.audio.delta", **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(delta:, sequence_number:, type: :"response.audio.delta")
+        #   Some parameter documentation has been truncated, see
+        #   {OpenAI::Models::Responses::ResponseAudioDeltaEvent} for more details.
+        #
+        #   Emitted when there is a partial audio response.
+        #
+        #   @param delta [String] A chunk of Base64 encoded response audio bytes.
+        #
+        #   @param sequence_number [Integer] A sequence number for this chunk of the stream response.
+        #
+        #   @param type [Symbol, :"response.audio.delta"] The type of the event. Always `response.audio.delta`.
       end
     end
   end
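`sequence_number` is being threaded through every streaming event in this diff, which lets a consumer detect dropped or reordered chunks. A sketch of accumulating audio deltas (the `stream` enumerable is an assumption; attribute names come from the class above):

```ruby
require "base64"

audio = +""
expected = nil

stream.each do |event| # `stream` is a hypothetical enumerable of typed events
  next unless event.type == :"response.audio.delta"

  # Use the sequence numbers introduced in this diff to spot gaps.
  raise "gap in audio stream" if expected && event.sequence_number != expected
  expected = event.sequence_number + 1

  audio << Base64.decode64(event.delta)
end
```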
diff --git a/lib/openai/models/responses/response_audio_done_event.rb b/lib/openai/models/responses/response_audio_done_event.rb
index e0f3632f..c40cf77a 100644
--- a/lib/openai/models/responses/response_audio_done_event.rb
+++ b/lib/openai/models/responses/response_audio_done_event.rb
@@ -3,21 +3,28 @@
 module OpenAI
   module Models
     module Responses
-      class ResponseAudioDoneEvent < OpenAI::BaseModel
+      class ResponseAudioDoneEvent < OpenAI::Internal::Type::BaseModel
+        # @!attribute sequence_number
+        #   The sequence number of the delta.
+        #
+        #   @return [Integer]
+        required :sequence_number, Integer
+
         # @!attribute type
         #   The type of the event. Always `response.audio.done`.
         #
         #   @return [Symbol, :"response.audio.done"]
         required :type, const: :"response.audio.done"

-        # @!parse
-        #   # Emitted when the audio response is complete.
-        #   #
-        #   # @param type [Symbol, :"response.audio.done"]
-        #   #
-        #   def initialize(type: :"response.audio.done", **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(sequence_number:, type: :"response.audio.done")
+        #   Some parameter documentation has been truncated, see
+        #   {OpenAI::Models::Responses::ResponseAudioDoneEvent} for more details.
+        #
+        #   Emitted when the audio response is complete.
+        #
+        #   @param sequence_number [Integer] The sequence number of the delta.
+        #
+        #   @param type [Symbol, :"response.audio.done"] The type of the event. Always `response.audio.done`.
       end
     end
   end
diff --git a/lib/openai/models/responses/response_audio_transcript_delta_event.rb b/lib/openai/models/responses/response_audio_transcript_delta_event.rb
index 476cb265..96372cc0 100644
--- a/lib/openai/models/responses/response_audio_transcript_delta_event.rb
+++ b/lib/openai/models/responses/response_audio_transcript_delta_event.rb
@@ -3,28 +3,36 @@
 module OpenAI
   module Models
     module Responses
-      class ResponseAudioTranscriptDeltaEvent < OpenAI::BaseModel
+      class ResponseAudioTranscriptDeltaEvent < OpenAI::Internal::Type::BaseModel
         # @!attribute delta
         #   The partial transcript of the audio response.
         #
         #   @return [String]
         required :delta, String

+        # @!attribute sequence_number
+        #   The sequence number of this event.
+        #
+        #   @return [Integer]
+        required :sequence_number, Integer
+
         # @!attribute type
         #   The type of the event. Always `response.audio.transcript.delta`.
         #
         #   @return [Symbol, :"response.audio.transcript.delta"]
         required :type, const: :"response.audio.transcript.delta"

-        # @!parse
-        #   # Emitted when there is a partial transcript of audio.
-        #   #
-        #   # @param delta [String]
-        #   # @param type [Symbol, :"response.audio.transcript.delta"]
-        #   #
-        #   def initialize(delta:, type: :"response.audio.transcript.delta", **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(delta:, sequence_number:, type: :"response.audio.transcript.delta")
+        #   Some parameter documentation has been truncated, see
+        #   {OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent} for more details.
+        #
+        #   Emitted when there is a partial transcript of audio.
+        #
+        #   @param delta [String] The partial transcript of the audio response.
+        #
+        #   @param sequence_number [Integer] The sequence number of this event.
+        #
+        #   @param type [Symbol, :"response.audio.transcript.delta"] The type of the event. Always `response.audio.transcript.delta`.
       end
     end
   end
diff --git a/lib/openai/models/responses/response_audio_transcript_done_event.rb b/lib/openai/models/responses/response_audio_transcript_done_event.rb
index 89ee6d86..9e0d38b9 100644
--- a/lib/openai/models/responses/response_audio_transcript_done_event.rb
+++ b/lib/openai/models/responses/response_audio_transcript_done_event.rb
@@ -3,21 +3,28 @@
 module OpenAI
   module Models
     module Responses
-      class ResponseAudioTranscriptDoneEvent < OpenAI::BaseModel
+      class ResponseAudioTranscriptDoneEvent < OpenAI::Internal::Type::BaseModel
+        # @!attribute sequence_number
+        #   The sequence number of this event.
+        #
+        #   @return [Integer]
+        required :sequence_number, Integer
+
         # @!attribute type
         #   The type of the event. Always `response.audio.transcript.done`.
         #
         #   @return [Symbol, :"response.audio.transcript.done"]
         required :type, const: :"response.audio.transcript.done"

-        # @!parse
-        #   # Emitted when the full audio transcript is completed.
-        #   #
-        #   # @param type [Symbol, :"response.audio.transcript.done"]
-        #   #
-        #   def initialize(type: :"response.audio.transcript.done", **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(sequence_number:, type: :"response.audio.transcript.done")
+        #   Some parameter documentation has been truncated, see
+        #   {OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent} for more details.
+        #
+        #   Emitted when the full audio transcript is completed.
+        #
+        #   @param sequence_number [Integer] The sequence number of this event.
+        #
+        #   @param type [Symbol, :"response.audio.transcript.done"] The type of the event. Always `response.audio.transcript.done`.
       end
     end
   end
diff --git a/lib/openai/models/responses/response_cancel_params.rb b/lib/openai/models/responses/response_cancel_params.rb
new file mode 100644
index 00000000..a06d628c
--- /dev/null
+++ b/lib/openai/models/responses/response_cancel_params.rb
@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    module Responses
+      # @see OpenAI::Resources::Responses#cancel
+      class ResponseCancelParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters
+
+        # @!method initialize(request_options: {})
+        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
+      end
+    end
+  end
+end
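`ResponseCancelParams` is the (empty) parameter model backing a new cancel endpoint, per the `@see OpenAI::Resources::Responses#cancel` reference. A plausible round trip, assuming cancellation applies to background responses:

```ruby
# Start a background response, then cancel it. The resource method name
# follows the `@see OpenAI::Resources::Responses#cancel` reference above.
response = client.responses.create(model: "gpt-4o", input: "Slow task.", background: true)

cancelled = client.responses.cancel(response.id)
puts cancelled.status # => :cancelled
```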
diff --git a/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb b/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb
index 3ec0bcba..60f1568a 100644
--- a/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb
+++ b/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb
@@ -3,35 +3,54 @@
 module OpenAI
   module Models
     module Responses
-      class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::BaseModel
+      class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::Internal::Type::BaseModel
         # @!attribute delta
-        #   The partial code snippet added by the code interpreter.
+        #   The partial code snippet being streamed by the code interpreter.
         #
         #   @return [String]
         required :delta, String

+        # @!attribute item_id
+        #   The unique identifier of the code interpreter tool call item.
+        #
+        #   @return [String]
+        required :item_id, String
+
         # @!attribute output_index
-        #   The index of the output item that the code interpreter call is in progress.
+        #   The index of the output item in the response for which the code is being
+        #   streamed.
         #
         #   @return [Integer]
         required :output_index, Integer

-        # @!attribute type
-        #   The type of the event. Always `response.code_interpreter_call.code.delta`.
+        # @!attribute sequence_number
+        #   The sequence number of this event, used to order streaming events.
         #
-        #   @return [Symbol, :"response.code_interpreter_call.code.delta"]
-        required :type, const: :"response.code_interpreter_call.code.delta"
+        #   @return [Integer]
+        required :sequence_number, Integer

-        # @!parse
-        #   # Emitted when a partial code snippet is added by the code interpreter.
-        #   #
-        #   # @param delta [String]
-        #   # @param output_index [Integer]
-        #   # @param type [Symbol, :"response.code_interpreter_call.code.delta"]
-        #   #
-        #   def initialize(delta:, output_index:, type: :"response.code_interpreter_call.code.delta", **) = super
+        # @!attribute type
+        #   The type of the event. Always `response.code_interpreter_call_code.delta`.
+        #
+        #   @return [Symbol, :"response.code_interpreter_call_code.delta"]
+        required :type, const: :"response.code_interpreter_call_code.delta"

-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(delta:, item_id:, output_index:, sequence_number:, type: :"response.code_interpreter_call_code.delta")
+        #   Some parameter documentation has been truncated, see
+        #   {OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent} for more
+        #   details.
+        #
+        #   Emitted when a partial code snippet is streamed by the code interpreter.
+        #
+        #   @param delta [String] The partial code snippet being streamed by the code interpreter.
+        #
+        #   @param item_id [String] The unique identifier of the code interpreter tool call item.
+        #
+        #   @param output_index [Integer] The index of the output item in the response for which the code is being streame
+        #
+        #   @param sequence_number [Integer] The sequence number of this event, used to order streaming events.
+        #
+        #   @param type [Symbol, :"response.code_interpreter_call_code.delta"] The type of the event. Always `response.code_interpreter_call_code.delta`.
       end
     end
   end
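The renamed `response.code_interpreter_call_code.delta` / `.done` event pair streams generated code incrementally, keyed by `item_id`. One way a consumer might reassemble it (sketch; `stream` is assumed as before):

```ruby
code_by_item = Hash.new { |h, k| h[k] = +"" }

stream.each do |event|
  case event.type
  in :"response.code_interpreter_call_code.delta"
    code_by_item[event.item_id] << event.delta
  in :"response.code_interpreter_call_code.done"
    code_by_item[event.item_id] = event.code.dup # authoritative final snippet
  else
    # other event types are ignored here
  end
end
```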
# - # @return [Symbol, :"response.code_interpreter_call.code.done"] - required :type, const: :"response.code_interpreter_call.code.done" + # @return [Integer] + required :sequence_number, Integer - # @!parse - # # Emitted when code snippet output is finalized by the code interpreter. - # # - # # @param code [String] - # # @param output_index [Integer] - # # @param type [Symbol, :"response.code_interpreter_call.code.done"] - # # - # def initialize(code:, output_index:, type: :"response.code_interpreter_call.code.done", **) = super + # @!attribute type + # The type of the event. Always `response.code_interpreter_call_code.done`. + # + # @return [Symbol, :"response.code_interpreter_call_code.done"] + required :type, const: :"response.code_interpreter_call_code.done" - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(code:, item_id:, output_index:, sequence_number:, type: :"response.code_interpreter_call_code.done") + # Emitted when the code snippet is finalized by the code interpreter. + # + # @param code [String] The final code snippet output by the code interpreter. + # + # @param item_id [String] The unique identifier of the code interpreter tool call item. + # + # @param output_index [Integer] The index of the output item in the response for which the code is finalized. + # + # @param sequence_number [Integer] The sequence number of this event, used to order streaming events. + # + # @param type [Symbol, :"response.code_interpreter_call_code.done"] The type of the event. Always `response.code_interpreter_call_code.done`. end end end diff --git a/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb b/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb index 3843f408..20224eca 100644 --- a/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb +++ b/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb @@ -3,35 +3,46 @@ module OpenAI module Models module Responses - class ResponseCodeInterpreterCallCompletedEvent < OpenAI::BaseModel - # @!attribute code_interpreter_call - # A tool call to run code. + class ResponseCodeInterpreterCallCompletedEvent < OpenAI::Internal::Type::BaseModel + # @!attribute item_id + # The unique identifier of the code interpreter tool call item. # - # @return [OpenAI::Models::Responses::ResponseCodeInterpreterToolCall] - required :code_interpreter_call, -> { OpenAI::Models::Responses::ResponseCodeInterpreterToolCall } + # @return [String] + required :item_id, String # @!attribute output_index - # The index of the output item that the code interpreter call is in progress. + # The index of the output item in the response for which the code interpreter call + # is completed. # # @return [Integer] required :output_index, Integer + # @!attribute sequence_number + # The sequence number of this event, used to order streaming events. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute type # The type of the event. Always `response.code_interpreter_call.completed`. # # @return [Symbol, :"response.code_interpreter_call.completed"] required :type, const: :"response.code_interpreter_call.completed" - # @!parse - # # Emitted when the code interpreter call is completed. 
diff --git a/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb b/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb
index 3843f408..20224eca 100644
--- a/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb
+++ b/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb
@@ -3,35 +3,46 @@
 module OpenAI
   module Models
     module Responses
-      class ResponseCodeInterpreterCallCompletedEvent < OpenAI::BaseModel
-        # @!attribute code_interpreter_call
-        #   A tool call to run code.
+      class ResponseCodeInterpreterCallCompletedEvent < OpenAI::Internal::Type::BaseModel
+        # @!attribute item_id
+        #   The unique identifier of the code interpreter tool call item.
         #
-        #   @return [OpenAI::Models::Responses::ResponseCodeInterpreterToolCall]
-        required :code_interpreter_call, -> { OpenAI::Models::Responses::ResponseCodeInterpreterToolCall }
+        #   @return [String]
+        required :item_id, String

         # @!attribute output_index
-        #   The index of the output item that the code interpreter call is in progress.
+        #   The index of the output item in the response for which the code interpreter call
+        #   is completed.
         #
         #   @return [Integer]
         required :output_index, Integer

+        # @!attribute sequence_number
+        #   The sequence number of this event, used to order streaming events.
+        #
+        #   @return [Integer]
+        required :sequence_number, Integer
+
         # @!attribute type
         #   The type of the event. Always `response.code_interpreter_call.completed`.
         #
         #   @return [Symbol, :"response.code_interpreter_call.completed"]
         required :type, const: :"response.code_interpreter_call.completed"

-        # @!parse
-        #   # Emitted when the code interpreter call is completed.
-        #   #
-        #   # @param code_interpreter_call [OpenAI::Models::Responses::ResponseCodeInterpreterToolCall]
-        #   # @param output_index [Integer]
-        #   # @param type [Symbol, :"response.code_interpreter_call.completed"]
-        #   #
-        #   def initialize(code_interpreter_call:, output_index:, type: :"response.code_interpreter_call.completed", **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.code_interpreter_call.completed")
+        #   Some parameter documentation has been truncated, see
+        #   {OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent} for more
+        #   details.
+        #
+        #   Emitted when the code interpreter call is completed.
+        #
+        #   @param item_id [String] The unique identifier of the code interpreter tool call item.
+        #
+        #   @param output_index [Integer] The index of the output item in the response for which the code interpreter call
+        #
+        #   @param sequence_number [Integer] The sequence number of this event, used to order streaming events.
+        #
+        #   @param type [Symbol, :"response.code_interpreter_call.completed"] The type of the event. Always `response.code_interpreter_call.completed`.
       end
     end
   end
diff --git a/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb b/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb
index 549d7eba..ff821153 100644
--- a/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb
+++ b/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb
@@ -3,35 +3,46 @@
 module OpenAI
   module Models
     module Responses
-      class ResponseCodeInterpreterCallInProgressEvent < OpenAI::BaseModel
-        # @!attribute code_interpreter_call
-        #   A tool call to run code.
+      class ResponseCodeInterpreterCallInProgressEvent < OpenAI::Internal::Type::BaseModel
+        # @!attribute item_id
+        #   The unique identifier of the code interpreter tool call item.
         #
-        #   @return [OpenAI::Models::Responses::ResponseCodeInterpreterToolCall]
-        required :code_interpreter_call, -> { OpenAI::Models::Responses::ResponseCodeInterpreterToolCall }
+        #   @return [String]
+        required :item_id, String

         # @!attribute output_index
-        #   The index of the output item that the code interpreter call is in progress.
+        #   The index of the output item in the response for which the code interpreter call
+        #   is in progress.
         #
         #   @return [Integer]
         required :output_index, Integer

+        # @!attribute sequence_number
+        #   The sequence number of this event, used to order streaming events.
+        #
+        #   @return [Integer]
+        required :sequence_number, Integer
+
         # @!attribute type
         #   The type of the event. Always `response.code_interpreter_call.in_progress`.
         #
         #   @return [Symbol, :"response.code_interpreter_call.in_progress"]
         required :type, const: :"response.code_interpreter_call.in_progress"

-        # @!parse
-        #   # Emitted when a code interpreter call is in progress.
-        #   #
-        #   # @param code_interpreter_call [OpenAI::Models::Responses::ResponseCodeInterpreterToolCall]
-        #   # @param output_index [Integer]
-        #   # @param type [Symbol, :"response.code_interpreter_call.in_progress"]
-        #   #
-        #   def initialize(code_interpreter_call:, output_index:, type: :"response.code_interpreter_call.in_progress", **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.code_interpreter_call.in_progress")
+        #   Some parameter documentation has been truncated, see
+        #   {OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent} for more
+        #   details.
+        #
+        #   Emitted when a code interpreter call is in progress.
+        #
+        #   @param item_id [String] The unique identifier of the code interpreter tool call item.
+        #
+        #   @param output_index [Integer] The index of the output item in the response for which the code interpreter call
+        #
+        #   @param sequence_number [Integer] The sequence number of this event, used to order streaming events.
+        #
+        #   @param type [Symbol, :"response.code_interpreter_call.in_progress"] The type of the event. Always `response.code_interpreter_call.in_progress`.
       end
     end
   end
diff --git a/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb b/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb
index bf8bbe13..0000a542 100644
--- a/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb
+++ b/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb
@@ -3,35 +3,46 @@
 module OpenAI
   module Models
     module Responses
-      class ResponseCodeInterpreterCallInterpretingEvent < OpenAI::BaseModel
-        # @!attribute code_interpreter_call
-        #   A tool call to run code.
+      class ResponseCodeInterpreterCallInterpretingEvent < OpenAI::Internal::Type::BaseModel
+        # @!attribute item_id
+        #   The unique identifier of the code interpreter tool call item.
         #
-        #   @return [OpenAI::Models::Responses::ResponseCodeInterpreterToolCall]
-        required :code_interpreter_call, -> { OpenAI::Models::Responses::ResponseCodeInterpreterToolCall }
+        #   @return [String]
+        required :item_id, String

         # @!attribute output_index
-        #   The index of the output item that the code interpreter call is in progress.
+        #   The index of the output item in the response for which the code interpreter is
+        #   interpreting code.
         #
         #   @return [Integer]
         required :output_index, Integer

+        # @!attribute sequence_number
+        #   The sequence number of this event, used to order streaming events.
+        #
+        #   @return [Integer]
+        required :sequence_number, Integer
+
         # @!attribute type
         #   The type of the event. Always `response.code_interpreter_call.interpreting`.
         #
         #   @return [Symbol, :"response.code_interpreter_call.interpreting"]
         required :type, const: :"response.code_interpreter_call.interpreting"

-        # @!parse
-        #   # Emitted when the code interpreter is actively interpreting the code snippet.
-        #   #
-        #   # @param code_interpreter_call [OpenAI::Models::Responses::ResponseCodeInterpreterToolCall]
-        #   # @param output_index [Integer]
-        #   # @param type [Symbol, :"response.code_interpreter_call.interpreting"]
-        #   #
-        #   def initialize(code_interpreter_call:, output_index:, type: :"response.code_interpreter_call.interpreting", **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.code_interpreter_call.interpreting")
+        #   Some parameter documentation has been truncated, see
+        #   {OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent} for
+        #   more details.
+        #
+        #   Emitted when the code interpreter is actively interpreting the code snippet.
+        #
+        #   @param item_id [String] The unique identifier of the code interpreter tool call item.
+        #
+        #   @param output_index [Integer] The index of the output item in the response for which the code interpreter is i
+        #
+        #   @param sequence_number [Integer] The sequence number of this event, used to order streaming events.
+        #
+        #   @param type [Symbol, :"response.code_interpreter_call.interpreting"] The type of the event. Always `response.code_interpreter_call.interpreting`.
       end
     end
   end
diff --git a/lib/openai/models/responses/response_code_interpreter_tool_call.rb b/lib/openai/models/responses/response_code_interpreter_tool_call.rb
index 25181e47..dddf9e3b 100644
--- a/lib/openai/models/responses/response_code_interpreter_tool_call.rb
+++ b/lib/openai/models/responses/response_code_interpreter_tool_call.rb
@@ -3,7 +3,7 @@
 module OpenAI
   module Models
     module Responses
-      class ResponseCodeInterpreterToolCall < OpenAI::BaseModel
+      class ResponseCodeInterpreterToolCall < OpenAI::Internal::Type::BaseModel
         # @!attribute id
         #   The unique ID of the code interpreter tool call.
         #
@@ -11,23 +11,34 @@ class ResponseCodeInterpreterToolCall < OpenAI::BaseModel
         required :id, String

         # @!attribute code
-        #   The code to run.
+        #   The code to run, or null if not available.
+        #
+        #   @return [String, nil]
+        required :code, String, nil?: true
+
+        # @!attribute container_id
+        #   The ID of the container used to run the code.
         #
         #   @return [String]
-        required :code, String
+        required :container_id, String

-        # @!attribute results
-        #   The results of the code interpreter tool call.
+        # @!attribute outputs
+        #   The outputs generated by the code interpreter, such as logs or images. Can be
+        #   null if no outputs are available.
         #
-        #   @return [Array]
-        required :results,
-                 -> { OpenAI::ArrayOf[union: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result] }
+        #   @return [Array, nil]
+        required :outputs,
+                 -> {
+                   OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseCodeInterpreterToolCall::Output]
+                 },
+                 nil?: true

         # @!attribute status
-        #   The status of the code interpreter tool call.
+        #   The status of the code interpreter tool call. Valid values are `in_progress`,
+        #   `completed`, `incomplete`, `interpreting`, and `failed`.
         #
         #   @return [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Status]
-        required :status, enum: -> { OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Status }
+        required :status, enum: -> { OpenAI::Responses::ResponseCodeInterpreterToolCall::Status }

         # @!attribute type
         #   The type of the code interpreter tool call. Always `code_interpreter_call`.
@@ -35,111 +46,97 @@ class ResponseCodeInterpreterToolCall < OpenAI::BaseModel
         #   @return [Symbol, :code_interpreter_call]
         required :type, const: :code_interpreter_call

-        # @!parse
-        #   # A tool call to run code.
-        #   #
-        #   # @param id [String]
-        #   # @param code [String]
-        #   # @param results [Array]
-        #   # @param status [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Status]
-        #   # @param type [Symbol, :code_interpreter_call]
-        #   #
-        #   def initialize(id:, code:, results:, status:, type: :code_interpreter_call, **) = super
+        # @!method initialize(id:, code:, container_id:, outputs:, status:, type: :code_interpreter_call)
+        #   Some parameter documentation has been truncated, see
+        #   {OpenAI::Models::Responses::ResponseCodeInterpreterToolCall} for more details.
+        #
+        #   A tool call to run code.
+        #
+        #   @param id [String] The unique ID of the code interpreter tool call.
+        #
+        #   @param code [String, nil] The code to run, or null if not available.
+        #
+        #   @param container_id [String] The ID of the container used to run the code.
+        #
+        #   @param outputs [Array, nil] The outputs generated by the code interpreter, such as logs or images.
+        #
+        #   @param status [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Status] The status of the code interpreter tool call. Valid values are `in_progress`, `c
+        #
+        #   @param type [Symbol, :code_interpreter_call] The type of the code interpreter tool call. Always `code_interpreter_call`.

-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # The logs output from the code interpreter.
+        module Output
+          extend OpenAI::Internal::Type::Union

-        # @abstract
-        #
-        # The output of a code interpreter tool call that is text.
-        class Result < OpenAI::Union
           discriminator :type

-          # The output of a code interpreter tool call that is text.
-          variant :logs, -> { OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Logs }
+          # The logs output from the code interpreter.
+          variant :logs, -> { OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Logs }

-          # The output of a code interpreter tool call that is a file.
-          variant :files, -> { OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files }
+          # The image output from the code interpreter.
+          variant :image, -> { OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Image }

-          class Logs < OpenAI::BaseModel
+          class Logs < OpenAI::Internal::Type::BaseModel
             # @!attribute logs
-            #   The logs of the code interpreter tool call.
+            #   The logs output from the code interpreter.
             #
             #   @return [String]
             required :logs, String

             # @!attribute type
-            #   The type of the code interpreter text output. Always `logs`.
+            #   The type of the output. Always 'logs'.
             #
             #   @return [Symbol, :logs]
             required :type, const: :logs

-            # @!parse
-            #   # The output of a code interpreter tool call that is text.
-            #   #
-            #   # @param logs [String]
-            #   # @param type [Symbol, :logs]
-            #   #
-            #   def initialize(logs:, type: :logs, **) = super
-
-            # def initialize: (Hash | OpenAI::BaseModel) -> void
+            # @!method initialize(logs:, type: :logs)
+            #   The logs output from the code interpreter.
+            #
+            #   @param logs [String] The logs output from the code interpreter.
+            #
+            #   @param type [Symbol, :logs] The type of the output. Always 'logs'.
           end
-          class Files < OpenAI::BaseModel
-            # @!attribute files
+          class Image < OpenAI::Internal::Type::BaseModel
+            # @!attribute type
+            #   The type of the output. Always 'image'.
             #
-            #   @return [Array]
-            required :files,
-                     -> { OpenAI::ArrayOf[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files::File] }
+            #   @return [Symbol, :image]
+            required :type, const: :image

-            # @!attribute type
-            #   The type of the code interpreter file output. Always `files`.
+            # @!attribute url
+            #   The URL of the image output from the code interpreter.
+            #
+            #   @return [String]
+            required :url, String
+
+            # @!method initialize(url:, type: :image)
+            #   The image output from the code interpreter.
             #
-            #   @return [Symbol, :files]
-            required :type, const: :files
-
-            # @!parse
-            #   # The output of a code interpreter tool call that is a file.
-            #   #
-            #   # @param files [Array]
-            #   # @param type [Symbol, :files]
-            #   #
-            #   def initialize(files:, type: :files, **) = super
-
-            # def initialize: (Hash | OpenAI::BaseModel) -> void
-
-            class File < OpenAI::BaseModel
-              # @!attribute file_id
-              #   The ID of the file.
-              #
-              #   @return [String]
-              required :file_id, String
-
-              # @!attribute mime_type
-              #   The MIME type of the file.
-              #
-              #   @return [String]
-              required :mime_type, String
-
-              # @!parse
-              #   # @param file_id [String]
-              #   # @param mime_type [String]
-              #   #
-              #   def initialize(file_id:, mime_type:, **) = super
-
-              # def initialize: (Hash | OpenAI::BaseModel) -> void
-            end
+            #   @param url [String] The URL of the image output from the code interpreter.
+            #
+            #   @param type [Symbol, :image] The type of the output. Always 'image'.
           end
+
+          # @!method self.variants
+          #   @return [Array(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Logs, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Image)]
         end

-        # @abstract
+        # The status of the code interpreter tool call. Valid values are `in_progress`,
+        # `completed`, `incomplete`, `interpreting`, and `failed`.
         #
-        # The status of the code interpreter tool call.
-        class Status < OpenAI::Enum
+        # @see OpenAI::Models::Responses::ResponseCodeInterpreterToolCall#status
+        module Status
+          extend OpenAI::Internal::Type::Enum
+
           IN_PROGRESS = :in_progress
-          INTERPRETING = :interpreting
           COMPLETED = :completed
+          INCOMPLETE = :incomplete
+          INTERPRETING = :interpreting
+          FAILED = :failed

-          finalize!
+          # @!method self.values
+          #   @return [Array]
         end
       end
     end
   end
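With `Result` (logs/files) replaced by the `Output` union of logs and images, downstream code would now branch on the two new variants, e.g. via pattern matching. A sketch (`response` as returned by an earlier call):

```ruby
call = response.output.find { |item| item.type == :code_interpreter_call }

call&.outputs&.each do |output|
  case output
  in OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Logs
    puts output.logs
  in OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Image
    puts "image at #{output.url}"
  else
    # unknown output variant
  end
end
```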
diff --git a/lib/openai/models/responses/response_completed_event.rb b/lib/openai/models/responses/response_completed_event.rb
index 8328ff58..37e03c75 100644
--- a/lib/openai/models/responses/response_completed_event.rb
+++ b/lib/openai/models/responses/response_completed_event.rb
@@ -3,12 +3,18 @@
 module OpenAI
   module Models
     module Responses
-      class ResponseCompletedEvent < OpenAI::BaseModel
+      class ResponseCompletedEvent < OpenAI::Internal::Type::BaseModel
         # @!attribute response
         #   Properties of the completed response.
         #
         #   @return [OpenAI::Models::Responses::Response]
-        required :response, -> { OpenAI::Models::Responses::Response }
+        required :response, -> { OpenAI::Responses::Response }
+
+        # @!attribute sequence_number
+        #   The sequence number for this event.
+        #
+        #   @return [Integer]
+        required :sequence_number, Integer

         # @!attribute type
         #   The type of the event. Always `response.completed`.
@@ -16,15 +22,17 @@ class ResponseCompletedEvent < OpenAI::BaseModel
         #   @return [Symbol, :"response.completed"]
         required :type, const: :"response.completed"

-        # @!parse
-        #   # Emitted when the model response is complete.
-        #   #
-        #   # @param response [OpenAI::Models::Responses::Response]
-        #   # @param type [Symbol, :"response.completed"]
-        #   #
-        #   def initialize(response:, type: :"response.completed", **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
+        # @!method initialize(response:, sequence_number:, type: :"response.completed")
+        #   Some parameter documentation has been truncated, see
+        #   {OpenAI::Models::Responses::ResponseCompletedEvent} for more details.
+        #
+        #   Emitted when the model response is complete.
+        #
+        #   @param response [OpenAI::Models::Responses::Response] Properties of the completed response.
+        #
+        #   @param sequence_number [Integer] The sequence number for this event.
+        #
+        #   @param type [Symbol, :"response.completed"] The type of the event. Always `response.completed`.
       end
     end
   end
diff --git a/lib/openai/models/responses/response_computer_tool_call.rb b/lib/openai/models/responses/response_computer_tool_call.rb
index 268cd184..96bf3742 100644
--- a/lib/openai/models/responses/response_computer_tool_call.rb
+++ b/lib/openai/models/responses/response_computer_tool_call.rb
@@ -3,7 +3,7 @@
 module OpenAI
   module Models
     module Responses
-      class ResponseComputerToolCall < OpenAI::BaseModel
+      class ResponseComputerToolCall < OpenAI::Internal::Type::BaseModel
         # @!attribute id
         #   The unique ID of the computer call.
         #
@@ -14,7 +14,7 @@ class ResponseComputerToolCall < OpenAI::BaseModel
         #   A click action.
         #
         #   @return [OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait]
-        required :action, union: -> { OpenAI::Models::Responses::ResponseComputerToolCall::Action }
+        required :action, union: -> { OpenAI::Responses::ResponseComputerToolCall::Action }

         # @!attribute call_id
         #   An identifier used when responding to the tool call with output.
@@ -27,81 +27,87 @@ class ResponseComputerToolCall < OpenAI::BaseModel
         #
         #   @return [Array]
         required :pending_safety_checks,
-                 -> { OpenAI::ArrayOf[OpenAI::Models::Responses::ResponseComputerToolCall::PendingSafetyCheck] }
+                 -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck] }

         # @!attribute status
         #   The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-        #     Populated when items are returned via API.
+        #   Populated when items are returned via API.
         #
         #   @return [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Status]
-        required :status, enum: -> { OpenAI::Models::Responses::ResponseComputerToolCall::Status }
+        required :status, enum: -> { OpenAI::Responses::ResponseComputerToolCall::Status }

         # @!attribute type
         #   The type of the computer call. Always `computer_call`.
         #
         #   @return [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Type]
-        required :type, enum: -> { OpenAI::Models::Responses::ResponseComputerToolCall::Type }
-
-        # @!parse
-        #   # A tool call to a computer use tool. See the
-        #   # [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)
-        #   # for more information.
-        #   #
-        #   # @param id [String]
-        #   # @param action [OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait]
-        #   # @param call_id [String]
-        #   # @param pending_safety_checks [Array]
-        #   # @param status [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Status]
-        #   # @param type [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Type]
-        #   #
-        #   def initialize(id:, action:, call_id:, pending_safety_checks:, status:, type:, **) = super
-
-        # def initialize: (Hash | OpenAI::BaseModel) -> void
-
-        # @abstract
+        required :type, enum: -> { OpenAI::Responses::ResponseComputerToolCall::Type }
+
+        # @!method initialize(id:, action:, call_id:, pending_safety_checks:, status:, type:)
+        #   Some parameter documentation has been truncated, see
+        #   {OpenAI::Models::Responses::ResponseComputerToolCall} for more details.
+        #
+        #   A tool call to a computer use tool. See the
+        #   [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)
+        #   for more information.
+        #
+        #   @param id [String] The unique ID of the computer call.
+        #
+        #   @param action [OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait] A click action.
+        #
+        #   @param call_id [String] An identifier used when responding to the tool call with output.
+        #
+        #   @param pending_safety_checks [Array] The pending safety checks for the computer call.
+        #
+        #   @param status [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Status] The status of the item. One of `in_progress`, `completed`, or
+        #
+        #   @param type [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Type] The type of the computer call. Always `computer_call`.

         # A click action.
-        class Action < OpenAI::Union
+        #
+        # @see OpenAI::Models::Responses::ResponseComputerToolCall#action
+        module Action
+          extend OpenAI::Internal::Type::Union
+
           discriminator :type

           # A click action.
-          variant :click, -> { OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click }
+          variant :click, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Click }

           # A double click action.
-          variant :double_click, -> { OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick }
+          variant :double_click, -> { OpenAI::Responses::ResponseComputerToolCall::Action::DoubleClick }

           # A drag action.
- variant :drag, -> { OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag } + variant :drag, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Drag } # A collection of keypresses the model would like to perform. - variant :keypress, -> { OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress } + variant :keypress, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Keypress } # A mouse move action. - variant :move, -> { OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move } + variant :move, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Move } # A screenshot action. - variant :screenshot, -> { OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot } + variant :screenshot, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Screenshot } # A scroll action. - variant :scroll, -> { OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll } + variant :scroll, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Scroll } # An action to type in text. - variant :type, -> { OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type } + variant :type, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Type } # A wait action. - variant :wait, -> { OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait } + variant :wait, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Wait } - class Click < OpenAI::BaseModel + class Click < OpenAI::Internal::Type::BaseModel # @!attribute button # Indicates which mouse button was pressed during the click. One of `left`, - # `right`, `wheel`, `back`, or `forward`. + # `right`, `wheel`, `back`, or `forward`. # # @return [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::Button] - required :button, enum: -> { OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::Button } + required :button, enum: -> { OpenAI::Responses::ResponseComputerToolCall::Action::Click::Button } # @!attribute type # Specifies the event type. For a click action, this property is always set to - # `click`. + # `click`. # # @return [Symbol, :click] required :type, const: :click @@ -118,37 +124,43 @@ class Click < OpenAI::BaseModel # @return [Integer] required :y_, Integer, api_name: :y - # @!parse - # # A click action. - # # - # # @param button [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::Button] - # # @param x [Integer] - # # @param y_ [Integer] - # # @param type [Symbol, :click] - # # - # def initialize(button:, x:, y_:, type: :click, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(button:, x:, y_:, type: :click) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click} for more + # details. + # + # A click action. + # + # @param button [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::Button] Indicates which mouse button was pressed during the click. One of `left`, `right + # + # @param x [Integer] The x-coordinate where the click occurred. + # + # @param y_ [Integer] The y-coordinate where the click occurred. # + # @param type [Symbol, :click] Specifies the event type. For a click action, this property is + # Indicates which mouse button was pressed during the click. One of `left`, - # `right`, `wheel`, `back`, or `forward`. - class Button < OpenAI::Enum + # `right`, `wheel`, `back`, or `forward`. 
+ # + # @see OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click#button + module Button + extend OpenAI::Internal::Type::Enum + LEFT = :left RIGHT = :right WHEEL = :wheel BACK = :back FORWARD = :forward - finalize! + # @!method self.values + # @return [Array] end end - class DoubleClick < OpenAI::BaseModel + class DoubleClick < OpenAI::Internal::Type::BaseModel # @!attribute type # Specifies the event type. For a double click action, this property is always set - # to `double_click`. + # to `double_click`. # # @return [Symbol, :double_click] required :type, const: :double_click @@ -165,52 +177,55 @@ class DoubleClick < OpenAI::BaseModel # @return [Integer] required :y_, Integer, api_name: :y - # @!parse - # # A double click action. - # # - # # @param x [Integer] - # # @param y_ [Integer] - # # @param type [Symbol, :double_click] - # # - # def initialize(x:, y_:, type: :double_click, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(x:, y_:, type: :double_click) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick} for + # more details. + # + # A double click action. + # + # @param x [Integer] The x-coordinate where the double click occurred. + # + # @param y_ [Integer] The y-coordinate where the double click occurred. + # + # @param type [Symbol, :double_click] Specifies the event type. For a double click action, this property is end - class Drag < OpenAI::BaseModel + class Drag < OpenAI::Internal::Type::BaseModel # @!attribute path # An array of coordinates representing the path of the drag action. Coordinates - # will appear as an array of objects, eg + # will appear as an array of objects, eg # - # ``` - # [ - # { x: 100, y: 200 }, - # { x: 200, y: 300 } - # ] - # ``` + # ``` + # [ + # { x: 100, y: 200 }, + # { x: 200, y: 300 } + # ] + # ``` # # @return [Array] required :path, - -> { OpenAI::ArrayOf[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path] } + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path] } # @!attribute type # Specifies the event type. For a drag action, this property is always set to - # `drag`. + # `drag`. # # @return [Symbol, :drag] required :type, const: :drag - # @!parse - # # A drag action. - # # - # # @param path [Array] - # # @param type [Symbol, :drag] - # # - # def initialize(path:, type: :drag, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(path:, type: :drag) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag} for more + # details. + # + # A drag action. + # + # @param path [Array] An array of coordinates representing the path of the drag action. Coordinates wi + # + # @param type [Symbol, :drag] Specifies the event type. For a drag action, this property is - class Path < OpenAI::BaseModel + class Path < OpenAI::Internal::Type::BaseModel # @!attribute x # The x-coordinate. # @@ -223,48 +238,50 @@ class Path < OpenAI::BaseModel # @return [Integer] required :y_, Integer, api_name: :y - # @!parse - # # A series of x/y coordinate pairs in the drag path. 
- # # - # # @param x [Integer] - # # @param y_ [Integer] - # # - # def initialize(x:, y_:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(x:, y_:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path} for + # more details. + # + # A series of x/y coordinate pairs in the drag path. + # + # @param x [Integer] The x-coordinate. + # + # @param y_ [Integer] The y-coordinate. end end - class Keypress < OpenAI::BaseModel + class Keypress < OpenAI::Internal::Type::BaseModel # @!attribute keys # The combination of keys the model is requesting to be pressed. This is an array - # of strings, each representing a key. + # of strings, each representing a key. # # @return [Array] - required :keys, OpenAI::ArrayOf[String] + required :keys, OpenAI::Internal::Type::ArrayOf[String] # @!attribute type # Specifies the event type. For a keypress action, this property is always set to - # `keypress`. + # `keypress`. # # @return [Symbol, :keypress] required :type, const: :keypress - # @!parse - # # A collection of keypresses the model would like to perform. - # # - # # @param keys [Array] - # # @param type [Symbol, :keypress] - # # - # def initialize(keys:, type: :keypress, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(keys:, type: :keypress) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress} for more + # details. + # + # A collection of keypresses the model would like to perform. + # + # @param keys [Array] The combination of keys the model is requesting to be pressed. This is an + # + # @param type [Symbol, :keypress] Specifies the event type. For a keypress action, this property is end - class Move < OpenAI::BaseModel + class Move < OpenAI::Internal::Type::BaseModel # @!attribute type # Specifies the event type. For a move action, this property is always set to - # `move`. + # `move`. # # @return [Symbol, :move] required :type, const: :move @@ -281,37 +298,39 @@ class Move < OpenAI::BaseModel # @return [Integer] required :y_, Integer, api_name: :y - # @!parse - # # A mouse move action. - # # - # # @param x [Integer] - # # @param y_ [Integer] - # # @param type [Symbol, :move] - # # - # def initialize(x:, y_:, type: :move, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(x:, y_:, type: :move) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move} for more + # details. + # + # A mouse move action. + # + # @param x [Integer] The x-coordinate to move to. + # + # @param y_ [Integer] The y-coordinate to move to. + # + # @param type [Symbol, :move] Specifies the event type. For a move action, this property is end - class Screenshot < OpenAI::BaseModel + class Screenshot < OpenAI::Internal::Type::BaseModel # @!attribute type # Specifies the event type. For a screenshot action, this property is always set - # to `screenshot`. + # to `screenshot`. # # @return [Symbol, :screenshot] required :type, const: :screenshot - # @!parse - # # A screenshot action. 
- # # - # # @param type [Symbol, :screenshot] - # # - # def initialize(type: :screenshot, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(type: :screenshot) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot} for + # more details. + # + # A screenshot action. + # + # @param type [Symbol, :screenshot] Specifies the event type. For a screenshot action, this property is end - class Scroll < OpenAI::BaseModel + class Scroll < OpenAI::Internal::Type::BaseModel # @!attribute scroll_x # The horizontal scroll distance. # @@ -326,7 +345,7 @@ class Scroll < OpenAI::BaseModel # @!attribute type # Specifies the event type. For a scroll action, this property is always set to - # `scroll`. + # `scroll`. # # @return [Symbol, :scroll] required :type, const: :scroll @@ -343,21 +362,25 @@ class Scroll < OpenAI::BaseModel # @return [Integer] required :y_, Integer, api_name: :y - # @!parse - # # A scroll action. - # # - # # @param scroll_x [Integer] - # # @param scroll_y [Integer] - # # @param x [Integer] - # # @param y_ [Integer] - # # @param type [Symbol, :scroll] - # # - # def initialize(scroll_x:, scroll_y:, x:, y_:, type: :scroll, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(scroll_x:, scroll_y:, x:, y_:, type: :scroll) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll} for more + # details. + # + # A scroll action. + # + # @param scroll_x [Integer] The horizontal scroll distance. + # + # @param scroll_y [Integer] The vertical scroll distance. + # + # @param x [Integer] The x-coordinate where the scroll occurred. + # + # @param y_ [Integer] The y-coordinate where the scroll occurred. + # + # @param type [Symbol, :scroll] Specifies the event type. For a scroll action, this property is end - class Type < OpenAI::BaseModel + class Type < OpenAI::Internal::Type::BaseModel # @!attribute text # The text to type. # @@ -366,42 +389,46 @@ class Type < OpenAI::BaseModel # @!attribute type # Specifies the event type. For a type action, this property is always set to - # `type`. + # `type`. # # @return [Symbol, :type] required :type, const: :type - # @!parse - # # An action to type in text. - # # - # # @param text [String] - # # @param type [Symbol, :type] - # # - # def initialize(text:, type: :type, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(text:, type: :type) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type} for more + # details. + # + # An action to type in text. + # + # @param text [String] The text to type. + # + # @param type [Symbol, :type] Specifies the event type. For a type action, this property is end - class Wait < OpenAI::BaseModel + class Wait < OpenAI::Internal::Type::BaseModel # @!attribute type # Specifies the event type. For a wait action, this property is always set to - # `wait`. + # `wait`. # # @return [Symbol, :wait] required :type, const: :wait - # @!parse - # # A wait action. - # # - # # @param type [Symbol, :wait] - # # - # def initialize(type: :wait, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(type: :wait) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait} for more + # details. 
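Taken together, the `Action` variants above form a small command language for driving a desktop. A sketch (not part of the patch) of dispatching on the discriminated union; the `click_at`/`drag_along`/etc. helpers are hypothetical, and attribute names follow the definitions above (note `y_`, the Ruby-side rename of the wire field `y`).

```ruby
# Hypothetical executor for one computer-use action. Each branch reads only
# the attributes declared for that variant in the model definitions above.
def perform(action)
  case action.type
  in :click        then click_at(action.x, action.y_, button: action.button)
  in :double_click then double_click_at(action.x, action.y_)
  in :drag         then drag_along(action.path.map { |p| [p.x, p.y_] })
  in :keypress     then press_keys(action.keys)
  in :move         then move_cursor(action.x, action.y_)
  in :screenshot   then capture_screen
  in :scroll       then scroll_at(action.x, action.y_, dx: action.scroll_x, dy: action.scroll_y)
  in :type         then type_text(action.text)
  in :wait         then sleep(1)
  end
end
```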
+ # + # A wait action. + # + # @param type [Symbol, :wait] Specifies the event type. For a wait action, this property is end + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait)] end - class PendingSafetyCheck < OpenAI::BaseModel + class PendingSafetyCheck < OpenAI::Internal::Type::BaseModel # @!attribute id # The ID of the pending safety check. # @@ -420,37 +447,41 @@ class PendingSafetyCheck < OpenAI::BaseModel # @return [String] required :message, String - # @!parse - # # A pending safety check for the computer call. - # # - # # @param id [String] - # # @param code [String] - # # @param message [String] - # # - # def initialize(id:, code:, message:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, code:, message:) + # A pending safety check for the computer call. + # + # @param id [String] The ID of the pending safety check. + # + # @param code [String] The type of the pending safety check. + # + # @param message [String] Details about the pending safety check. end - # @abstract - # # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. - class Status < OpenAI::Enum + # Populated when items are returned via API. + # + # @see OpenAI::Models::Responses::ResponseComputerToolCall#status + module Status + extend OpenAI::Internal::Type::Enum + IN_PROGRESS = :in_progress COMPLETED = :completed INCOMPLETE = :incomplete - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # # The type of the computer call. Always `computer_call`. - class Type < OpenAI::Enum + # + # @see OpenAI::Models::Responses::ResponseComputerToolCall#type + module Type + extend OpenAI::Internal::Type::Enum + COMPUTER_CALL = :computer_call - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/responses/response_computer_tool_call_output_item.rb b/lib/openai/models/responses/response_computer_tool_call_output_item.rb new file mode 100644 index 00000000..3e19423b --- /dev/null +++ b/lib/openai/models/responses/response_computer_tool_call_output_item.rb @@ -0,0 +1,109 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseComputerToolCallOutputItem < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the computer call tool output. + # + # @return [String] + required :id, String + + # @!attribute call_id + # The ID of the computer tool call that produced the output. + # + # @return [String] + required :call_id, String + + # @!attribute output + # A computer screenshot image used with the computer use tool. + # + # @return [OpenAI::Models::Responses::ResponseComputerToolCallOutputScreenshot] + required :output, -> { OpenAI::Responses::ResponseComputerToolCallOutputScreenshot } + + # @!attribute type + # The type of the computer tool call output. 
Always `computer_call_output`. + # + # @return [Symbol, :computer_call_output] + required :type, const: :computer_call_output + + # @!attribute acknowledged_safety_checks + # The safety checks reported by the API that have been acknowledged by the + # developer. + # + # @return [Array, nil] + optional :acknowledged_safety_checks, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck] } + + # @!attribute status + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::Status, nil] + optional :status, enum: -> { OpenAI::Responses::ResponseComputerToolCallOutputItem::Status } + + # @!method initialize(id:, call_id:, output:, acknowledged_safety_checks: nil, status: nil, type: :computer_call_output) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseComputerToolCallOutputItem} for more + # details. + # + # @param id [String] The unique ID of the computer call tool output. + # + # @param call_id [String] The ID of the computer tool call that produced the output. + # + # @param output [OpenAI::Models::Responses::ResponseComputerToolCallOutputScreenshot] A computer screenshot image used with the computer use tool. + # + # @param acknowledged_safety_checks [Array] The safety checks reported by the API that have been acknowledged by the + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::Status] The status of the message input. One of `in_progress`, `completed`, or + # + # @param type [Symbol, :computer_call_output] The type of the computer tool call output. Always `computer_call_output`. + + class AcknowledgedSafetyCheck < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The ID of the pending safety check. + # + # @return [String] + required :id, String + + # @!attribute code + # The type of the pending safety check. + # + # @return [String] + required :code, String + + # @!attribute message + # Details about the pending safety check. + # + # @return [String] + required :message, String + + # @!method initialize(id:, code:, message:) + # A pending safety check for the computer call. + # + # @param id [String] The ID of the pending safety check. + # + # @param code [String] The type of the pending safety check. + # + # @param message [String] Details about the pending safety check. + end + + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. + # + # @see OpenAI::Models::Responses::ResponseComputerToolCallOutputItem#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + INCOMPLETE = :incomplete + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/responses/response_computer_tool_call_output_screenshot.rb b/lib/openai/models/responses/response_computer_tool_call_output_screenshot.rb new file mode 100644 index 00000000..91dcc4a5 --- /dev/null +++ b/lib/openai/models/responses/response_computer_tool_call_output_screenshot.rb @@ -0,0 +1,41 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseComputerToolCallOutputScreenshot < OpenAI::Internal::Type::BaseModel + # @!attribute type + # Specifies the event type. 
For a computer screenshot, this property is always set + # to `computer_screenshot`. + # + # @return [Symbol, :computer_screenshot] + required :type, const: :computer_screenshot + + # @!attribute file_id + # The identifier of an uploaded file that contains the screenshot. + # + # @return [String, nil] + optional :file_id, String + + # @!attribute image_url + # The URL of the screenshot image. + # + # @return [String, nil] + optional :image_url, String + + # @!method initialize(file_id: nil, image_url: nil, type: :computer_screenshot) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseComputerToolCallOutputScreenshot} for more + # details. + # + # A computer screenshot image used with the computer use tool. + # + # @param file_id [String] The identifier of an uploaded file that contains the screenshot. + # + # @param image_url [String] The URL of the screenshot image. + # + # @param type [Symbol, :computer_screenshot] Specifies the event type. For a computer screenshot, this property is + end + end + end +end diff --git a/lib/openai/models/responses/response_content.rb b/lib/openai/models/responses/response_content.rb index 379aaed8..6c8a047a 100644 --- a/lib/openai/models/responses/response_content.rb +++ b/lib/openai/models/responses/response_content.rb @@ -3,24 +3,27 @@ module OpenAI module Models module Responses - # @abstract - # # Multi-modal input and output contents. - class ResponseContent < OpenAI::Union + module ResponseContent + extend OpenAI::Internal::Type::Union + # A text input to the model. - variant -> { OpenAI::Models::Responses::ResponseInputText } + variant -> { OpenAI::Responses::ResponseInputText } # An image input to the model. Learn about [image inputs](https://platform.openai.com/docs/guides/vision). - variant -> { OpenAI::Models::Responses::ResponseInputImage } + variant -> { OpenAI::Responses::ResponseInputImage } # A file input to the model. - variant -> { OpenAI::Models::Responses::ResponseInputFile } + variant -> { OpenAI::Responses::ResponseInputFile } # A text output from the model. - variant -> { OpenAI::Models::Responses::ResponseOutputText } + variant -> { OpenAI::Responses::ResponseOutputText } # A refusal from the model. - variant -> { OpenAI::Models::Responses::ResponseOutputRefusal } + variant -> { OpenAI::Responses::ResponseOutputRefusal } + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)] end end end diff --git a/lib/openai/models/responses/response_content_part_added_event.rb b/lib/openai/models/responses/response_content_part_added_event.rb index 094f7f60..9ef256b0 100644 --- a/lib/openai/models/responses/response_content_part_added_event.rb +++ b/lib/openai/models/responses/response_content_part_added_event.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseContentPartAddedEvent < OpenAI::BaseModel + class ResponseContentPartAddedEvent < OpenAI::Internal::Type::BaseModel # @!attribute content_index # The index of the content part that was added. # @@ -26,7 +26,13 @@ class ResponseContentPartAddedEvent < OpenAI::BaseModel # The content part that was added. 
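The screenshot model just added is what a computer-use loop sends back after executing an action. A sketch (not part of the patch) of the reply turn, using plain hashes that the SDK coerces into these models; all IDs and the model name are hypothetical.

```ruby
# Echo a fresh screenshot back as the output of the model's computer call.
next_input = [
  {
    type: :computer_call_output,
    call_id: "call_abc123", # the call_id from the computer_call item being answered
    output: {type: :computer_screenshot, file_id: "file-abc123"} # or image_url:
  }
]

client.responses.create(
  model: :"computer-use-preview", # illustrative; any computer-use-capable model
  previous_response_id: "resp_abc123",
  input: next_input,
  truncation: :auto
)
```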
# # @return [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal] - required :part, union: -> { OpenAI::Models::Responses::ResponseContentPartAddedEvent::Part } + required :part, union: -> { OpenAI::Responses::ResponseContentPartAddedEvent::Part } + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer # @!attribute type # The type of the event. Always `response.content_part.added`. @@ -34,30 +40,40 @@ class ResponseContentPartAddedEvent < OpenAI::BaseModel # @return [Symbol, :"response.content_part.added"] required :type, const: :"response.content_part.added" - # @!parse - # # Emitted when a new content part is added. - # # - # # @param content_index [Integer] - # # @param item_id [String] - # # @param output_index [Integer] - # # @param part [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal] - # # @param type [Symbol, :"response.content_part.added"] - # # - # def initialize(content_index:, item_id:, output_index:, part:, type: :"response.content_part.added", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(content_index:, item_id:, output_index:, part:, sequence_number:, type: :"response.content_part.added") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseContentPartAddedEvent} for more details. + # + # Emitted when a new content part is added. + # + # @param content_index [Integer] The index of the content part that was added. + # + # @param item_id [String] The ID of the output item that the content part was added to. # + # @param output_index [Integer] The index of the output item that the content part was added to. + # + # @param part [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal] The content part that was added. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.content_part.added"] The type of the event. Always `response.content_part.added`. + # The content part that was added. - class Part < OpenAI::Union + # + # @see OpenAI::Models::Responses::ResponseContentPartAddedEvent#part + module Part + extend OpenAI::Internal::Type::Union + discriminator :type # A text output from the model. - variant :output_text, -> { OpenAI::Models::Responses::ResponseOutputText } + variant :output_text, -> { OpenAI::Responses::ResponseOutputText } # A refusal from the model. - variant :refusal, -> { OpenAI::Models::Responses::ResponseOutputRefusal } + variant :refusal, -> { OpenAI::Responses::ResponseOutputRefusal } + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)] end end end diff --git a/lib/openai/models/responses/response_content_part_done_event.rb b/lib/openai/models/responses/response_content_part_done_event.rb index 33a8cedb..1b7603b6 100644 --- a/lib/openai/models/responses/response_content_part_done_event.rb +++ b/lib/openai/models/responses/response_content_part_done_event.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseContentPartDoneEvent < OpenAI::BaseModel + class ResponseContentPartDoneEvent < OpenAI::Internal::Type::BaseModel # @!attribute content_index # The index of the content part that is done. 
# @@ -26,7 +26,13 @@ class ResponseContentPartDoneEvent < OpenAI::BaseModel # The content part that is done. # # @return [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal] - required :part, union: -> { OpenAI::Models::Responses::ResponseContentPartDoneEvent::Part } + required :part, union: -> { OpenAI::Responses::ResponseContentPartDoneEvent::Part } + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer # @!attribute type # The type of the event. Always `response.content_part.done`. @@ -34,30 +40,40 @@ class ResponseContentPartDoneEvent < OpenAI::BaseModel # @return [Symbol, :"response.content_part.done"] required :type, const: :"response.content_part.done" - # @!parse - # # Emitted when a content part is done. - # # - # # @param content_index [Integer] - # # @param item_id [String] - # # @param output_index [Integer] - # # @param part [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal] - # # @param type [Symbol, :"response.content_part.done"] - # # - # def initialize(content_index:, item_id:, output_index:, part:, type: :"response.content_part.done", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(content_index:, item_id:, output_index:, part:, sequence_number:, type: :"response.content_part.done") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseContentPartDoneEvent} for more details. + # + # Emitted when a content part is done. + # + # @param content_index [Integer] The index of the content part that is done. + # + # @param item_id [String] The ID of the output item that the content part was added to. # + # @param output_index [Integer] The index of the output item that the content part was added to. + # + # @param part [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal] The content part that is done. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.content_part.done"] The type of the event. Always `response.content_part.done`. + # The content part that is done. - class Part < OpenAI::Union + # + # @see OpenAI::Models::Responses::ResponseContentPartDoneEvent#part + module Part + extend OpenAI::Internal::Type::Union + discriminator :type # A text output from the model. - variant :output_text, -> { OpenAI::Models::Responses::ResponseOutputText } + variant :output_text, -> { OpenAI::Responses::ResponseOutputText } # A refusal from the model. - variant :refusal, -> { OpenAI::Models::Responses::ResponseOutputRefusal } + variant :refusal, -> { OpenAI::Responses::ResponseOutputRefusal } + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)] end end end diff --git a/lib/openai/models/responses/response_conversation_param.rb b/lib/openai/models/responses/response_conversation_param.rb new file mode 100644 index 00000000..b4ab2977 --- /dev/null +++ b/lib/openai/models/responses/response_conversation_param.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseConversationParam < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the conversation. 
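Stepping back to the two content-part events above: both now carry `sequence_number`, so a consumer can stitch parts together deterministically. A sketch (not part of the patch), reusing the enumerable `stream` from the earlier sketch:

```ruby
# Track content parts as they open and close; [item_id, content_index]
# uniquely identifies a part within a response.
open_parts = {}
stream.each do |event|
  case event.type
  in :"response.content_part.added"
    open_parts[[event.item_id, event.content_index]] = event.part
  in :"response.content_part.done"
    open_parts.delete([event.item_id, event.content_index])
    puts "##{event.sequence_number}: part #{event.content_index} done (#{event.part.type})"
  else
    # other event types are not relevant here
  end
end
```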
+ # + # @return [String] + required :id, String + + # @!method initialize(id:) + # The conversation that this response belongs to. + # + # @param id [String] The unique ID of the conversation. + end + end + end +end diff --git a/lib/openai/models/responses/response_create_params.rb b/lib/openai/models/responses/response_create_params.rb index 94db6d99..6a193914 100644 --- a/lib/openai/models/responses/response_create_params.rb +++ b/lib/openai/models/responses/response_create_params.rb @@ -3,171 +3,261 @@ module OpenAI module Models module Responses - class ResponseCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Responses#create + # + # @see OpenAI::Resources::Responses#stream_raw + class ResponseCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!attribute input - # Text, image, or file inputs to the model, used to generate a response. - # - # Learn more: - # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Image inputs](https://platform.openai.com/docs/guides/images) - # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + # @!attribute background + # Whether to run the model response in the background. + # [Learn more](https://platform.openai.com/docs/guides/background). # - # @return [String, Array] - required :input, union: -> { OpenAI::Models::Responses::ResponseCreateParams::Input } + # @return [Boolean, nil] + optional :background, OpenAI::Internal::Type::Boolean, nil?: true - # @!attribute model - # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. + # @!attribute conversation + # The conversation that this response belongs to. Items from this conversation are + # prepended to `input_items` for this response request. Input items and output + # items from this response are automatically added to this conversation after this + # response completes. # - # @return [String, Symbol, OpenAI::Models::ChatModel] - required :model, union: -> { OpenAI::Models::Responses::ResponseCreateParams::Model } + # @return [String, OpenAI::Models::Responses::ResponseConversationParam, nil] + optional :conversation, + union: -> { + OpenAI::Responses::ResponseCreateParams::Conversation + }, + nil?: true # @!attribute include # Specify additional output data to include in the model response. Currently - # supported values are: - # - # - `file_search_call.results`: Include the search results of the file search tool - # call. - # - `message.input_image.image_url`: Include image urls from the input message. - # - `computer_call_output.output.image_url`: Include image urls from the computer - # call output. + # supported values are: + # + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. 
+ # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `message.output_text.logprobs`: Include logprobs with assistant messages. + # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + # tokens in reasoning item outputs. This enables reasoning items to be used in + # multi-turn conversations when using the Responses API statelessly (like when + # the `store` parameter is set to `false`, or when an organization is enrolled + # in the zero data retention program). # # @return [Array, nil] - optional :include, -> { OpenAI::ArrayOf[enum: OpenAI::Models::Responses::ResponseIncludable] }, nil?: true + optional :include, + -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Responses::ResponseIncludable] }, + nil?: true + + # @!attribute input + # Text, image, or file inputs to the model, used to generate a response. + # + # Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + # + # @return [String, Array, nil] + optional :input, union: -> { OpenAI::Responses::ResponseCreateParams::Input } # @!attribute instructions - # Inserts a system (or developer) message as the first item in the model's - # context. + # A system (or developer) message inserted into the model's context. # - # When using along with `previous_response_id`, the instructions from a previous - # response will be not be carried over to the next response. This makes it simple - # to swap out system (or developer) messages in new responses. + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. # # @return [String, nil] optional :instructions, String, nil?: true # @!attribute max_output_tokens # An upper bound for the number of tokens that can be generated for a response, - # including visible output tokens and - # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). # # @return [Integer, nil] optional :max_output_tokens, Integer, nil?: true + # @!attribute max_tool_calls + # The maximum number of total calls to built-in tools that can be processed in a + # response. This maximum number applies across all built-in tool calls, not per + # individual tool. Any further attempts to call a tool by the model will be + # ignored. + # + # @return [Integer, nil] + optional :max_tool_calls, Integer, nil?: true + # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. 
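A sketch (not part of the patch) of requesting some of the newly documented `include` values; the symbols are illustrative members of `ResponseIncludable`, and `store: false` pairs with `reasoning.encrypted_content` as described above.

```ruby
client.responses.create(
  model: :"gpt-4o",
  input: "What is in this image?",
  store: false,
  include: [
    :"message.output_text.logprobs",
    :"reasoning.encrypted_content" # lets reasoning items be replayed statelessly
  ]
)
```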
+ # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!attribute model + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. + # + # @return [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel, nil] + optional :model, union: -> { OpenAI::ResponsesModel } # @!attribute parallel_tool_calls # Whether to allow the model to run tool calls in parallel. # # @return [Boolean, nil] - optional :parallel_tool_calls, OpenAI::BooleanModel, nil?: true + optional :parallel_tool_calls, OpenAI::Internal::Type::Boolean, nil?: true # @!attribute previous_response_id # The unique ID of the previous response to the model. Use this to create - # multi-turn conversations. Learn more about - # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # Cannot be used in conjunction with `conversation`. # # @return [String, nil] optional :previous_response_id, String, nil?: true + # @!attribute prompt + # Reference to a prompt template and its variables. + # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + # + # @return [OpenAI::Models::Responses::ResponsePrompt, nil] + optional :prompt, -> { OpenAI::Responses::ResponsePrompt }, nil?: true + + # @!attribute prompt_cache_key + # Used by OpenAI to cache responses for similar requests to optimize your cache + # hit rates. Replaces the `user` field. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + # + # @return [String, nil] + optional :prompt_cache_key, String + # @!attribute reasoning - # **o-series models only** + # **gpt-5 and o-series models only** # - # Configuration options for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). # # @return [OpenAI::Models::Reasoning, nil] - optional :reasoning, -> { OpenAI::Models::Reasoning }, nil?: true + optional :reasoning, -> { OpenAI::Reasoning }, nil?: true + + # @!attribute safety_identifier + # A stable identifier used to help detect users of your application that may be + # violating OpenAI's usage policies. The IDs should be a string that uniquely + # identifies each user. We recommend hashing their username or email address, in + # order to avoid sending us any identifying information. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + # + # @return [String, nil] + optional :safety_identifier, String + + # @!attribute service_tier + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. 
+ # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] + optional :service_tier, enum: -> { OpenAI::Responses::ResponseCreateParams::ServiceTier }, nil?: true # @!attribute store # Whether to store the generated model response for later retrieval via API. # # @return [Boolean, nil] - optional :store, OpenAI::BooleanModel, nil?: true + optional :store, OpenAI::Internal::Type::Boolean, nil?: true + + # @!attribute stream_options + # Options for streaming responses. Only set this when you set `stream: true`. + # + # @return [OpenAI::Models::Responses::ResponseCreateParams::StreamOptions, nil] + optional :stream_options, -> { OpenAI::Responses::ResponseCreateParams::StreamOptions }, nil?: true # @!attribute temperature # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. We generally recommend altering this or `top_p` but - # not both. + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. # # @return [Float, nil] optional :temperature, Float, nil?: true - # @!attribute [r] text + # @!attribute text # Configuration options for a text response from the model. Can be plain text or - # structured JSON data. Learn more: + # structured JSON data. Learn more: # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) # # @return [OpenAI::Models::Responses::ResponseTextConfig, nil] - optional :text, -> { OpenAI::Models::Responses::ResponseTextConfig } + optional :text, -> { OpenAI::Responses::ResponseTextConfig } - # @!parse - # # @return [OpenAI::Models::Responses::ResponseTextConfig] - # attr_writer :text - - # @!attribute [r] tool_choice + # @!attribute tool_choice # How the model should select which tool (or tools) to use when generating a - # response. See the `tools` parameter to see how to specify which tools the model - # can call. + # response. See the `tools` parameter to see how to specify which tools the model + # can call. 
# - # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, nil] - optional :tool_choice, union: -> { OpenAI::Models::Responses::ResponseCreateParams::ToolChoice } - - # @!parse - # # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] - # attr_writer :tool_choice + # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, nil] + optional :tool_choice, union: -> { OpenAI::Responses::ResponseCreateParams::ToolChoice } - # @!attribute [r] tools + # @!attribute tools # An array of tools the model may call while generating a response. You can - # specify which tool to use by setting the `tool_choice` parameter. + # specify which tool to use by setting the `tool_choice` parameter. # - # The two categories of tools you can provide the model are: + # The two categories of tools you can provide the model are: # - # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's - # capabilities, like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search). - # Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). - # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about - # [function calling](https://platform.openai.com/docs/guides/function-calling). + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use custom tools to call your own code. # - # @return [Array, nil] - optional :tools, -> { OpenAI::ArrayOf[union: OpenAI::Models::Responses::Tool] } + # @return [Array, nil] + optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } - # @!parse - # # @return [Array] - # attr_writer :tools + # @!attribute top_logprobs + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + # + # @return [Integer, nil] + optional :top_logprobs, Integer, nil?: true # @!attribute top_p # An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. # - # We generally recommend altering this or `temperature` but not both. 
+ # We generally recommend altering this or `temperature` but not both. # # @return [Float, nil] optional :top_p, Float, nil?: true @@ -175,117 +265,183 @@ class ResponseCreateParams < OpenAI::BaseModel # @!attribute truncation # The truncation strategy to use for the model response. # - # - `auto`: If the context of this response and previous ones exceeds the model's - # context window size, the model will truncate the response to fit the context - # window by dropping input items in the middle of the conversation. - # - `disabled` (default): If a model response will exceed the context window size - # for a model, the request will fail with a 400 error. + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. # # @return [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] - optional :truncation, enum: -> { OpenAI::Models::Responses::ResponseCreateParams::Truncation }, nil?: true + optional :truncation, enum: -> { OpenAI::Responses::ResponseCreateParams::Truncation }, nil?: true - # @!attribute [r] user - # A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # @!attribute user + # @deprecated + # + # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + # `prompt_cache_key` instead to maintain caching optimizations. A stable + # identifier for your end-users. Used to boost cache hit rates by better bucketing + # similar requests and to help OpenAI detect and prevent abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). 
# # @return [String, nil] optional :user, String - # @!parse - # # @return [String] - # attr_writer :user - - # @!parse - # # @param input [String, Array] - # # @param model [String, Symbol, OpenAI::Models::ChatModel] - # # @param include [Array, nil] - # # @param instructions [String, nil] - # # @param max_output_tokens [Integer, nil] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param parallel_tool_calls [Boolean, nil] - # # @param previous_response_id [String, nil] - # # @param reasoning [OpenAI::Models::Reasoning, nil] - # # @param store [Boolean, nil] - # # @param temperature [Float, nil] - # # @param text [OpenAI::Models::Responses::ResponseTextConfig] - # # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] - # # @param tools [Array] - # # @param top_p [Float, nil] - # # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] - # # @param user [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize( - # input:, - # model:, - # include: nil, - # instructions: nil, - # max_output_tokens: nil, - # metadata: nil, - # parallel_tool_calls: nil, - # previous_response_id: nil, - # reasoning: nil, - # store: nil, - # temperature: nil, - # text: nil, - # tool_choice: nil, - # tools: nil, - # top_p: nil, - # truncation: nil, - # user: nil, - # request_options: {}, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseCreateParams} for more details. + # + # @param background [Boolean, nil] Whether to run the model response in the background. + # + # @param conversation [String, OpenAI::Models::Responses::ResponseConversationParam, nil] The conversation that this response belongs to. Items from this conversation are + # + # @param include [Array, nil] Specify additional output data to include in the model response. Currently + # + # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. + # + # @param instructions [String, nil] A system (or developer) message inserted into the model's context. + # + # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in + # + # @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI + # + # @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel. 
+ # + # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to + # + # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables. + # + # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi + # + # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only** + # + # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi + # + # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request. + # + # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via + # + # @param stream_options [OpenAI::Models::Responses::ResponseCreateParams::StreamOptions, nil] Options for streaming responses. Only set this when you set `stream: true`. + # + # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m + # + # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain + # + # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating + # + # @param tools [Array] An array of tools the model may call while generating a response. You + # + # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to + # + # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, + # + # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response. + # + # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + # The conversation that this response belongs to. Items from this conversation are + # prepended to `input_items` for this response request. Input items and output + # items from this response are automatically added to this conversation after this + # response completes. + module Conversation + extend OpenAI::Internal::Type::Union + + # The unique ID of the conversation. + variant String + + # The conversation that this response belongs to. + variant -> { OpenAI::Responses::ResponseConversationParam } + + # @!method self.variants + # @return [Array(String, OpenAI::Models::Responses::ResponseConversationParam)] + end + # Text, image, or file inputs to the model, used to generate a response. 
# - # Learn more: + # Learn more: # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Image inputs](https://platform.openai.com/docs/guides/images) - # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - # - [Function calling](https://platform.openai.com/docs/guides/function-calling) - class Input < OpenAI::Union + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + module Input + extend OpenAI::Internal::Type::Union + # A text input to the model, equivalent to a text input with the # `user` role. variant String # A list of one or many input items to the model, containing # different content types. - variant -> { OpenAI::Models::Responses::ResponseInput } + variant -> { OpenAI::Responses::ResponseInput } + + # @!method self.variants + # @return [Array(String, Array)] end - # @abstract - # - # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. - class Model < OpenAI::Union - variant String + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + module ServiceTier + extend OpenAI::Internal::Type::Enum - # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI - # offers a wide range of models with different capabilities, performance - # characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models) - # to browse and compare available models. - variant enum: -> { OpenAI::Models::ChatModel } + AUTO = :auto + DEFAULT = :default + FLEX = :flex + SCALE = :scale + PRIORITY = :priority + + # @!method self.values + # @return [Array] + end + + class StreamOptions < OpenAI::Internal::Type::BaseModel + # @!attribute include_obfuscation + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. 
You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + # + # @return [Boolean, nil] + optional :include_obfuscation, OpenAI::Internal::Type::Boolean + + # @!method initialize(include_obfuscation: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseCreateParams::StreamOptions} for more + # details. + # + # Options for streaming responses. Only set this when you set `stream: true`. + # + # @param include_obfuscation [Boolean] When true, stream obfuscation will be enabled. Stream obfuscation adds end - # @abstract - # # How the model should select which tool (or tools) to use when generating a - # response. See the `tools` parameter to see how to specify which tools the model - # can call. - class ToolChoice < OpenAI::Union + # response. See the `tools` parameter to see how to specify which tools the model + # can call. + module ToolChoice + extend OpenAI::Internal::Type::Union + # Controls which (if any) tool is called by the model. # # `none` means the model will not call any tool and instead generates a message. @@ -294,30 +450,43 @@ class ToolChoice < OpenAI::Union # more tools. # # `required` means the model must call one or more tools. - variant enum: -> { OpenAI::Models::Responses::ToolChoiceOptions } + variant enum: -> { OpenAI::Responses::ToolChoiceOptions } + + # Constrains the tools available to the model to a pre-defined set. + variant -> { OpenAI::Responses::ToolChoiceAllowed } # Indicates that the model should use a built-in tool to generate a response. # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). - variant -> { OpenAI::Models::Responses::ToolChoiceTypes } + variant -> { OpenAI::Responses::ToolChoiceTypes } # Use this option to force the model to call a specific function. - variant -> { OpenAI::Models::Responses::ToolChoiceFunction } + variant -> { OpenAI::Responses::ToolChoiceFunction } + + # Use this option to force the model to call a specific tool on a remote MCP server. + variant -> { OpenAI::Responses::ToolChoiceMcp } + + # Use this option to force the model to call a specific custom tool. + variant -> { OpenAI::Responses::ToolChoiceCustom } + + # @!method self.variants + # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom)] end - # @abstract - # # The truncation strategy to use for the model response. # - # - `auto`: If the context of this response and previous ones exceeds the model's - # context window size, the model will truncate the response to fit the context - # window by dropping input items in the middle of the conversation. - # - `disabled` (default): If a model response will exceed the context window size - # for a model, the request will fail with a 400 error. - class Truncation < OpenAI::Enum + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. 
+ module Truncation + extend OpenAI::Internal::Type::Enum + AUTO = :auto DISABLED = :disabled - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/responses/response_created_event.rb b/lib/openai/models/responses/response_created_event.rb index 9a4cc890..58d54869 100644 --- a/lib/openai/models/responses/response_created_event.rb +++ b/lib/openai/models/responses/response_created_event.rb @@ -3,12 +3,18 @@ module OpenAI module Models module Responses - class ResponseCreatedEvent < OpenAI::BaseModel + class ResponseCreatedEvent < OpenAI::Internal::Type::BaseModel # @!attribute response # The response that was created. # # @return [OpenAI::Models::Responses::Response] - required :response, -> { OpenAI::Models::Responses::Response } + required :response, -> { OpenAI::Responses::Response } + + # @!attribute sequence_number + # The sequence number for this event. + # + # @return [Integer] + required :sequence_number, Integer # @!attribute type # The type of the event. Always `response.created`. @@ -16,15 +22,17 @@ class ResponseCreatedEvent < OpenAI::BaseModel # @return [Symbol, :"response.created"] required :type, const: :"response.created" - # @!parse - # # An event that is emitted when a response is created. - # # - # # @param response [OpenAI::Models::Responses::Response] - # # @param type [Symbol, :"response.created"] - # # - # def initialize(response:, type: :"response.created", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(response:, sequence_number:, type: :"response.created") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseCreatedEvent} for more details. + # + # An event that is emitted when a response is created. + # + # @param response [OpenAI::Models::Responses::Response] The response that was created. + # + # @param sequence_number [Integer] The sequence number for this event. + # + # @param type [Symbol, :"response.created"] The type of the event. Always `response.created`. end end end diff --git a/lib/openai/models/responses/response_custom_tool_call.rb b/lib/openai/models/responses/response_custom_tool_call.rb new file mode 100644 index 00000000..48dae1e0 --- /dev/null +++ b/lib/openai/models/responses/response_custom_tool_call.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseCustomToolCall < OpenAI::Internal::Type::BaseModel + # @!attribute call_id + # An identifier used to map this custom tool call to a tool call output. + # + # @return [String] + required :call_id, String + + # @!attribute input + # The input for the custom tool call generated by the model. + # + # @return [String] + required :input, String + + # @!attribute name + # The name of the custom tool being called. + # + # @return [String] + required :name, String + + # @!attribute type + # The type of the custom tool call. Always `custom_tool_call`. + # + # @return [Symbol, :custom_tool_call] + required :type, const: :custom_tool_call + + # @!attribute id + # The unique ID of the custom tool call in the OpenAI platform. + # + # @return [String, nil] + optional :id, String + + # @!method initialize(call_id:, input:, name:, id: nil, type: :custom_tool_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseCustomToolCall} for more details. + # + # A call to a custom tool created by the model. 
+ # + # @param call_id [String] An identifier used to map this custom tool call to a tool call output. + # + # @param input [String] The input for the custom tool call generated by the model. + # + # @param name [String] The name of the custom tool being called. + # + # @param id [String] The unique ID of the custom tool call in the OpenAI platform. + # + # @param type [Symbol, :custom_tool_call] The type of the custom tool call. Always `custom_tool_call`. + end + end + end +end diff --git a/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb b/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb new file mode 100644 index 00000000..5fa83189 --- /dev/null +++ b/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseCustomToolCallInputDeltaEvent < OpenAI::Internal::Type::BaseModel + # @!attribute delta + # The incremental input data (delta) for the custom tool call. + # + # @return [String] + required :delta, String + + # @!attribute item_id + # Unique identifier for the API item associated with this event. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output this delta applies to. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The event type identifier. + # + # @return [Symbol, :"response.custom_tool_call_input.delta"] + required :type, const: :"response.custom_tool_call_input.delta" + + # @!method initialize(delta:, item_id:, output_index:, sequence_number:, type: :"response.custom_tool_call_input.delta") + # Event representing a delta (partial update) to the input of a custom tool call. + # + # @param delta [String] The incremental input data (delta) for the custom tool call. + # + # @param item_id [String] Unique identifier for the API item associated with this event. + # + # @param output_index [Integer] The index of the output this delta applies to. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.custom_tool_call_input.delta"] The event type identifier. + end + end + end +end diff --git a/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb b/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb new file mode 100644 index 00000000..e45a41e0 --- /dev/null +++ b/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseCustomToolCallInputDoneEvent < OpenAI::Internal::Type::BaseModel + # @!attribute input + # The complete input data for the custom tool call. + # + # @return [String] + required :input, String + + # @!attribute item_id + # Unique identifier for the API item associated with this event. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output this event applies to. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The event type identifier. 
+ # + # @return [Symbol, :"response.custom_tool_call_input.done"] + required :type, const: :"response.custom_tool_call_input.done" + + # @!method initialize(input:, item_id:, output_index:, sequence_number:, type: :"response.custom_tool_call_input.done") + # Event indicating that input for a custom tool call is complete. + # + # @param input [String] The complete input data for the custom tool call. + # + # @param item_id [String] Unique identifier for the API item associated with this event. + # + # @param output_index [Integer] The index of the output this event applies to. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.custom_tool_call_input.done"] The event type identifier. + end + end + end +end diff --git a/lib/openai/models/responses/response_custom_tool_call_output.rb b/lib/openai/models/responses/response_custom_tool_call_output.rb new file mode 100644 index 00000000..644997e7 --- /dev/null +++ b/lib/openai/models/responses/response_custom_tool_call_output.rb @@ -0,0 +1,47 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseCustomToolCallOutput < OpenAI::Internal::Type::BaseModel + # @!attribute call_id + # The call ID, used to map this custom tool call output to a custom tool call. + # + # @return [String] + required :call_id, String + + # @!attribute output + # The output from the custom tool call generated by your code. + # + # @return [String] + required :output, String + + # @!attribute type + # The type of the custom tool call output. Always `custom_tool_call_output`. + # + # @return [Symbol, :custom_tool_call_output] + required :type, const: :custom_tool_call_output + + # @!attribute id + # The unique ID of the custom tool call output in the OpenAI platform. + # + # @return [String, nil] + optional :id, String + + # @!method initialize(call_id:, output:, id: nil, type: :custom_tool_call_output) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseCustomToolCallOutput} for more details. + # + # The output of a custom tool call from your code, being sent back to the model. + # + # @param call_id [String] The call ID, used to map this custom tool call output to a custom tool call. + # + # @param output [String] The output from the custom tool call generated by your code. + # + # @param id [String] The unique ID of the custom tool call output in the OpenAI platform. + # + # @param type [Symbol, :custom_tool_call_output] The type of the custom tool call output. Always `custom_tool_call_output`. 
+ end + end + end +end diff --git a/lib/openai/models/responses/response_delete_params.rb b/lib/openai/models/responses/response_delete_params.rb index 0ecfdf28..96a2b404 100644 --- a/lib/openai/models/responses/response_delete_params.rb +++ b/lib/openai/models/responses/response_delete_params.rb @@ -3,17 +3,13 @@ module OpenAI module Models module Responses - class ResponseDeleteParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Responses#delete + class ResponseDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/responses/response_error.rb b/lib/openai/models/responses/response_error.rb index 47dd8b4b..90c420f1 100644 --- a/lib/openai/models/responses/response_error.rb +++ b/lib/openai/models/responses/response_error.rb @@ -3,12 +3,12 @@ module OpenAI module Models module Responses - class ResponseError < OpenAI::BaseModel + class ResponseError < OpenAI::Internal::Type::BaseModel # @!attribute code # The error code for the response. # # @return [Symbol, OpenAI::Models::Responses::ResponseError::Code] - required :code, enum: -> { OpenAI::Models::Responses::ResponseError::Code } + required :code, enum: -> { OpenAI::Responses::ResponseError::Code } # @!attribute message # A human-readable description of the error. @@ -16,20 +16,22 @@ class ResponseError < OpenAI::BaseModel # @return [String] required :message, String - # @!parse - # # An error object returned when the model fails to generate a Response. - # # - # # @param code [Symbol, OpenAI::Models::Responses::ResponseError::Code] - # # @param message [String] - # # - # def initialize(code:, message:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(code:, message:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseError} for more details. + # + # An error object returned when the model fails to generate a Response. + # + # @param code [Symbol, OpenAI::Models::Responses::ResponseError::Code] The error code for the response. # + # @param message [String] A human-readable description of the error. + # The error code for the response. - class Code < OpenAI::Enum + # + # @see OpenAI::Models::Responses::ResponseError#code + module Code + extend OpenAI::Internal::Type::Enum + SERVER_ERROR = :server_error RATE_LIMIT_EXCEEDED = :rate_limit_exceeded INVALID_PROMPT = :invalid_prompt @@ -49,7 +51,8 @@ class Code < OpenAI::Enum FAILED_TO_DOWNLOAD_IMAGE = :failed_to_download_image IMAGE_FILE_NOT_FOUND = :image_file_not_found - finalize! 
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/responses/response_error_event.rb b/lib/openai/models/responses/response_error_event.rb index 057c30e1..9dde9fb4 100644 --- a/lib/openai/models/responses/response_error_event.rb +++ b/lib/openai/models/responses/response_error_event.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseErrorEvent < OpenAI::BaseModel + class ResponseErrorEvent < OpenAI::Internal::Type::BaseModel # @!attribute code # The error code. # @@ -22,23 +22,33 @@ class ResponseErrorEvent < OpenAI::BaseModel # @return [String, nil] required :param, String, nil?: true + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute type # The type of the event. Always `error`. # # @return [Symbol, :error] required :type, const: :error - # @!parse - # # Emitted when an error occurs. - # # - # # @param code [String, nil] - # # @param message [String] - # # @param param [String, nil] - # # @param type [Symbol, :error] - # # - # def initialize(code:, message:, param:, type: :error, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(code:, message:, param:, sequence_number:, type: :error) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::ResponseErrorEvent} for more details. + # + # Emitted when an error occurs. + # + # @param code [String, nil] The error code. + # + # @param message [String] The error message. + # + # @param param [String, nil] The error parameter. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :error] The type of the event. Always `error`. end end end diff --git a/lib/openai/models/responses/response_failed_event.rb b/lib/openai/models/responses/response_failed_event.rb index 2b4d5471..064ba785 100644 --- a/lib/openai/models/responses/response_failed_event.rb +++ b/lib/openai/models/responses/response_failed_event.rb @@ -3,12 +3,18 @@ module OpenAI module Models module Responses - class ResponseFailedEvent < OpenAI::BaseModel + class ResponseFailedEvent < OpenAI::Internal::Type::BaseModel # @!attribute response # The response that failed. # # @return [OpenAI::Models::Responses::Response] - required :response, -> { OpenAI::Models::Responses::Response } + required :response, -> { OpenAI::Responses::Response } + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer # @!attribute type # The type of the event. Always `response.failed`. @@ -16,15 +22,17 @@ class ResponseFailedEvent < OpenAI::BaseModel # @return [Symbol, :"response.failed"] required :type, const: :"response.failed" - # @!parse - # # An event that is emitted when a response fails. - # # - # # @param response [OpenAI::Models::Responses::Response] - # # @param type [Symbol, :"response.failed"] - # # - # def initialize(response:, type: :"response.failed", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(response:, sequence_number:, type: :"response.failed") + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::ResponseFailedEvent} for more details. + # + # An event that is emitted when a response fails. + # + # @param response [OpenAI::Models::Responses::Response] The response that failed.
+ # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.failed"] The type of the event. Always `response.failed`. end end end diff --git a/lib/openai/models/responses/response_file_search_call_completed_event.rb b/lib/openai/models/responses/response_file_search_call_completed_event.rb index 8aaa0427..8ae559c0 100644 --- a/lib/openai/models/responses/response_file_search_call_completed_event.rb +++ b/lib/openai/models/responses/response_file_search_call_completed_event.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseFileSearchCallCompletedEvent < OpenAI::BaseModel + class ResponseFileSearchCallCompletedEvent < OpenAI::Internal::Type::BaseModel # @!attribute item_id # The ID of the output item that the file search call is initiated. # @@ -16,22 +16,32 @@ class ResponseFileSearchCallCompletedEvent < OpenAI::BaseModel # @return [Integer] required :output_index, Integer + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute type # The type of the event. Always `response.file_search_call.completed`. # # @return [Symbol, :"response.file_search_call.completed"] required :type, const: :"response.file_search_call.completed" - # @!parse - # # Emitted when a file search call is completed (results found). - # # - # # @param item_id [String] - # # @param output_index [Integer] - # # @param type [Symbol, :"response.file_search_call.completed"] - # # - # def initialize(item_id:, output_index:, type: :"response.file_search_call.completed", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.file_search_call.completed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent} for more + # details. + # + # Emitted when a file search call is completed (results found). + # + # @param item_id [String] The ID of the output item that the file search call is initiated. + # + # @param output_index [Integer] The index of the output item that the file search call is initiated. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.file_search_call.completed"] The type of the event. Always `response.file_search_call.completed`. end end end diff --git a/lib/openai/models/responses/response_file_search_call_in_progress_event.rb b/lib/openai/models/responses/response_file_search_call_in_progress_event.rb index 09237200..2dc256ec 100644 --- a/lib/openai/models/responses/response_file_search_call_in_progress_event.rb +++ b/lib/openai/models/responses/response_file_search_call_in_progress_event.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseFileSearchCallInProgressEvent < OpenAI::BaseModel + class ResponseFileSearchCallInProgressEvent < OpenAI::Internal::Type::BaseModel # @!attribute item_id # The ID of the output item that the file search call is initiated. # @@ -16,22 +16,32 @@ class ResponseFileSearchCallInProgressEvent < OpenAI::BaseModel # @return [Integer] required :output_index, Integer + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute type # The type of the event. Always `response.file_search_call.in_progress`. 
# # @return [Symbol, :"response.file_search_call.in_progress"] required :type, const: :"response.file_search_call.in_progress" - # @!parse - # # Emitted when a file search call is initiated. - # # - # # @param item_id [String] - # # @param output_index [Integer] - # # @param type [Symbol, :"response.file_search_call.in_progress"] - # # - # def initialize(item_id:, output_index:, type: :"response.file_search_call.in_progress", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.file_search_call.in_progress") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent} for more + # details. + # + # Emitted when a file search call is initiated. + # + # @param item_id [String] The ID of the output item that the file search call is initiated. + # + # @param output_index [Integer] The index of the output item that the file search call is initiated. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.file_search_call.in_progress"] The type of the event. Always `response.file_search_call.in_progress`. end end end diff --git a/lib/openai/models/responses/response_file_search_call_searching_event.rb b/lib/openai/models/responses/response_file_search_call_searching_event.rb index bc87ce2c..9cd93593 100644 --- a/lib/openai/models/responses/response_file_search_call_searching_event.rb +++ b/lib/openai/models/responses/response_file_search_call_searching_event.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseFileSearchCallSearchingEvent < OpenAI::BaseModel + class ResponseFileSearchCallSearchingEvent < OpenAI::Internal::Type::BaseModel # @!attribute item_id # The ID of the output item that the file search call is initiated. # @@ -16,22 +16,32 @@ class ResponseFileSearchCallSearchingEvent < OpenAI::BaseModel # @return [Integer] required :output_index, Integer + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute type # The type of the event. Always `response.file_search_call.searching`. # # @return [Symbol, :"response.file_search_call.searching"] required :type, const: :"response.file_search_call.searching" - # @!parse - # # Emitted when a file search is currently searching. - # # - # # @param item_id [String] - # # @param output_index [Integer] - # # @param type [Symbol, :"response.file_search_call.searching"] - # # - # def initialize(item_id:, output_index:, type: :"response.file_search_call.searching", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.file_search_call.searching") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent} for more + # details. + # + # Emitted when a file search is currently searching. + # + # @param item_id [String] The ID of the output item that the file search call is initiated. + # + # @param output_index [Integer] The index of the output item that the file search call is searching. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.file_search_call.searching"] The type of the event. Always `response.file_search_call.searching`. 
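The three file-search lifecycle events (`in_progress`, `searching`, `completed`) arrive in `sequence_number` order on the event stream. A sketch of consuming them, assuming the SDK's streaming helper (`client.responses.stream`) yields typed events and that the vector store ID is a placeholder:

    stream = client.responses.stream(
      model: "gpt-5",
      input: "Summarize the attached reports",
      tools: [{type: :file_search, vector_store_ids: ["vs_123"]}] # placeholder ID
    )
    stream.each do |event|
      case event.type
      when :"response.file_search_call.in_progress" then puts "file search started"
      when :"response.file_search_call.searching"   then puts "searching..."
      when :"response.file_search_call.completed"   then puts "done (event ##{event.sequence_number})"
      end
    end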
end end end diff --git a/lib/openai/models/responses/response_file_search_tool_call.rb b/lib/openai/models/responses/response_file_search_tool_call.rb index df500ca6..034f63b7 100644 --- a/lib/openai/models/responses/response_file_search_tool_call.rb +++ b/lib/openai/models/responses/response_file_search_tool_call.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseFileSearchToolCall < OpenAI::BaseModel + class ResponseFileSearchToolCall < OpenAI::Internal::Type::BaseModel # @!attribute id # The unique ID of the file search tool call. # @@ -14,14 +14,14 @@ class ResponseFileSearchToolCall < OpenAI::BaseModel # The queries used to search for files. # # @return [Array] - required :queries, OpenAI::ArrayOf[String] + required :queries, OpenAI::Internal::Type::ArrayOf[String] # @!attribute status # The status of the file search tool call. One of `in_progress`, `searching`, - # `incomplete` or `failed`, + # `incomplete` or `failed`. # # @return [Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall::Status] - required :status, enum: -> { OpenAI::Models::Responses::ResponseFileSearchToolCall::Status } + required :status, enum: -> { OpenAI::Responses::ResponseFileSearchToolCall::Status } # @!attribute type # The type of the file search tool call. Always `file_search_call`. @@ -34,110 +34,111 @@ class ResponseFileSearchToolCall < OpenAI::BaseModel # # @return [Array, nil] optional :results, - -> { OpenAI::ArrayOf[OpenAI::Models::Responses::ResponseFileSearchToolCall::Result] }, + -> { + OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseFileSearchToolCall::Result] + }, nil?: true - # @!parse - # # The results of a file search tool call. See the - # # [file search guide](https://platform.openai.com/docs/guides/tools-file-search) - # # for more information. - # # - # # @param id [String] - # # @param queries [Array] - # # @param status [Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall::Status] - # # @param results [Array, nil] - # # @param type [Symbol, :file_search_call] - # # - # def initialize(id:, queries:, status:, results: nil, type: :file_search_call, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(id:, queries:, status:, results: nil, type: :file_search_call) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::ResponseFileSearchToolCall} for more details. + # + # The results of a file search tool call. See the + # [file search guide](https://platform.openai.com/docs/guides/tools-file-search) + # for more information. + # + # @param id [String] The unique ID of the file search tool call. + # + # @param queries [Array] The queries used to search for files. + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall::Status] The status of the file search tool call. One of `in_progress`, + # + # @param results [Array, nil] The results of the file search tool call. # + # @param type [Symbol, :file_search_call] The type of the file search tool call. Always `file_search_call`. + # The status of the file search tool call. One of `in_progress`, `searching`, - # `incomplete` or `failed`, - class Status < OpenAI::Enum + # `incomplete` or `failed`. + # + # @see OpenAI::Models::Responses::ResponseFileSearchToolCall#status + module Status + extend OpenAI::Internal::Type::Enum + IN_PROGRESS = :in_progress SEARCHING = :searching COMPLETED = :completed INCOMPLETE = :incomplete FAILED = :failed - finalize!
+ # @!method self.values + # @return [Array] end - class Result < OpenAI::BaseModel + class Result < OpenAI::Internal::Type::BaseModel # @!attribute attributes # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. # # @return [Hash{Symbol=>String, Float, Boolean}, nil] optional :attributes, - -> { OpenAI::HashOf[union: OpenAI::Models::Responses::ResponseFileSearchToolCall::Result::Attribute] }, + -> { + OpenAI::Internal::Type::HashOf[union: OpenAI::Responses::ResponseFileSearchToolCall::Result::Attribute] + }, nil?: true - # @!attribute [r] file_id + # @!attribute file_id # The unique ID of the file. # # @return [String, nil] optional :file_id, String - # @!parse - # # @return [String] - # attr_writer :file_id - - # @!attribute [r] filename + # @!attribute filename # The name of the file. # # @return [String, nil] optional :filename, String - # @!parse - # # @return [String] - # attr_writer :filename - - # @!attribute [r] score + # @!attribute score # The relevance score of the file - a value between 0 and 1. # # @return [Float, nil] optional :score, Float - # @!parse - # # @return [Float] - # attr_writer :score - - # @!attribute [r] text + # @!attribute text # The text that was retrieved from the file. # # @return [String, nil] optional :text, String - # @!parse - # # @return [String] - # attr_writer :text - - # @!parse - # # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] - # # @param file_id [String] - # # @param filename [String] - # # @param score [Float] - # # @param text [String] - # # - # def initialize(attributes: nil, file_id: nil, filename: nil, score: nil, text: nil, **) = super + # @!method initialize(attributes: nil, file_id: nil, filename: nil, score: nil, text: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseFileSearchToolCall::Result} for more + # details. + # + # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param file_id [String] The unique ID of the file. + # + # @param filename [String] The name of the file. + # + # @param score [Float] The relevance score of the file - a value between 0 and 1. + # + # @param text [String] The text that was retrieved from the file. 
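All of `Result`'s fields are optional, so reading results defensively looks something like the following sketch (`call` is assumed to be a `ResponseFileSearchToolCall` taken from `response.output`, and the `:category` attribute key is hypothetical):

    (call.results || []).each do |result|
      # Attribute values are String, Float, or Boolean, per the union just below.
      category = result.attributes&.dig(:category)
      puts "#{result.filename} (score: #{result.score}) #{category}"
    end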
- # def initialize: (Hash | OpenAI::BaseModel) -> void + module Attribute + extend OpenAI::Internal::Type::Union - # @abstract - # - class Attribute < OpenAI::Union variant String variant Float - variant OpenAI::BooleanModel + variant OpenAI::Internal::Type::Boolean + + # @!method self.variants + # @return [Array(String, Float, Boolean)] end end end diff --git a/lib/openai/models/responses/response_format_text_config.rb b/lib/openai/models/responses/response_format_text_config.rb index beac92f7..43f6ad0c 100644 --- a/lib/openai/models/responses/response_format_text_config.rb +++ b/lib/openai/models/responses/response_format_text_config.rb @@ -3,36 +3,39 @@ module OpenAI module Models module Responses - # @abstract - # # An object specifying the format that the model must output. # - # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - # ensures the model will match your supplied JSON schema. Learn more in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # The default format is `{ "type": "text" }` with no additional options. + # The default format is `{ "type": "text" }` with no additional options. # - # **Not recommended for gpt-4o and newer models:** + # **Not recommended for gpt-4o and newer models:** # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. - class ResponseFormatTextConfig < OpenAI::Union + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + module ResponseFormatTextConfig + extend OpenAI::Internal::Type::Union + discriminator :type # Default response format. Used to generate text responses. - variant :text, -> { OpenAI::Models::ResponseFormatText } + variant :text, -> { OpenAI::ResponseFormatText } # JSON Schema response format. Used to generate structured JSON responses. # Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). - variant :json_schema, -> { OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig } + variant :json_schema, -> { OpenAI::Responses::ResponseFormatTextJSONSchemaConfig } # JSON object response format. An older method of generating JSON responses. # Using `json_schema` is recommended for models that support it. Note that the # model will not generate JSON without a system or user message instructing it # to do so. 
- variant :json_object, -> { OpenAI::Models::ResponseFormatJSONObject } + variant :json_object, -> { OpenAI::ResponseFormatJSONObject } + + # @!method self.variants + # @return [Array(OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject)] end end end diff --git a/lib/openai/models/responses/response_format_text_json_schema_config.rb b/lib/openai/models/responses/response_format_text_json_schema_config.rb index 34aefbc9..06e57803 100644 --- a/lib/openai/models/responses/response_format_text_json_schema_config.rb +++ b/lib/openai/models/responses/response_format_text_json_schema_config.rb @@ -3,13 +3,20 @@ module OpenAI module Models module Responses - class ResponseFormatTextJSONSchemaConfig < OpenAI::BaseModel + class ResponseFormatTextJSONSchemaConfig < OpenAI::Internal::Type::BaseModel + # @!attribute name + # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + # and dashes, with a maximum length of 64. + # + # @return [String] + required :name, String + # @!attribute schema # The schema for the response format, described as a JSON Schema object. Learn how - # to build JSON schemas [here](https://json-schema.org/). + # to build JSON schemas [here](https://json-schema.org/). # # @return [Hash{Symbol=>Object}] - required :schema, OpenAI::HashOf[OpenAI::Unknown] + required :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] # @!attribute type # The type of response format being defined. Always `json_schema`. @@ -17,52 +24,41 @@ class ResponseFormatTextJSONSchemaConfig < OpenAI::BaseModel # @return [Symbol, :json_schema] required :type, const: :json_schema - # @!attribute [r] description + # @!attribute description # A description of what the response format is for, used by the model to determine - # how to respond in the format. + # how to respond in the format. # # @return [String, nil] optional :description, String - # @!parse - # # @return [String] - # attr_writer :description - - # @!attribute [r] name - # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores - # and dashes, with a maximum length of 64. - # - # @return [String, nil] - optional :name, String - - # @!parse - # # @return [String] - # attr_writer :name - # @!attribute strict # Whether to enable strict schema adherence when generating the output. If set to - # true, the model will always follow the exact schema defined in the `schema` - # field. Only a subset of JSON Schema is supported when `strict` is `true`. To - # learn more, read the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # true, the model will always follow the exact schema defined in the `schema` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. To + # learn more, read the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # # @return [Boolean, nil] - optional :strict, OpenAI::BooleanModel, nil?: true - - # @!parse - # # JSON Schema response format. Used to generate structured JSON responses. Learn - # # more about - # # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). 
- # # - # # @param schema [Hash{Symbol=>Object}] - # # @param description [String] - # # @param name [String] - # # @param strict [Boolean, nil] - # # @param type [Symbol, :json_schema] - # # - # def initialize(schema:, description: nil, name: nil, strict: nil, type: :json_schema, **) = super + optional :strict, OpenAI::Internal::Type::Boolean, nil?: true - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(name:, schema:, description: nil, strict: nil, type: :json_schema) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig} for more + # details. + # + # JSON Schema response format. Used to generate structured JSON responses. Learn + # more about + # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + # + # @param name [String] The name of the response format. Must be a-z, A-Z, 0-9, or contain + # + # @param schema [Hash{Symbol=>Object}] The schema for the response format, described as a JSON Schema object. + # + # @param description [String] A description of what the response format is for, used by the model to + # + # @param strict [Boolean, nil] Whether to enable strict schema adherence when generating the output. + # + # @param type [Symbol, :json_schema] The type of response format being defined. Always `json_schema`. end end end diff --git a/lib/openai/models/responses/response_function_call_arguments_delta_event.rb b/lib/openai/models/responses/response_function_call_arguments_delta_event.rb index 51e4b411..70a57547 100644 --- a/lib/openai/models/responses/response_function_call_arguments_delta_event.rb +++ b/lib/openai/models/responses/response_function_call_arguments_delta_event.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseFunctionCallArgumentsDeltaEvent < OpenAI::BaseModel + class ResponseFunctionCallArgumentsDeltaEvent < OpenAI::Internal::Type::BaseModel # @!attribute delta # The function-call arguments delta that is added. # @@ -22,23 +22,34 @@ class ResponseFunctionCallArgumentsDeltaEvent < OpenAI::BaseModel # @return [Integer] required :output_index, Integer + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute type # The type of the event. Always `response.function_call_arguments.delta`. # # @return [Symbol, :"response.function_call_arguments.delta"] required :type, const: :"response.function_call_arguments.delta" - # @!parse - # # Emitted when there is a partial function-call arguments delta. - # # - # # @param delta [String] - # # @param item_id [String] - # # @param output_index [Integer] - # # @param type [Symbol, :"response.function_call_arguments.delta"] - # # - # def initialize(delta:, item_id:, output_index:, type: :"response.function_call_arguments.delta", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(delta:, item_id:, output_index:, sequence_number:, type: :"response.function_call_arguments.delta") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent} for more + # details. + # + # Emitted when there is a partial function-call arguments delta. + # + # @param delta [String] The function-call arguments delta that is added. + # + # @param item_id [String] The ID of the output item that the function-call arguments delta is added to. 
+ # + # @param output_index [Integer] The index of the output item that the function-call arguments delta is added to. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.function_call_arguments.delta"] The type of the event. Always `response.function_call_arguments.delta`. end end end diff --git a/lib/openai/models/responses/response_function_call_arguments_done_event.rb b/lib/openai/models/responses/response_function_call_arguments_done_event.rb index 379a1df5..a5b29f4b 100644 --- a/lib/openai/models/responses/response_function_call_arguments_done_event.rb +++ b/lib/openai/models/responses/response_function_call_arguments_done_event.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseFunctionCallArgumentsDoneEvent < OpenAI::BaseModel + class ResponseFunctionCallArgumentsDoneEvent < OpenAI::Internal::Type::BaseModel # @!attribute arguments # The function-call arguments. # @@ -22,22 +22,29 @@ class ResponseFunctionCallArgumentsDoneEvent < OpenAI::BaseModel # @return [Integer] required :output_index, Integer + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute type # # @return [Symbol, :"response.function_call_arguments.done"] required :type, const: :"response.function_call_arguments.done" - # @!parse - # # Emitted when function-call arguments are finalized. - # # - # # @param arguments [String] - # # @param item_id [String] - # # @param output_index [Integer] - # # @param type [Symbol, :"response.function_call_arguments.done"] - # # - # def initialize(arguments:, item_id:, output_index:, type: :"response.function_call_arguments.done", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(arguments:, item_id:, output_index:, sequence_number:, type: :"response.function_call_arguments.done") + # Emitted when function-call arguments are finalized. + # + # @param arguments [String] The function-call arguments. + # + # @param item_id [String] The ID of the item. + # + # @param output_index [Integer] The index of the output item. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.function_call_arguments.done"] end end end diff --git a/lib/openai/models/responses/response_function_tool_call.rb b/lib/openai/models/responses/response_function_tool_call.rb index 065ee3ea..55602423 100644 --- a/lib/openai/models/responses/response_function_tool_call.rb +++ b/lib/openai/models/responses/response_function_tool_call.rb @@ -3,13 +3,7 @@ module OpenAI module Models module Responses - class ResponseFunctionToolCall < OpenAI::BaseModel - # @!attribute id - # The unique ID of the function tool call. - # - # @return [String] - required :id, String - + class ResponseFunctionToolCall < OpenAI::Internal::Type::BaseModel # @!attribute arguments # A JSON string of the arguments to pass to the function. # @@ -34,43 +28,52 @@ class ResponseFunctionToolCall < OpenAI::BaseModel # @return [Symbol, :function_call] required :type, const: :function_call - # @!attribute [r] status + # @!attribute id + # The unique ID of the function tool call. + # + # @return [String, nil] + optional :id, String + + # @!attribute status # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. 
# # @return [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCall::Status, nil] - optional :status, enum: -> { OpenAI::Models::Responses::ResponseFunctionToolCall::Status } - - # @!parse - # # @return [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCall::Status] - # attr_writer :status - - # @!parse - # # A tool call to run a function. See the - # # [function calling guide](https://platform.openai.com/docs/guides/function-calling) - # # for more information. - # # - # # @param id [String] - # # @param arguments [String] - # # @param call_id [String] - # # @param name [String] - # # @param status [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCall::Status] - # # @param type [Symbol, :function_call] - # # - # def initialize(id:, arguments:, call_id:, name:, status: nil, type: :function_call, **) = super + optional :status, enum: -> { OpenAI::Responses::ResponseFunctionToolCall::Status } - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(arguments:, call_id:, name:, id: nil, status: nil, type: :function_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseFunctionToolCall} for more details. + # + # A tool call to run a function. See the + # [function calling guide](https://platform.openai.com/docs/guides/function-calling) + # for more information. + # + # @param arguments [String] A JSON string of the arguments to pass to the function. + # + # @param call_id [String] The unique ID of the function tool call generated by the model. + # + # @param name [String] The name of the function to run. + # + # @param id [String] The unique ID of the function tool call. # + # @param status [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCall::Status] The status of the item. One of `in_progress`, `completed`, or + # + # @param type [Symbol, :function_call] The type of the function tool call. Always `function_call`. + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. - class Status < OpenAI::Enum + # Populated when items are returned via API. + # + # @see OpenAI::Models::Responses::ResponseFunctionToolCall#status + module Status + extend OpenAI::Internal::Type::Enum + IN_PROGRESS = :in_progress COMPLETED = :completed INCOMPLETE = :incomplete - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/responses/response_function_tool_call_item.rb b/lib/openai/models/responses/response_function_tool_call_item.rb new file mode 100644 index 00000000..b0cbb3be --- /dev/null +++ b/lib/openai/models/responses/response_function_tool_call_item.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseFunctionToolCallItem < OpenAI::Models::Responses::ResponseFunctionToolCall + # @!attribute id + # The unique ID of the function tool call. + # + # @return [String] + required :id, String + + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseFunctionToolCallItem} for more details. + # + # A tool call to run a function. See the + # [function calling guide](https://platform.openai.com/docs/guides/function-calling) + # for more information. + # + # @param id [String] The unique ID of the function tool call. 
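`ResponseFunctionToolCallItem` only narrows the base class by making `id` required, reflecting how items are returned via API. For the usual function-calling loop it is `call_id` and `arguments` that matter; a sketch, assuming `client`, a prior `response`, and a hypothetical `lookup_weather` handler (the `function_call_output` item shape is defined in the following file):

    require "json"

    outputs = response.output.filter_map do |item|
      next unless item.type == :function_call
      args = JSON.parse(item.arguments) # arguments arrive as a JSON string
      {
        type: :function_call_output,
        call_id: item.call_id,
        output: JSON.generate(lookup_weather(args)) # handler result, serialized back
      }
    end
    response = client.responses.create(model: "gpt-5", previous_response_id: response.id, input: outputs)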
+ end + end + end +end diff --git a/lib/openai/models/responses/response_function_tool_call_output_item.rb b/lib/openai/models/responses/response_function_tool_call_output_item.rb new file mode 100644 index 00000000..bae34c36 --- /dev/null +++ b/lib/openai/models/responses/response_function_tool_call_output_item.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseFunctionToolCallOutputItem < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the function call tool output. + # + # @return [String] + required :id, String + + # @!attribute call_id + # The unique ID of the function tool call generated by the model. + # + # @return [String] + required :call_id, String + + # @!attribute output + # A JSON string of the output of the function tool call. + # + # @return [String] + required :output, String + + # @!attribute type + # The type of the function tool call output. Always `function_call_output`. + # + # @return [Symbol, :function_call_output] + required :type, const: :function_call_output + + # @!attribute status + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem::Status, nil] + optional :status, enum: -> { OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status } + + # @!method initialize(id:, call_id:, output:, status: nil, type: :function_call_output) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem} for more + # details. + # + # @param id [String] The unique ID of the function call tool output. + # + # @param call_id [String] The unique ID of the function tool call generated by the model. + # + # @param output [String] A JSON string of the output of the function tool call. + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem::Status] The status of the item. One of `in_progress`, `completed`, or + # + # @param type [Symbol, :function_call_output] The type of the function tool call output. Always `function_call_output`. + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + # + # @see OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + INCOMPLETE = :incomplete + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/responses/response_function_web_search.rb b/lib/openai/models/responses/response_function_web_search.rb index 8afd6c8b..f8360067 100644 --- a/lib/openai/models/responses/response_function_web_search.rb +++ b/lib/openai/models/responses/response_function_web_search.rb @@ -3,18 +3,25 @@ module OpenAI module Models module Responses - class ResponseFunctionWebSearch < OpenAI::BaseModel + class ResponseFunctionWebSearch < OpenAI::Internal::Type::BaseModel # @!attribute id # The unique ID of the web search tool call. # # @return [String] required :id, String + # @!attribute action + # An object describing the specific action taken in this web search call. Includes + # details on how the model used the web (search, open_page, find). 
+ # + # @return [OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find] + required :action, union: -> { OpenAI::Responses::ResponseFunctionWebSearch::Action } + # @!attribute status # The status of the web search tool call. # # @return [Symbol, OpenAI::Models::Responses::ResponseFunctionWebSearch::Status] - required :status, enum: -> { OpenAI::Models::Responses::ResponseFunctionWebSearch::Status } + required :status, enum: -> { OpenAI::Responses::ResponseFunctionWebSearch::Status } # @!attribute type # The type of the web search tool call. Always `web_search_call`. @@ -22,29 +29,140 @@ class ResponseFunctionWebSearch < OpenAI::BaseModel # @return [Symbol, :web_search_call] required :type, const: :web_search_call - # @!parse - # # The results of a web search tool call. See the - # # [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for - # # more information. - # # - # # @param id [String] - # # @param status [Symbol, OpenAI::Models::Responses::ResponseFunctionWebSearch::Status] - # # @param type [Symbol, :web_search_call] - # # - # def initialize(id:, status:, type: :web_search_call, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, action:, status:, type: :web_search_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseFunctionWebSearch} for more details. + # + # The results of a web search tool call. See the + # [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for + # more information. + # + # @param id [String] The unique ID of the web search tool call. + # + # @param action [OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find] An object describing the specific action taken in this web search call. + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseFunctionWebSearch::Status] The status of the web search tool call. + # + # @param type [Symbol, :web_search_call] The type of the web search tool call. Always `web_search_call`. - # @abstract + # An object describing the specific action taken in this web search call. Includes + # details on how the model used the web (search, open_page, find). # + # @see OpenAI::Models::Responses::ResponseFunctionWebSearch#action + module Action + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Action type "search" - Performs a web search query. + variant :search, -> { OpenAI::Responses::ResponseFunctionWebSearch::Action::Search } + + # Action type "open_page" - Opens a specific URL from search results. + variant :open_page, -> { OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage } + + # Action type "find": Searches for a pattern within a loaded page. + variant :find, -> { OpenAI::Responses::ResponseFunctionWebSearch::Action::Find } + + class Search < OpenAI::Internal::Type::BaseModel + # @!attribute query + # The search query. + # + # @return [String] + required :query, String + + # @!attribute type + # The action type. 
+ # + # @return [Symbol, :search] + required :type, const: :search + + # @!method initialize(query:, type: :search) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search} for more + # details. + # + # Action type "search" - Performs a web search query. + # + # @param query [String] The search query. + # + # @param type [Symbol, :search] The action type. + end + + class OpenPage < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The action type. + # + # @return [Symbol, :open_page] + required :type, const: :open_page + + # @!attribute url + # The URL opened by the model. + # + # @return [String] + required :url, String + + # @!method initialize(url:, type: :open_page) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage} for + # more details. + # + # Action type "open_page" - Opens a specific URL from search results. + # + # @param url [String] The URL opened by the model. + # + # @param type [Symbol, :open_page] The action type. + end + + class Find < OpenAI::Internal::Type::BaseModel + # @!attribute pattern + # The pattern or text to search for within the page. + # + # @return [String] + required :pattern, String + + # @!attribute type + # The action type. + # + # @return [Symbol, :find] + required :type, const: :find + + # @!attribute url + # The URL of the page searched for the pattern. + # + # @return [String] + required :url, String + + # @!method initialize(pattern:, url:, type: :find) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find} for more + # details. + # + # Action type "find": Searches for a pattern within a loaded page. + # + # @param pattern [String] The pattern or text to search for within the page. + # + # @param url [String] The URL of the page searched for the pattern. + # + # @param type [Symbol, :find] The action type. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find)] + end + # The status of the web search tool call. - class Status < OpenAI::Enum + # + # @see OpenAI::Models::Responses::ResponseFunctionWebSearch#status + module Status + extend OpenAI::Internal::Type::Enum + IN_PROGRESS = :in_progress SEARCHING = :searching COMPLETED = :completed FAILED = :failed - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/responses/response_image_gen_call_completed_event.rb b/lib/openai/models/responses/response_image_gen_call_completed_event.rb new file mode 100644 index 00000000..ceccdcf6 --- /dev/null +++ b/lib/openai/models/responses/response_image_gen_call_completed_event.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseImageGenCallCompletedEvent < OpenAI::Internal::Type::BaseModel + # @!attribute item_id + # The unique identifier of the image generation item being processed. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item in the response's output array. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event.
+ # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The type of the event. Always 'response.image_generation_call.completed'. + # + # @return [Symbol, :"response.image_generation_call.completed"] + required :type, const: :"response.image_generation_call.completed" + + # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.image_generation_call.completed") + # Emitted when an image generation tool call has completed and the final image is + # available. + # + # @param item_id [String] The unique identifier of the image generation item being processed. + # + # @param output_index [Integer] The index of the output item in the response's output array. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.image_generation_call.completed"] The type of the event. Always 'response.image_generation_call.completed'. + end + end + end +end diff --git a/lib/openai/models/responses/response_image_gen_call_generating_event.rb b/lib/openai/models/responses/response_image_gen_call_generating_event.rb new file mode 100644 index 00000000..d8f9683d --- /dev/null +++ b/lib/openai/models/responses/response_image_gen_call_generating_event.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseImageGenCallGeneratingEvent < OpenAI::Internal::Type::BaseModel + # @!attribute item_id + # The unique identifier of the image generation item being processed. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item in the response's output array. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of the image generation item being processed. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The type of the event. Always 'response.image_generation_call.generating'. + # + # @return [Symbol, :"response.image_generation_call.generating"] + required :type, const: :"response.image_generation_call.generating" + + # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.image_generation_call.generating") + # Emitted when an image generation tool call is actively generating an image + # (intermediate state). + # + # @param item_id [String] The unique identifier of the image generation item being processed. + # + # @param output_index [Integer] The index of the output item in the response's output array. + # + # @param sequence_number [Integer] The sequence number of the image generation item being processed. + # + # @param type [Symbol, :"response.image_generation_call.generating"] The type of the event. Always 'response.image_generation_call.generating'. + end + end + end +end diff --git a/lib/openai/models/responses/response_image_gen_call_in_progress_event.rb b/lib/openai/models/responses/response_image_gen_call_in_progress_event.rb new file mode 100644 index 00000000..83d9ac0a --- /dev/null +++ b/lib/openai/models/responses/response_image_gen_call_in_progress_event.rb @@ -0,0 +1,44 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseImageGenCallInProgressEvent < OpenAI::Internal::Type::BaseModel + # @!attribute item_id + # The unique identifier of the image generation item being processed. 
+ #
+ # @return [String]
+ required :item_id, String
+
+ # @!attribute output_index
+ # The index of the output item in the response's output array.
+ #
+ # @return [Integer]
+ required :output_index, Integer
+
+ # @!attribute sequence_number
+ # The sequence number of the image generation item being processed.
+ #
+ # @return [Integer]
+ required :sequence_number, Integer
+
+ # @!attribute type
+ # The type of the event. Always 'response.image_generation_call.in_progress'.
+ #
+ # @return [Symbol, :"response.image_generation_call.in_progress"]
+ required :type, const: :"response.image_generation_call.in_progress"
+
+ # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.image_generation_call.in_progress")
+ # Emitted when an image generation tool call is in progress.
+ #
+ # @param item_id [String] The unique identifier of the image generation item being processed.
+ #
+ # @param output_index [Integer] The index of the output item in the response's output array.
+ #
+ # @param sequence_number [Integer] The sequence number of the image generation item being processed.
+ #
+ # @param type [Symbol, :"response.image_generation_call.in_progress"] The type of the event. Always 'response.image_generation_call.in_progress'.
+ end
+ end
+ end
+end
diff --git a/lib/openai/models/responses/response_image_gen_call_partial_image_event.rb b/lib/openai/models/responses/response_image_gen_call_partial_image_event.rb
new file mode 100644
index 00000000..36a2c557
--- /dev/null
+++ b/lib/openai/models/responses/response_image_gen_call_partial_image_event.rb
@@ -0,0 +1,65 @@
+# frozen_string_literal: true
+
+module OpenAI
+ module Models
+ module Responses
+ class ResponseImageGenCallPartialImageEvent < OpenAI::Internal::Type::BaseModel
+ # @!attribute item_id
+ # The unique identifier of the image generation item being processed.
+ #
+ # @return [String]
+ required :item_id, String
+
+ # @!attribute output_index
+ # The index of the output item in the response's output array.
+ #
+ # @return [Integer]
+ required :output_index, Integer
+
+ # @!attribute partial_image_b64
+ # Base64-encoded partial image data, suitable for rendering as an image.
+ #
+ # @return [String]
+ required :partial_image_b64, String
+
+ # @!attribute partial_image_index
+ # 0-based index for the partial image (backend is 1-based, but this is 0-based for
+ # the user).
+ #
+ # @return [Integer]
+ required :partial_image_index, Integer
+
+ # @!attribute sequence_number
+ # The sequence number of the image generation item being processed.
+ #
+ # @return [Integer]
+ required :sequence_number, Integer
+
+ # @!attribute type
+ # The type of the event. Always 'response.image_generation_call.partial_image'.
+ #
+ # @return [Symbol, :"response.image_generation_call.partial_image"]
+ required :type, const: :"response.image_generation_call.partial_image"
+
+ # @!method initialize(item_id:, output_index:, partial_image_b64:, partial_image_index:, sequence_number:, type: :"response.image_generation_call.partial_image")
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent} for more
+ # details.
+ #
+ # Emitted when a partial image is available during image generation streaming.
+ #
+ # @param item_id [String] The unique identifier of the image generation item being processed.
+ #
+ # @param output_index [Integer] The index of the output item in the response's output array.
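# An illustrative streaming sketch, assuming `stream` is a hypothetical
# enumerable that yields parsed event models such as the image generation
# events above; the filename scheme and the PNG extension are assumptions
# made only for illustration.
require "base64"

stream.each do |event|
  case event.type
  when :"response.image_generation_call.partial_image"
    # `partial_image_b64` is documented above as Base64-encoded image data.
    bytes = Base64.decode64(event.partial_image_b64)
    File.binwrite("partial-#{event.partial_image_index}.png", bytes)
  when :"response.image_generation_call.completed"
    puts "image generation item #{event.item_id} completed"
  end
end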
+ #
+ # @param partial_image_b64 [String] Base64-encoded partial image data, suitable for rendering as an image.
+ #
+ # @param partial_image_index [Integer] 0-based index for the partial image (backend is 1-based, but this is 0-based for
+ #
+ # @param sequence_number [Integer] The sequence number of the image generation item being processed.
+ #
+ # @param type [Symbol, :"response.image_generation_call.partial_image"] The type of the event. Always 'response.image_generation_call.partial_image'.
+ end
+ end
+ end
+end
diff --git a/lib/openai/models/responses/response_in_progress_event.rb b/lib/openai/models/responses/response_in_progress_event.rb
index 5f61d802..775c9b0c 100644
--- a/lib/openai/models/responses/response_in_progress_event.rb
+++ b/lib/openai/models/responses/response_in_progress_event.rb
@@ -3,12 +3,18 @@
 module OpenAI
 module Models
 module Responses
- class ResponseInProgressEvent < OpenAI::BaseModel
+ class ResponseInProgressEvent < OpenAI::Internal::Type::BaseModel
 # @!attribute response
 # The response that is in progress.
 #
 # @return [OpenAI::Models::Responses::Response]
- required :response, -> { OpenAI::Models::Responses::Response }
+ required :response, -> { OpenAI::Responses::Response }
+
+ # @!attribute sequence_number
+ # The sequence number of this event.
+ #
+ # @return [Integer]
+ required :sequence_number, Integer

 # @!attribute type
 # The type of the event. Always `response.in_progress`.
@@ -16,15 +22,17 @@ class ResponseInProgressEvent < OpenAI::BaseModel
 # @return [Symbol, :"response.in_progress"]
 required :type, const: :"response.in_progress"

- # @!parse
- # # Emitted when the response is in progress.
- # #
- # # @param response [OpenAI::Models::Responses::Response]
- # # @param type [Symbol, :"response.in_progress"]
- # #
- # def initialize(response:, type: :"response.in_progress", **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
+ # @!method initialize(response:, sequence_number:, type: :"response.in_progress")
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseInProgressEvent} for more details.
+ #
+ # Emitted when the response is in progress.
+ #
+ # @param response [OpenAI::Models::Responses::Response] The response that is in progress.
+ #
+ # @param sequence_number [Integer] The sequence number of this event.
+ #
+ # @param type [Symbol, :"response.in_progress"] The type of the event. Always `response.in_progress`.
 end
 end
 end
diff --git a/lib/openai/models/responses/response_includable.rb b/lib/openai/models/responses/response_includable.rb
index 2d318473..bfd6f54d 100644
--- a/lib/openai/models/responses/response_includable.rb
+++ b/lib/openai/models/responses/response_includable.rb
@@ -3,22 +3,34 @@
 module OpenAI
 module Models
 module Responses
- # @abstract
- #
 # Specify additional output data to include in the model response. Currently
- supported values are:
+ supported values are:
 #
- # - `file_search_call.results`: Include the search results of the file search tool
- # call.
- # - `message.input_image.image_url`: Include image urls from the input message.
- # - `computer_call_output.output.image_url`: Include image urls from the computer
- # call output.
- class ResponseIncludable < OpenAI::Enum
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ # in code interpreter tool call items.
+ # - `computer_call_output.output.image_url`: Include image urls from the computer
+ # call output.
+ - `file_search_call.results`: Include the search results of the file search tool
+ call.
+ - `message.input_image.image_url`: Include image urls from the input message.
+ - `message.output_text.logprobs`: Include logprobs with assistant messages.
+ - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+ tokens in reasoning item outputs. This enables reasoning items to be used in
+ multi-turn conversations when using the Responses API statelessly (like when
+ the `store` parameter is set to `false`, or when an organization is enrolled
+ in the zero data retention program).
+ module ResponseIncludable
+ extend OpenAI::Internal::Type::Enum
+
+ CODE_INTERPRETER_CALL_OUTPUTS = :"code_interpreter_call.outputs"
+ COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL = :"computer_call_output.output.image_url"
 FILE_SEARCH_CALL_RESULTS = :"file_search_call.results"
 MESSAGE_INPUT_IMAGE_IMAGE_URL = :"message.input_image.image_url"
- COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL = :"computer_call_output.output.image_url"
+ MESSAGE_OUTPUT_TEXT_LOGPROBS = :"message.output_text.logprobs"
+ REASONING_ENCRYPTED_CONTENT = :"reasoning.encrypted_content"

- finalize!
+ # @!method self.values
+ # @return [Array]
 end
 end
 end
diff --git a/lib/openai/models/responses/response_incomplete_event.rb b/lib/openai/models/responses/response_incomplete_event.rb
index 8f0f240c..bd1402da 100644
--- a/lib/openai/models/responses/response_incomplete_event.rb
+++ b/lib/openai/models/responses/response_incomplete_event.rb
@@ -3,12 +3,18 @@
 module OpenAI
 module Models
 module Responses
- class ResponseIncompleteEvent < OpenAI::BaseModel
+ class ResponseIncompleteEvent < OpenAI::Internal::Type::BaseModel
 # @!attribute response
 # The response that was incomplete.
 #
 # @return [OpenAI::Models::Responses::Response]
- required :response, -> { OpenAI::Models::Responses::Response }
+ required :response, -> { OpenAI::Responses::Response }
+
+ # @!attribute sequence_number
+ # The sequence number of this event.
+ #
+ # @return [Integer]
+ required :sequence_number, Integer

 # @!attribute type
 # The type of the event. Always `response.incomplete`.
@@ -16,15 +22,17 @@ class ResponseIncompleteEvent < OpenAI::BaseModel
 # @return [Symbol, :"response.incomplete"]
 required :type, const: :"response.incomplete"

- # @!parse
- # # An event that is emitted when a response finishes as incomplete.
- # #
- # # @param response [OpenAI::Models::Responses::Response]
- # # @param type [Symbol, :"response.incomplete"]
- # #
- # def initialize(response:, type: :"response.incomplete", **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
+ # @!method initialize(response:, sequence_number:, type: :"response.incomplete")
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseIncompleteEvent} for more details.
+ #
+ # An event that is emitted when a response finishes as incomplete.
+ #
+ # @param response [OpenAI::Models::Responses::Response] The response that was incomplete.
+ #
+ # @param sequence_number [Integer] The sequence number of this event.
+ #
+ # @param type [Symbol, :"response.incomplete"] The type of the event. Always `response.incomplete`.
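# A hedged sketch of requesting the extra output data enumerated by
# ResponseIncludable above, assuming the client exposes `responses.create`
# with an `include:` array as in other OpenAI SDKs; `client` and the model
# name are placeholders, not confirmed by this diff.
response = client.responses.create(
  model: "gpt-4.1",
  input: "Summarize the attached report.",
  include: [:"file_search_call.results", :"reasoning.encrypted_content"]
)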
end
 end
 end
diff --git a/lib/openai/models/responses/response_input.rb b/lib/openai/models/responses/response_input.rb
index d8565c95..e23a7b8d 100644
--- a/lib/openai/models/responses/response_input.rb
+++ b/lib/openai/models/responses/response_input.rb
@@ -3,7 +3,8 @@
 module OpenAI
 module Models
 module Responses
- ResponseInput = OpenAI::ArrayOf[union: -> { OpenAI::Models::Responses::ResponseInputItem }]
+ # @type [OpenAI::Internal::Type::Converter]
+ ResponseInput = OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Responses::ResponseInputItem }]
 end
 end
 end
diff --git a/lib/openai/models/responses/response_input_audio.rb b/lib/openai/models/responses/response_input_audio.rb
index 24201588..ae4a6908 100644
--- a/lib/openai/models/responses/response_input_audio.rb
+++ b/lib/openai/models/responses/response_input_audio.rb
@@ -3,7 +3,7 @@
 module OpenAI
 module Models
 module Responses
- class ResponseInputAudio < OpenAI::BaseModel
+ class ResponseInputAudio < OpenAI::Internal::Type::BaseModel
 # @!attribute data
 # Base64-encoded audio data.
 #
@@ -14,7 +14,7 @@ class ResponseInputAudio < OpenAI::BaseModel
 # The format of the audio data. Currently supported formats are `mp3` and `wav`.
 #
 # @return [Symbol, OpenAI::Models::Responses::ResponseInputAudio::Format]
- required :format_, enum: -> { OpenAI::Models::Responses::ResponseInputAudio::Format }, api_name: :format
+ required :format_, enum: -> { OpenAI::Responses::ResponseInputAudio::Format }, api_name: :format

 # @!attribute type
 # The type of the input item. Always `input_audio`.
@@ -22,25 +22,29 @@ class ResponseInputAudio < OpenAI::BaseModel
 # @return [Symbol, :input_audio]
 required :type, const: :input_audio

- # @!parse
- # # An audio input to the model.
- # #
- # # @param data [String]
- # # @param format_ [Symbol, OpenAI::Models::Responses::ResponseInputAudio::Format]
- # # @param type [Symbol, :input_audio]
- # #
- # def initialize(data:, format_:, type: :input_audio, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
-
- # @abstract
+ # @!method initialize(data:, format_:, type: :input_audio)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseInputAudio} for more details.
+ #
+ # An audio input to the model.
+ #
+ # @param data [String] Base64-encoded audio data.
 #
+ # @param format_ [Symbol, OpenAI::Models::Responses::ResponseInputAudio::Format] The format of the audio data. Currently supported formats are `mp3` and
+ #
+ # @param type [Symbol, :input_audio] The type of the input item. Always `input_audio`.
+
 # The format of the audio data. Currently supported formats are `mp3` and `wav`.
- class Format < OpenAI::Enum
+ #
+ # @see OpenAI::Models::Responses::ResponseInputAudio#format_
+ module Format
+ extend OpenAI::Internal::Type::Enum
+
 MP3 = :mp3
 WAV = :wav

- finalize!
+ # @!method self.values
+ # @return [Array]
 end
 end
 end
diff --git a/lib/openai/models/responses/response_input_content.rb b/lib/openai/models/responses/response_input_content.rb
index 1ce3a1ca..59c6970e 100644
--- a/lib/openai/models/responses/response_input_content.rb
+++ b/lib/openai/models/responses/response_input_content.rb
@@ -3,20 +3,23 @@
 module OpenAI
 module Models
 module Responses
- # @abstract
- #
 # A text input to the model.
- class ResponseInputContent < OpenAI::Union
+ module ResponseInputContent
+ extend OpenAI::Internal::Type::Union
+
 discriminator :type

 # A text input to the model.
- variant :input_text, -> { OpenAI::Models::Responses::ResponseInputText }
+ variant :input_text, -> { OpenAI::Responses::ResponseInputText }

 # An image input to the model. Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
- variant :input_image, -> { OpenAI::Models::Responses::ResponseInputImage }
+ variant :input_image, -> { OpenAI::Responses::ResponseInputImage }

 # A file input to the model.
- variant :input_file, -> { OpenAI::Models::Responses::ResponseInputFile }
+ variant :input_file, -> { OpenAI::Responses::ResponseInputFile }
+
+ # @!method self.variants
+ # @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile)]
 end
 end
 end
diff --git a/lib/openai/models/responses/response_input_file.rb b/lib/openai/models/responses/response_input_file.rb
index a6e595f9..9aee8404 100644
--- a/lib/openai/models/responses/response_input_file.rb
+++ b/lib/openai/models/responses/response_input_file.rb
@@ -3,54 +3,52 @@
 module OpenAI
 module Models
 module Responses
- class ResponseInputFile < OpenAI::BaseModel
+ class ResponseInputFile < OpenAI::Internal::Type::BaseModel
 # @!attribute type
 # The type of the input item. Always `input_file`.
 #
 # @return [Symbol, :input_file]
 required :type, const: :input_file

- # @!attribute [r] file_data
+ # @!attribute file_data
 # The content of the file to be sent to the model.
 #
 # @return [String, nil]
 optional :file_data, String

- # @!parse
- # # @return [String]
- # attr_writer :file_data
-
- # @!attribute [r] file_id
+ # @!attribute file_id
 # The ID of the file to be sent to the model.
 #
 # @return [String, nil]
- optional :file_id, String
+ optional :file_id, String, nil?: true

- # @!parse
- # # @return [String]
- # attr_writer :file_id
+ # @!attribute file_url
+ # The URL of the file to be sent to the model.
+ #
+ # @return [String, nil]
+ optional :file_url, String

- # @!attribute [r] filename
+ # @!attribute filename
 # The name of the file to be sent to the model.
 #
 # @return [String, nil]
 optional :filename, String

- # @!parse
- # # @return [String]
- # attr_writer :filename
-
- # @!parse
- # # A file input to the model.
- # #
- # # @param file_data [String]
- # # @param file_id [String]
- # # @param filename [String]
- # # @param type [Symbol, :input_file]
- # #
- # def initialize(file_data: nil, file_id: nil, filename: nil, type: :input_file, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
+ # @!method initialize(file_data: nil, file_id: nil, file_url: nil, filename: nil, type: :input_file)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseInputFile} for more details.
+ #
+ # A file input to the model.
+ #
+ # @param file_data [String] The content of the file to be sent to the model.
+ #
+ # @param file_id [String, nil] The ID of the file to be sent to the model.
+ #
+ # @param file_url [String] The URL of the file to be sent to the model.
+ #
+ # @param filename [String] The name of the file to be sent to the model.
+ #
+ # @param type [Symbol, :input_file] The type of the input item. Always `input_file`.
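# An illustrative sketch of the three ways ResponseInputFile lets a file be
# attached per the attributes above; the ID and URL are placeholders, and
# base64 for `file_data` is an assumption (the diff only says "the content of
# the file to be sent to the model").
require "base64"

by_id  = {type: :input_file, file_id: "file-abc123"}
by_url = {type: :input_file, file_url: "https://example.com/report.pdf"}
inline = {
  type: :input_file,
  filename: "report.pdf",
  file_data: Base64.strict_encode64(File.binread("report.pdf"))
}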
end
 end
 end
diff --git a/lib/openai/models/responses/response_input_image.rb b/lib/openai/models/responses/response_input_image.rb
index e9334fd0..261bd7d6 100644
--- a/lib/openai/models/responses/response_input_image.rb
+++ b/lib/openai/models/responses/response_input_image.rb
@@ -3,13 +3,13 @@
 module OpenAI
 module Models
 module Responses
- class ResponseInputImage < OpenAI::BaseModel
+ class ResponseInputImage < OpenAI::Internal::Type::BaseModel
 # @!attribute detail
 # The detail level of the image to be sent to the model. One of `high`, `low`, or
- `auto`. Defaults to `auto`.
+ `auto`. Defaults to `auto`.
 #
 # @return [Symbol, OpenAI::Models::Responses::ResponseInputImage::Detail]
- required :detail, enum: -> { OpenAI::Models::Responses::ResponseInputImage::Detail }
+ required :detail, enum: -> { OpenAI::Responses::ResponseInputImage::Detail }

 # @!attribute type
 # The type of the input item. Always `input_image`.
@@ -25,34 +25,39 @@ class ResponseInputImage < OpenAI::BaseModel

 # @!attribute image_url
 # The URL of the image to be sent to the model. A fully qualified URL or base64
- encoded image in a data URL.
+ encoded image in a data URL.
 #
 # @return [String, nil]
 optional :image_url, String, nil?: true

- # @!parse
- # # An image input to the model. Learn about
- # # [image inputs](https://platform.openai.com/docs/guides/vision).
- # #
- # # @param detail [Symbol, OpenAI::Models::Responses::ResponseInputImage::Detail]
- # # @param file_id [String, nil]
- # # @param image_url [String, nil]
- # # @param type [Symbol, :input_image]
- # #
- # def initialize(detail:, file_id: nil, image_url: nil, type: :input_image, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
-
- # @abstract
+ # @!method initialize(detail:, file_id: nil, image_url: nil, type: :input_image)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseInputImage} for more details.
+ #
+ # An image input to the model. Learn about
+ # [image inputs](https://platform.openai.com/docs/guides/vision).
+ #
+ # @param detail [Symbol, OpenAI::Models::Responses::ResponseInputImage::Detail] The detail level of the image to be sent to the model. One of `high`, `low`, or
+ #
+ # @param file_id [String, nil] The ID of the file to be sent to the model.
+ #
+ # @param image_url [String, nil] The URL of the image to be sent to the model. A fully qualified URL or base64 en
 #
+ # @param type [Symbol, :input_image] The type of the input item. Always `input_image`.
+
 # The detail level of the image to be sent to the model. One of `high`, `low`, or
- `auto`. Defaults to `auto`.
- class Detail < OpenAI::Enum
- HIGH = :high
+ `auto`. Defaults to `auto`.
+ #
+ # @see OpenAI::Models::Responses::ResponseInputImage#detail
+ module Detail
+ extend OpenAI::Internal::Type::Enum
+
 LOW = :low
+ HIGH = :high
 AUTO = :auto

- finalize!
+ # @!method self.values
+ # @return [Array]
 end
 end
 end
diff --git a/lib/openai/models/responses/response_input_item.rb b/lib/openai/models/responses/response_input_item.rb
index c94a979c..c1aa3165 100644
--- a/lib/openai/models/responses/response_input_item.rb
+++ b/lib/openai/models/responses/response_input_item.rb
@@ -3,14 +3,14 @@
 module OpenAI
 module Models
 module Responses
- # @abstract
- #
 # A message input to the model with a role indicating instruction following
- hierarchy. Instructions given with the `developer` or `system` role take
- precedence over instructions given with the `user` role.
Messages with the - # `assistant` role are presumed to have been generated by the model in previous - # interactions. - class ResponseInputItem < OpenAI::Union + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + module ResponseInputItem + extend OpenAI::Internal::Type::Union + discriminator :type # A message input to the model with a role indicating instruction following @@ -18,128 +18,163 @@ class ResponseInputItem < OpenAI::Union # precedence over instructions given with the `user` role. Messages with the # `assistant` role are presumed to have been generated by the model in previous # interactions. - variant :message, -> { OpenAI::Models::Responses::EasyInputMessage } + variant :message, -> { OpenAI::Responses::EasyInputMessage } # A message input to the model with a role indicating instruction following # hierarchy. Instructions given with the `developer` or `system` role take # precedence over instructions given with the `user` role. - variant :message, -> { OpenAI::Models::Responses::ResponseInputItem::Message } + variant :message, -> { OpenAI::Responses::ResponseInputItem::Message } # An output message from the model. - variant :message, -> { OpenAI::Models::Responses::ResponseOutputMessage } + variant :message, -> { OpenAI::Responses::ResponseOutputMessage } # The results of a file search tool call. See the # [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information. - variant :file_search_call, -> { OpenAI::Models::Responses::ResponseFileSearchToolCall } + variant :file_search_call, -> { OpenAI::Responses::ResponseFileSearchToolCall } # A tool call to a computer use tool. See the # [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information. - variant :computer_call, -> { OpenAI::Models::Responses::ResponseComputerToolCall } + variant :computer_call, -> { OpenAI::Responses::ResponseComputerToolCall } # The output of a computer tool call. - variant :computer_call_output, -> { OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput } + variant :computer_call_output, -> { OpenAI::Responses::ResponseInputItem::ComputerCallOutput } # The results of a web search tool call. See the # [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information. - variant :web_search_call, -> { OpenAI::Models::Responses::ResponseFunctionWebSearch } + variant :web_search_call, -> { OpenAI::Responses::ResponseFunctionWebSearch } # A tool call to run a function. See the # [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information. - variant :function_call, -> { OpenAI::Models::Responses::ResponseFunctionToolCall } + variant :function_call, -> { OpenAI::Responses::ResponseFunctionToolCall } # The output of a function tool call. - variant :function_call_output, -> { OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput } + variant :function_call_output, -> { OpenAI::Responses::ResponseInputItem::FunctionCallOutput } # A description of the chain of thought used by a reasoning model while generating - # a response. - variant :reasoning, -> { OpenAI::Models::Responses::ResponseReasoningItem } + # a response. 
Be sure to include these items in your `input` to the Responses API + # for subsequent turns of a conversation if you are manually + # [managing context](https://platform.openai.com/docs/guides/conversation-state). + variant :reasoning, -> { OpenAI::Responses::ResponseReasoningItem } + + # An image generation request made by the model. + variant :image_generation_call, -> { OpenAI::Responses::ResponseInputItem::ImageGenerationCall } + + # A tool call to run code. + variant :code_interpreter_call, -> { OpenAI::Responses::ResponseCodeInterpreterToolCall } + + # A tool call to run a command on the local shell. + variant :local_shell_call, -> { OpenAI::Responses::ResponseInputItem::LocalShellCall } + + # The output of a local shell tool call. + variant :local_shell_call_output, -> { OpenAI::Responses::ResponseInputItem::LocalShellCallOutput } + + # A list of tools available on an MCP server. + variant :mcp_list_tools, -> { OpenAI::Responses::ResponseInputItem::McpListTools } + + # A request for human approval of a tool invocation. + variant :mcp_approval_request, -> { OpenAI::Responses::ResponseInputItem::McpApprovalRequest } + + # A response to an MCP approval request. + variant :mcp_approval_response, -> { OpenAI::Responses::ResponseInputItem::McpApprovalResponse } + + # An invocation of a tool on an MCP server. + variant :mcp_call, -> { OpenAI::Responses::ResponseInputItem::McpCall } + + # The output of a custom tool call from your code, being sent back to the model. + variant :custom_tool_call_output, -> { OpenAI::Responses::ResponseCustomToolCallOutput } + + # A call to a custom tool created by the model. + variant :custom_tool_call, -> { OpenAI::Responses::ResponseCustomToolCall } # An internal identifier for an item to reference. - variant :item_reference, -> { OpenAI::Models::Responses::ResponseInputItem::ItemReference } + variant :item_reference, -> { OpenAI::Responses::ResponseInputItem::ItemReference } - class Message < OpenAI::BaseModel + class Message < OpenAI::Internal::Type::BaseModel # @!attribute content # A list of one or many input items to the model, containing different content - # types. + # types. # # @return [Array] - required :content, -> { OpenAI::ArrayOf[union: OpenAI::Models::Responses::ResponseInputContent] } + required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent] } # @!attribute role # The role of the message input. One of `user`, `system`, or `developer`. # # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Role] - required :role, enum: -> { OpenAI::Models::Responses::ResponseInputItem::Message::Role } + required :role, enum: -> { OpenAI::Responses::ResponseInputItem::Message::Role } - # @!attribute [r] status + # @!attribute status # The status of item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. # # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Status, nil] - optional :status, enum: -> { OpenAI::Models::Responses::ResponseInputItem::Message::Status } - - # @!parse - # # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Status] - # attr_writer :status + optional :status, enum: -> { OpenAI::Responses::ResponseInputItem::Message::Status } - # @!attribute [r] type + # @!attribute type # The type of the message input. Always set to `message`. 
#
 # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Type, nil]
- optional :type, enum: -> { OpenAI::Models::Responses::ResponseInputItem::Message::Type }
-
- # @!parse
- # # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Type]
- # attr_writer :type
+ optional :type, enum: -> { OpenAI::Responses::ResponseInputItem::Message::Type }

- # @!parse
- # # A message input to the model with a role indicating instruction following
- # # hierarchy. Instructions given with the `developer` or `system` role take
- # # precedence over instructions given with the `user` role.
- # #
- # # @param content [Array]
- # # @param role [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Role]
- # # @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Status]
- # # @param type [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Type]
- # #
- # def initialize(content:, role:, status: nil, type: nil, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
-
- # @abstract
+ # @!method initialize(content:, role:, status: nil, type: nil)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseInputItem::Message} for more details.
+ #
+ # A message input to the model with a role indicating instruction following
+ # hierarchy. Instructions given with the `developer` or `system` role take
+ # precedence over instructions given with the `user` role.
+ #
+ # @param content [Array] A list of one or many input items to the model, containing different content
+ #
+ # @param role [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Role] The role of the message input. One of `user`, `system`, or `developer`.
 #
+ # @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Status] The status of item. One of `in_progress`, `completed`, or
+ #
+ # @param type [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Type] The type of the message input. Always set to `message`.
+
 # The role of the message input. One of `user`, `system`, or `developer`.
- class Role < OpenAI::Enum
+ #
+ # @see OpenAI::Models::Responses::ResponseInputItem::Message#role
+ module Role
+ extend OpenAI::Internal::Type::Enum
+
 USER = :user
 SYSTEM = :system
 DEVELOPER = :developer

- finalize!
+ # @!method self.values
+ # @return [Array]
 end

- # @abstract
- #
 # The status of item. One of `in_progress`, `completed`, or `incomplete`.
- Populated when items are returned via API.
- class Status < OpenAI::Enum
+ Populated when items are returned via API.
+ #
+ # @see OpenAI::Models::Responses::ResponseInputItem::Message#status
+ module Status
+ extend OpenAI::Internal::Type::Enum
+
 IN_PROGRESS = :in_progress
 COMPLETED = :completed
 INCOMPLETE = :incomplete

- finalize!
+ # @!method self.values
+ # @return [Array]
 end

- # @abstract
- #
 # The type of the message input. Always set to `message`.
- class Type < OpenAI::Enum
+ #
+ # @see OpenAI::Models::Responses::ResponseInputItem::Message#type
+ module Type
+ extend OpenAI::Internal::Type::Enum
+
 MESSAGE = :message

- finalize!
+ # @!method self.values
+ # @return [Array]
 end
 end

- class ComputerCallOutput < OpenAI::BaseModel
+ class ComputerCallOutput < OpenAI::Internal::Type::BaseModel
 # @!attribute call_id
 # The ID of the computer tool call that produced the output.
 #
@@ -149,8 +184,8 @@ class ComputerCallOutput < OpenAI::BaseModel
 # @!attribute output
 # A computer screenshot image used with the computer use tool.
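# A minimal sketch of the Message shape documented above; the `input_text`
# content fields mirror the public Responses API and are assumptions here,
# since that model is not shown in this part of the diff.
developer_message = {
  type: :message,
  role: :developer,
  content: [{type: :input_text, text: "Answer in one sentence."}]
}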
# - # @return [OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Output] - required :output, -> { OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Output } + # @return [OpenAI::Models::Responses::ResponseComputerToolCallOutputScreenshot] + required :output, -> { OpenAI::Responses::ResponseComputerToolCallOutputScreenshot } # @!attribute type # The type of the computer tool call output. Always `computer_call_output`. @@ -158,94 +193,52 @@ class ComputerCallOutput < OpenAI::BaseModel # @return [Symbol, :computer_call_output] required :type, const: :computer_call_output - # @!attribute [r] id + # @!attribute id # The ID of the computer tool call output. # # @return [String, nil] - optional :id, String - - # @!parse - # # @return [String] - # attr_writer :id + optional :id, String, nil?: true - # @!attribute [r] acknowledged_safety_checks + # @!attribute acknowledged_safety_checks # The safety checks reported by the API that have been acknowledged by the - # developer. + # developer. # # @return [Array, nil] optional :acknowledged_safety_checks, - -> { OpenAI::ArrayOf[OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck] } + -> { + OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck] + }, + nil?: true - # @!parse - # # @return [Array] - # attr_writer :acknowledged_safety_checks - - # @!attribute [r] status + # @!attribute status # The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. + # `incomplete`. Populated when input items are returned via API. # # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Status, nil] - optional :status, enum: -> { OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Status } - - # @!parse - # # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Status] - # attr_writer :status - - # @!parse - # # The output of a computer tool call. - # # - # # @param call_id [String] - # # @param output [OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Output] - # # @param id [String] - # # @param acknowledged_safety_checks [Array] - # # @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Status] - # # @param type [Symbol, :computer_call_output] - # # - # def initialize(call_id:, output:, id: nil, acknowledged_safety_checks: nil, status: nil, type: :computer_call_output, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Output < OpenAI::BaseModel - # @!attribute type - # Specifies the event type. For a computer screenshot, this property is always set - # to `computer_screenshot`. - # - # @return [Symbol, :computer_screenshot] - required :type, const: :computer_screenshot - - # @!attribute [r] file_id - # The identifier of an uploaded file that contains the screenshot. - # - # @return [String, nil] - optional :file_id, String - - # @!parse - # # @return [String] - # attr_writer :file_id - - # @!attribute [r] image_url - # The URL of the screenshot image. - # - # @return [String, nil] - optional :image_url, String - - # @!parse - # # @return [String] - # attr_writer :image_url - - # @!parse - # # A computer screenshot image used with the computer use tool. 
- # #
- # # @param file_id [String]
- # # @param image_url [String]
- # # @param type [Symbol, :computer_screenshot]
- # #
- # def initialize(file_id: nil, image_url: nil, type: :computer_screenshot, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
- end
+ optional :status,
+ enum: -> { OpenAI::Responses::ResponseInputItem::ComputerCallOutput::Status },
+ nil?: true
+
+ # @!method initialize(call_id:, output:, id: nil, acknowledged_safety_checks: nil, status: nil, type: :computer_call_output)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput} for more
+ # details.
+ #
+ # The output of a computer tool call.
+ #
+ # @param call_id [String] The ID of the computer tool call that produced the output.
+ #
+ # @param output [OpenAI::Models::Responses::ResponseComputerToolCallOutputScreenshot] A computer screenshot image used with the computer use tool.
+ #
+ # @param id [String, nil] The ID of the computer tool call output.
+ #
+ # @param acknowledged_safety_checks [Array, nil] The safety checks reported by the API that have been acknowledged by the develop
+ #
+ # @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Status, nil] The status of the message input. One of `in_progress`, `completed`, or `incomple
+ #
+ # @param type [Symbol, :computer_call_output] The type of the computer tool call output. Always `computer_call_output`.

- class AcknowledgedSafetyCheck < OpenAI::BaseModel
+ class AcknowledgedSafetyCheck < OpenAI::Internal::Type::BaseModel
 # @!attribute id
 # The ID of the pending safety check.
 #
@@ -255,41 +248,42 @@ class AcknowledgedSafetyCheck < OpenAI::BaseModel
 # @!attribute code
 # The type of the pending safety check.
 #
- # @return [String]
- required :code, String
+ # @return [String, nil]
+ optional :code, String, nil?: true

 # @!attribute message
 # Details about the pending safety check.
 #
- # @return [String]
- required :message, String
-
- # @!parse
- # # A pending safety check for the computer call.
- # #
- # # @param id [String]
- # # @param code [String]
- # # @param message [String]
- # #
- # def initialize(id:, code:, message:, **) = super
-
- # def initialize: (Hash | OpenAI::BaseModel) -> void
+ # @return [String, nil]
+ optional :message, String, nil?: true
+
+ # @!method initialize(id:, code: nil, message: nil)
+ # A pending safety check for the computer call.
+ #
+ # @param id [String] The ID of the pending safety check.
+ #
+ # @param code [String, nil] The type of the pending safety check.
+ #
+ # @param message [String, nil] Details about the pending safety check.
 end

- # @abstract
- #
 # The status of the message input. One of `in_progress`, `completed`, or
- `incomplete`. Populated when input items are returned via API.
- class Status < OpenAI::Enum
+ `incomplete`. Populated when input items are returned via API.
+ #
+ # @see OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput#status
+ module Status
+ extend OpenAI::Internal::Type::Enum
+
 IN_PROGRESS = :in_progress
 COMPLETED = :completed
 INCOMPLETE = :incomplete

- finalize!
+ # @!method self.values
+ # @return [Array]
 end
 end

- class FunctionCallOutput < OpenAI::BaseModel
+ class FunctionCallOutput < OpenAI::Internal::Type::BaseModel
 # @!attribute call_id
 # The unique ID of the function tool call generated by the model.
#
@@ -308,55 +302,541 @@ class FunctionCallOutput < OpenAI::BaseModel
 # @return [Symbol, :function_call_output]
 required :type, const: :function_call_output

- # @!attribute [r] id
+ # @!attribute id
 # The unique ID of the function tool call output. Populated when this item is
- returned via API.
+ returned via API.
 #
 # @return [String, nil]
- optional :id, String
+ optional :id, String, nil?: true

- # @!parse
- # # @return [String]
- # attr_writer :id
-
- # @!attribute [r] status
+ # @!attribute status
 # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
- Populated when items are returned via API.
+ Populated when items are returned via API.
 #
 # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::Status, nil]
- optional :status, enum: -> { OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::Status }
+ optional :status,
+ enum: -> { OpenAI::Responses::ResponseInputItem::FunctionCallOutput::Status },
+ nil?: true
+
+ # @!method initialize(call_id:, output:, id: nil, status: nil, type: :function_call_output)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput} for more
+ # details.
+ #
+ # The output of a function tool call.
+ #
+ # @param call_id [String] The unique ID of the function tool call generated by the model.
+ #
+ # @param output [String] A JSON string of the output of the function tool call.
+ #
+ # @param id [String, nil] The unique ID of the function tool call output. Populated when this item is retu
+ #
+ # @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::Status, nil] The status of the item. One of `in_progress`, `completed`, or `incomplete`. Popu
+ #
+ # @param type [Symbol, :function_call_output] The type of the function tool call output. Always `function_call_output`.

- # @!parse
- # # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::Status]
- # attr_writer :status
+ # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+ # Populated when items are returned via API.
+ #
+ # @see OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput#status
+ module Status
+ extend OpenAI::Internal::Type::Enum

- # @!parse
- # # The output of a function tool call.
- # #
- # # @param call_id [String]
- # # @param output [String]
- # # @param id [String]
- # # @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::Status]
- # # @param type [Symbol, :function_call_output]
- # #
- # def initialize(call_id:, output:, id: nil, status: nil, type: :function_call_output, **) = super
+ IN_PROGRESS = :in_progress
+ COMPLETED = :completed
+ INCOMPLETE = :incomplete

- # def initialize: (Hash | OpenAI::BaseModel) -> void
+ # @!method self.values
+ # @return [Array]
+ end
+ end

- # @abstract
+ class ImageGenerationCall < OpenAI::Internal::Type::BaseModel
+ # @!attribute id
+ # The unique ID of the image generation call.
+ #
+ # @return [String]
+ required :id, String
+
+ # @!attribute result
+ # The generated image encoded in base64.
+ #
+ # @return [String, nil]
+ required :result, String, nil?: true
+
+ # @!attribute status
+ # The status of the image generation call.
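# A short sketch of sending a tool result back as a FunctionCallOutput item,
# per the attributes above; the call ID and payload are placeholders.
require "json"

function_result = {
  type: :function_call_output,
  call_id: "call_abc123",
  output: JSON.generate({temperature: 21, unit: "C"})
}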
+ #
+ # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall::Status]
+ required :status, enum: -> { OpenAI::Responses::ResponseInputItem::ImageGenerationCall::Status }
+
+ # @!attribute type
+ # The type of the image generation call. Always `image_generation_call`.
+ #
+ # @return [Symbol, :image_generation_call]
+ required :type, const: :image_generation_call
+
+ # @!method initialize(id:, result:, status:, type: :image_generation_call)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall} for more
+ # details.
+ #
+ # An image generation request made by the model.
+ #
+ # @param id [String] The unique ID of the image generation call.
+ #
+ # @param result [String, nil] The generated image encoded in base64.
+ #
+ # @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall::Status] The status of the image generation call.
+ #
+ # @param type [Symbol, :image_generation_call] The type of the image generation call. Always `image_generation_call`.
+
+ # The status of the image generation call.
+ #
+ # @see OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall#status
+ module Status
+ extend OpenAI::Internal::Type::Enum
+
+ IN_PROGRESS = :in_progress
+ COMPLETED = :completed
+ GENERATING = :generating
+ FAILED = :failed
+
+ # @!method self.values
+ # @return [Array]
+ end
+ end
+
+ class LocalShellCall < OpenAI::Internal::Type::BaseModel
+ # @!attribute id
+ # The unique ID of the local shell call.
+ #
+ # @return [String]
+ required :id, String
+
+ # @!attribute action
+ # Execute a shell command on the server.
+ #
+ # @return [OpenAI::Models::Responses::ResponseInputItem::LocalShellCall::Action]
+ required :action, -> { OpenAI::Responses::ResponseInputItem::LocalShellCall::Action }
+
+ # @!attribute call_id
+ # The unique ID of the local shell tool call generated by the model.
 #
+ # @return [String]
+ required :call_id, String
+
+ # @!attribute status
+ # The status of the local shell call.
+ #
+ # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall::Status]
+ required :status, enum: -> { OpenAI::Responses::ResponseInputItem::LocalShellCall::Status }
+
+ # @!attribute type
+ # The type of the local shell call. Always `local_shell_call`.
+ #
+ # @return [Symbol, :local_shell_call]
+ required :type, const: :local_shell_call
+
+ # @!method initialize(id:, action:, call_id:, status:, type: :local_shell_call)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseInputItem::LocalShellCall} for more details.
+ #
+ # A tool call to run a command on the local shell.
+ #
+ # @param id [String] The unique ID of the local shell call.
+ #
+ # @param action [OpenAI::Models::Responses::ResponseInputItem::LocalShellCall::Action] Execute a shell command on the server.
+ #
+ # @param call_id [String] The unique ID of the local shell tool call generated by the model.
+ #
+ # @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall::Status] The status of the local shell call.
+ #
+ # @param type [Symbol, :local_shell_call] The type of the local shell call. Always `local_shell_call`.
+
+ # @see OpenAI::Models::Responses::ResponseInputItem::LocalShellCall#action
+ class Action < OpenAI::Internal::Type::BaseModel
+ # @!attribute command
+ # The command to run.
+ #
+ # @return [Array]
+ required :command, OpenAI::Internal::Type::ArrayOf[String]
+
+ # @!attribute env
+ # Environment variables to set for the command.
+ #
+ # @return [Hash{Symbol=>String}]
+ required :env, OpenAI::Internal::Type::HashOf[String]
+
+ # @!attribute type
+ # The type of the local shell action. Always `exec`.
+ #
+ # @return [Symbol, :exec]
+ required :type, const: :exec
+
+ # @!attribute timeout_ms
+ # Optional timeout in milliseconds for the command.
+ #
+ # @return [Integer, nil]
+ optional :timeout_ms, Integer, nil?: true
+
+ # @!attribute user
+ # Optional user to run the command as.
+ #
+ # @return [String, nil]
+ optional :user, String, nil?: true
+
+ # @!attribute working_directory
+ # Optional working directory to run the command in.
+ #
+ # @return [String, nil]
+ optional :working_directory, String, nil?: true
+
+ # @!method initialize(command:, env:, timeout_ms: nil, user: nil, working_directory: nil, type: :exec)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseInputItem::LocalShellCall::Action} for more
+ # details.
+ #
+ # Execute a shell command on the server.
+ #
+ # @param command [Array] The command to run.
+ #
+ # @param env [Hash{Symbol=>String}] Environment variables to set for the command.
+ #
+ # @param timeout_ms [Integer, nil] Optional timeout in milliseconds for the command.
+ #
+ # @param user [String, nil] Optional user to run the command as.
+ #
+ # @param working_directory [String, nil] Optional working directory to run the command in.
+ #
+ # @param type [Symbol, :exec] The type of the local shell action. Always `exec`.
+ end
+
+ # The status of the local shell call.
+ #
+ # @see OpenAI::Models::Responses::ResponseInputItem::LocalShellCall#status
+ module Status
+ extend OpenAI::Internal::Type::Enum
+
+ IN_PROGRESS = :in_progress
+ COMPLETED = :completed
+ INCOMPLETE = :incomplete
+
+ # @!method self.values
+ # @return [Array]
+ end
+ end
+
+ class LocalShellCallOutput < OpenAI::Internal::Type::BaseModel
+ # @!attribute id
+ # The unique ID of the local shell tool call generated by the model.
+ #
+ # @return [String]
+ required :id, String
+
+ # @!attribute output
+ # A JSON string of the output of the local shell tool call.
+ #
+ # @return [String]
+ required :output, String
+
+ # @!attribute type
+ # The type of the local shell tool call output. Always `local_shell_call_output`.
+ #
+ # @return [Symbol, :local_shell_call_output]
+ required :type, const: :local_shell_call_output
+
+ # @!attribute status
+ # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+ #
+ # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput::Status, nil]
+ optional :status,
+ enum: -> { OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::Status },
+ nil?: true
+
+ # @!method initialize(id:, output:, status: nil, type: :local_shell_call_output)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput} for more
+ # details.
+ #
+ # The output of a local shell tool call.
+ #
+ # @param id [String] The unique ID of the local shell tool call generated by the model.
+ #
+ # @param output [String] A JSON string of the output of the local shell tool call.
+ #
+ # @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput::Status, nil] The status of the item. One of `in_progress`, `completed`, or `incomplete`.
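# An illustrative sketch of executing the `exec` action documented above on
# the integrator's machine; `action` is a hypothetical parsed
# LocalShellCall::Action, and `timeout_ms`/`user` handling is omitted for
# brevity.
require "open3"

stdout_str, stderr_str, status = Open3.capture3(
  action.env.transform_keys(&:to_s), # Process.spawn expects string env keys
  *action.command,
  chdir: action.working_directory || Dir.pwd
)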
+ #
+ # @param type [Symbol, :local_shell_call_output] The type of the local shell tool call output. Always `local_shell_call_output`.
+
 # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-
+ #
+ # @see OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput#status
+ module Status
+ extend OpenAI::Internal::Type::Enum
+
 IN_PROGRESS = :in_progress
 COMPLETED = :completed
 INCOMPLETE = :incomplete

- finalize!
+ # @!method self.values
+ # @return [Array]
 end
 end

- class ItemReference < OpenAI::BaseModel
+ class McpListTools < OpenAI::Internal::Type::BaseModel
+ # @!attribute id
+ # The unique ID of the list.
+ #
+ # @return [String]
+ required :id, String
+
+ # @!attribute server_label
+ # The label of the MCP server.
+ #
+ # @return [String]
+ required :server_label, String
+
+ # @!attribute tools
+ # The tools available on the server.
+ #
+ # @return [Array]
+ required :tools,
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseInputItem::McpListTools::Tool] }
+
+ # @!attribute type
+ # The type of the item. Always `mcp_list_tools`.
+ #
+ # @return [Symbol, :mcp_list_tools]
+ required :type, const: :mcp_list_tools
+
+ # @!attribute error
+ # Error message if the server could not list tools.
+ #
+ # @return [String, nil]
+ optional :error, String, nil?: true
+
+ # @!method initialize(id:, server_label:, tools:, error: nil, type: :mcp_list_tools)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseInputItem::McpListTools} for more details.
+ #
+ # A list of tools available on an MCP server.
+ #
+ # @param id [String] The unique ID of the list.
+ #
+ # @param server_label [String] The label of the MCP server.
+ #
+ # @param tools [Array] The tools available on the server.
+ #
+ # @param error [String, nil] Error message if the server could not list tools.
+ #
+ # @param type [Symbol, :mcp_list_tools] The type of the item. Always `mcp_list_tools`.
+
+ class Tool < OpenAI::Internal::Type::BaseModel
+ # @!attribute input_schema
+ # The JSON schema describing the tool's input.
+ #
+ # @return [Object]
+ required :input_schema, OpenAI::Internal::Type::Unknown
+
+ # @!attribute name
+ # The name of the tool.
+ #
+ # @return [String]
+ required :name, String
+
+ # @!attribute annotations
+ # Additional annotations about the tool.
+ #
+ # @return [Object, nil]
+ optional :annotations, OpenAI::Internal::Type::Unknown, nil?: true
+
+ # @!attribute description
+ # The description of the tool.
+ #
+ # @return [String, nil]
+ optional :description, String, nil?: true
+
+ # @!method initialize(input_schema:, name:, annotations: nil, description: nil)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseInputItem::McpListTools::Tool} for more
+ # details.
+ #
+ # A tool available on an MCP server.
+ #
+ # @param input_schema [Object] The JSON schema describing the tool's input.
+ #
+ # @param name [String] The name of the tool.
+ #
+ # @param annotations [Object, nil] Additional annotations about the tool.
+ #
+ # @param description [String, nil] The description of the tool.
+ end
+ end
+
+ class McpApprovalRequest < OpenAI::Internal::Type::BaseModel
+ # @!attribute id
+ # The unique ID of the approval request.
+ #
+ # @return [String]
+ required :id, String
+
+ # @!attribute arguments
+ # A JSON string of arguments for the tool.
+ #
+ # @return [String]
+ required :arguments, String
+
+ # @!attribute name
+ # The name of the tool to run.
+ #
+ # @return [String]
+ required :name, String
+
+ # @!attribute server_label
+ # The label of the MCP server making the request.
+ #
+ # @return [String]
+ required :server_label, String
+
+ # @!attribute type
+ # The type of the item. Always `mcp_approval_request`.
+ #
+ # @return [Symbol, :mcp_approval_request]
+ required :type, const: :mcp_approval_request
+
+ # @!method initialize(id:, arguments:, name:, server_label:, type: :mcp_approval_request)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest} for more
+ # details.
+ #
+ # A request for human approval of a tool invocation.
+ #
+ # @param id [String] The unique ID of the approval request.
+ #
+ # @param arguments [String] A JSON string of arguments for the tool.
+ #
+ # @param name [String] The name of the tool to run.
+ #
+ # @param server_label [String] The label of the MCP server making the request.
+ #
+ # @param type [Symbol, :mcp_approval_request] The type of the item. Always `mcp_approval_request`.
+ end
+
+ class McpApprovalResponse < OpenAI::Internal::Type::BaseModel
+ # @!attribute approval_request_id
+ # The ID of the approval request being answered.
+ #
+ # @return [String]
+ required :approval_request_id, String
+
+ # @!attribute approve
+ # Whether the request was approved.
+ #
+ # @return [Boolean]
+ required :approve, OpenAI::Internal::Type::Boolean
+
+ # @!attribute type
+ # The type of the item. Always `mcp_approval_response`.
+ #
+ # @return [Symbol, :mcp_approval_response]
+ required :type, const: :mcp_approval_response
+
+ # @!attribute id
+ # The unique ID of the approval response.
+ #
+ # @return [String, nil]
+ optional :id, String, nil?: true
+
+ # @!attribute reason
+ # Optional reason for the decision.
+ #
+ # @return [String, nil]
+ optional :reason, String, nil?: true
+
+ # @!method initialize(approval_request_id:, approve:, id: nil, reason: nil, type: :mcp_approval_response)
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse} for more
+ # details.
+ #
+ # A response to an MCP approval request.
+ #
+ # @param approval_request_id [String] The ID of the approval request being answered.
+ #
+ # @param approve [Boolean] Whether the request was approved.
+ #
+ # @param id [String, nil] The unique ID of the approval response.
+ #
+ # @param reason [String, nil] Optional reason for the decision.
+ #
+ # @param type [Symbol, :mcp_approval_response] The type of the item. Always `mcp_approval_response`.
+ end
+
+ class McpCall < OpenAI::Internal::Type::BaseModel
+ # @!attribute id
+ # The unique ID of the tool call.
+ #
+ # @return [String]
+ required :id, String
+
+ # @!attribute arguments
+ # A JSON string of the arguments passed to the tool.
+ #
+ # @return [String]
+ required :arguments, String
+
+ # @!attribute name
+ # The name of the tool that was run.
+ #
+ # @return [String]
+ required :name, String
+
+ # @!attribute server_label
+ # The label of the MCP server running the tool.
+ #
+ # @return [String]
+ required :server_label, String
+
+ # @!attribute type
+ # The type of the item. Always `mcp_call`.
+ #
+ # @return [Symbol, :mcp_call]
+ required :type, const: :mcp_call
+
+ # @!attribute error
+ # The error from the tool call, if any.
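# A minimal sketch of answering an McpApprovalRequest with the
# McpApprovalResponse shape above; `request` is a hypothetical parsed
# `mcp_approval_request` item.
approval = {
  type: :mcp_approval_response,
  approval_request_id: request.id,
  approve: true,
  reason: "Arguments reviewed; safe to run."
}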
+ # + # @return [String, nil] + optional :error, String, nil?: true + + # @!attribute output + # The output from the tool call. + # + # @return [String, nil] + optional :output, String, nil?: true + + # @!method initialize(id:, arguments:, name:, server_label:, error: nil, output: nil, type: :mcp_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseInputItem::McpCall} for more details. + # + # An invocation of a tool on an MCP server. + # + # @param id [String] The unique ID of the tool call. + # + # @param arguments [String] A JSON string of the arguments passed to the tool. + # + # @param name [String] The name of the tool that was run. + # + # @param server_label [String] The label of the MCP server running the tool. + # + # @param error [String, nil] The error from the tool call, if any. + # + # @param output [String, nil] The output from the tool call. + # + # @param type [Symbol, :mcp_call] The type of the item. Always `mcp_call`. + end + + class ItemReference < OpenAI::Internal::Type::BaseModel # @!attribute id # The ID of the item to reference. # @@ -366,19 +846,31 @@ class ItemReference < OpenAI::BaseModel # @!attribute type # The type of item to reference. Always `item_reference`. # - # @return [Symbol, :item_reference] - required :type, const: :item_reference + # @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::ItemReference::Type, nil] + optional :type, enum: -> { OpenAI::Responses::ResponseInputItem::ItemReference::Type }, nil?: true - # @!parse - # # An internal identifier for an item to reference. - # # - # # @param id [String] - # # @param type [Symbol, :item_reference] - # # - # def initialize(id:, type: :item_reference, **) = super + # @!method initialize(id:, type: nil) + # An internal identifier for an item to reference. + # + # @param id [String] The ID of the item to reference. + # + # @param type [Symbol, OpenAI::Models::Responses::ResponseInputItem::ItemReference::Type, nil] The type of item to reference. Always `item_reference`. - # def initialize: (Hash | OpenAI::BaseModel) -> void + # The type of item to reference. Always `item_reference`. 
+ # + # @see OpenAI::Models::Responses::ResponseInputItem::ItemReference#type + module Type + extend OpenAI::Internal::Type::Enum + + ITEM_REFERENCE = :item_reference + + # @!method self.values + # @return [Array] + end end + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference)] end end end diff --git a/lib/openai/models/responses/response_input_message_content_list.rb b/lib/openai/models/responses/response_input_message_content_list.rb index 9ac48e2d..517253ab 100644 --- a/lib/openai/models/responses/response_input_message_content_list.rb +++ b/lib/openai/models/responses/response_input_message_content_list.rb @@ -3,7 +3,9 @@ module OpenAI module Models module Responses - ResponseInputMessageContentList = OpenAI::ArrayOf[union: -> { OpenAI::Models::Responses::ResponseInputContent }] + # @type [OpenAI::Internal::Type::Converter] + ResponseInputMessageContentList = + OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Responses::ResponseInputContent }] end end end diff --git a/lib/openai/models/responses/response_input_message_item.rb b/lib/openai/models/responses/response_input_message_item.rb new file mode 100644 index 00000000..48236782 --- /dev/null +++ b/lib/openai/models/responses/response_input_message_item.rb @@ -0,0 +1,96 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseInputMessageItem < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the message input. + # + # @return [String] + required :id, String + + # @!attribute content + # A list of one or many input items to the model, containing different content + # types. + # + # @return [Array] + required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent] } + + # @!attribute role + # The role of the message input. One of `user`, `system`, or `developer`. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Role] + required :role, enum: -> { OpenAI::Responses::ResponseInputMessageItem::Role } + + # @!attribute status + # The status of item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. 
+ # + # @return [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Status, nil] + optional :status, enum: -> { OpenAI::Responses::ResponseInputMessageItem::Status } + + # @!attribute type + # The type of the message input. Always set to `message`. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Type, nil] + optional :type, enum: -> { OpenAI::Responses::ResponseInputMessageItem::Type } + + # @!method initialize(id:, content:, role:, status: nil, type: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseInputMessageItem} for more details. + # + # @param id [String] The unique ID of the message input. + # + # @param content [Array] A list of one or many input items to the model, containing different content + # + # @param role [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Role] The role of the message input. One of `user`, `system`, or `developer`. + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Status] The status of item. One of `in_progress`, `completed`, or + # + # @param type [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Type] The type of the message input. Always set to `message`. + + # The role of the message input. One of `user`, `system`, or `developer`. + # + # @see OpenAI::Models::Responses::ResponseInputMessageItem#role + module Role + extend OpenAI::Internal::Type::Enum + + USER = :user + SYSTEM = :system + DEVELOPER = :developer + + # @!method self.values + # @return [Array] + end + + # The status of item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + # + # @see OpenAI::Models::Responses::ResponseInputMessageItem#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + INCOMPLETE = :incomplete + + # @!method self.values + # @return [Array] + end + + # The type of the message input. Always set to `message`. + # + # @see OpenAI::Models::Responses::ResponseInputMessageItem#type + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE = :message + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/responses/response_input_text.rb b/lib/openai/models/responses/response_input_text.rb index c38be5e5..9735f187 100644 --- a/lib/openai/models/responses/response_input_text.rb +++ b/lib/openai/models/responses/response_input_text.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseInputText < OpenAI::BaseModel + class ResponseInputText < OpenAI::Internal::Type::BaseModel # @!attribute text # The text input to the model. # @@ -16,15 +16,12 @@ class ResponseInputText < OpenAI::BaseModel # @return [Symbol, :input_text] required :type, const: :input_text - # @!parse - # # A text input to the model. - # # - # # @param text [String] - # # @param type [Symbol, :input_text] - # # - # def initialize(text:, type: :input_text, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(text:, type: :input_text) + # A text input to the model. + # + # @param text [String] The text input to the model. + # + # @param type [Symbol, :input_text] The type of the input item. Always `input_text`. 
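The `@!parse`-to-`@!method initialize` conversion above makes the constructor for `ResponseInputText` directly documented. A minimal usage sketch of that constructor (hedged: it assumes the gem loads via `require "openai"` and that the `const:` declaration supplies the `type` default, so only `text` is required):

require "openai"

# The :input_text const is filled in automatically by the model.
item = OpenAI::Models::Responses::ResponseInputText.new(text: "What is in this image?")
item.type # => :input_text
item.text # => "What is in this image?"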
end end end diff --git a/lib/openai/models/responses/response_item.rb b/lib/openai/models/responses/response_item.rb new file mode 100644 index 00000000..4b501f36 --- /dev/null +++ b/lib/openai/models/responses/response_item.rb @@ -0,0 +1,549 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + # Content item used to generate a response. + module ResponseItem + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant :message, -> { OpenAI::Responses::ResponseInputMessageItem } + + # An output message from the model. + variant :message, -> { OpenAI::Responses::ResponseOutputMessage } + + # The results of a file search tool call. See the + # [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information. + variant :file_search_call, -> { OpenAI::Responses::ResponseFileSearchToolCall } + + # A tool call to a computer use tool. See the + # [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information. + variant :computer_call, -> { OpenAI::Responses::ResponseComputerToolCall } + + variant :computer_call_output, -> { OpenAI::Responses::ResponseComputerToolCallOutputItem } + + # The results of a web search tool call. See the + # [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information. + variant :web_search_call, -> { OpenAI::Responses::ResponseFunctionWebSearch } + + # A tool call to run a function. See the + # [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information. + variant :function_call, -> { OpenAI::Responses::ResponseFunctionToolCallItem } + + variant :function_call_output, -> { OpenAI::Responses::ResponseFunctionToolCallOutputItem } + + # An image generation request made by the model. + variant :image_generation_call, -> { OpenAI::Responses::ResponseItem::ImageGenerationCall } + + # A tool call to run code. + variant :code_interpreter_call, -> { OpenAI::Responses::ResponseCodeInterpreterToolCall } + + # A tool call to run a command on the local shell. + variant :local_shell_call, -> { OpenAI::Responses::ResponseItem::LocalShellCall } + + # The output of a local shell tool call. + variant :local_shell_call_output, -> { OpenAI::Responses::ResponseItem::LocalShellCallOutput } + + # A list of tools available on an MCP server. + variant :mcp_list_tools, -> { OpenAI::Responses::ResponseItem::McpListTools } + + # A request for human approval of a tool invocation. + variant :mcp_approval_request, -> { OpenAI::Responses::ResponseItem::McpApprovalRequest } + + # A response to an MCP approval request. + variant :mcp_approval_response, -> { OpenAI::Responses::ResponseItem::McpApprovalResponse } + + # An invocation of a tool on an MCP server. + variant :mcp_call, -> { OpenAI::Responses::ResponseItem::McpCall } + + class ImageGenerationCall < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the image generation call. + # + # @return [String] + required :id, String + + # @!attribute result + # The generated image encoded in base64. + # + # @return [String, nil] + required :result, String, nil?: true + + # @!attribute status + # The status of the image generation call. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall::Status] + required :status, enum: -> { OpenAI::Responses::ResponseItem::ImageGenerationCall::Status } + + # @!attribute type + # The type of the image generation call. Always `image_generation_call`. 
+ # + # @return [Symbol, :image_generation_call] + required :type, const: :image_generation_call + + # @!method initialize(id:, result:, status:, type: :image_generation_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseItem::ImageGenerationCall} for more details. + # + # An image generation request made by the model. + # + # @param id [String] The unique ID of the image generation call. + # + # @param result [String, nil] The generated image encoded in base64. + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall::Status] The status of the image generation call. + # + # @param type [Symbol, :image_generation_call] The type of the image generation call. Always `image_generation_call`. + + # The status of the image generation call. + # + # @see OpenAI::Models::Responses::ResponseItem::ImageGenerationCall#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + GENERATING = :generating + FAILED = :failed + + # @!method self.values + # @return [Array] + end + end + + class LocalShellCall < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the local shell call. + # + # @return [String] + required :id, String + + # @!attribute action + # Execute a shell command on the server. + # + # @return [OpenAI::Models::Responses::ResponseItem::LocalShellCall::Action] + required :action, -> { OpenAI::Responses::ResponseItem::LocalShellCall::Action } + + # @!attribute call_id + # The unique ID of the local shell tool call generated by the model. + # + # @return [String] + required :call_id, String + + # @!attribute status + # The status of the local shell call. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseItem::LocalShellCall::Status] + required :status, enum: -> { OpenAI::Responses::ResponseItem::LocalShellCall::Status } + + # @!attribute type + # The type of the local shell call. Always `local_shell_call`. + # + # @return [Symbol, :local_shell_call] + required :type, const: :local_shell_call + + # @!method initialize(id:, action:, call_id:, status:, type: :local_shell_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseItem::LocalShellCall} for more details. + # + # A tool call to run a command on the local shell. + # + # @param id [String] The unique ID of the local shell call. + # + # @param action [OpenAI::Models::Responses::ResponseItem::LocalShellCall::Action] Execute a shell command on the server. + # + # @param call_id [String] The unique ID of the local shell tool call generated by the model. + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseItem::LocalShellCall::Status] The status of the local shell call. + # + # @param type [Symbol, :local_shell_call] The type of the local shell call. Always `local_shell_call`. + + # @see OpenAI::Models::Responses::ResponseItem::LocalShellCall#action + class Action < OpenAI::Internal::Type::BaseModel + # @!attribute command + # The command to run. + # + # @return [Array] + required :command, OpenAI::Internal::Type::ArrayOf[String] + + # @!attribute env + # Environment variables to set for the command. + # + # @return [Hash{Symbol=>String}] + required :env, OpenAI::Internal::Type::HashOf[String] + + # @!attribute type + # The type of the local shell action. Always `exec`. 
+ # + # @return [Symbol, :exec] + required :type, const: :exec + + # @!attribute timeout_ms + # Optional timeout in milliseconds for the command. + # + # @return [Integer, nil] + optional :timeout_ms, Integer, nil?: true + + # @!attribute user + # Optional user to run the command as. + # + # @return [String, nil] + optional :user, String, nil?: true + + # @!attribute working_directory + # Optional working directory to run the command in. + # + # @return [String, nil] + optional :working_directory, String, nil?: true + + # @!method initialize(command:, env:, timeout_ms: nil, user: nil, working_directory: nil, type: :exec) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseItem::LocalShellCall::Action} for more + # details. + # + # Execute a shell command on the server. + # + # @param command [Array] The command to run. + # + # @param env [Hash{Symbol=>String}] Environment variables to set for the command. + # + # @param timeout_ms [Integer, nil] Optional timeout in milliseconds for the command. + # + # @param user [String, nil] Optional user to run the command as. + # + # @param working_directory [String, nil] Optional working directory to run the command in. + # + # @param type [Symbol, :exec] The type of the local shell action. Always `exec`. + end + + # The status of the local shell call. + # + # @see OpenAI::Models::Responses::ResponseItem::LocalShellCall#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + INCOMPLETE = :incomplete + + # @!method self.values + # @return [Array] + end + end + + class LocalShellCallOutput < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the local shell tool call generated by the model. + # + # @return [String] + required :id, String + + # @!attribute output + # A JSON string of the output of the local shell tool call. + # + # @return [String] + required :output, String + + # @!attribute type + # The type of the local shell tool call output. Always `local_shell_call_output`. + # + # @return [Symbol, :local_shell_call_output] + required :type, const: :local_shell_call_output + + # @!attribute status + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput::Status, nil] + optional :status, + enum: -> { + OpenAI::Responses::ResponseItem::LocalShellCallOutput::Status + }, + nil?: true + + # @!method initialize(id:, output:, status: nil, type: :local_shell_call_output) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput} for more + # details. + # + # The output of a local shell tool call. + # + # @param id [String] The unique ID of the local shell tool call generated by the model. + # + # @param output [String] A JSON string of the output of the local shell tool call. + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput::Status, nil] The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # + # @param type [Symbol, :local_shell_call_output] The type of the local shell tool call output. Always `local_shell_call_output`. + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. 
+ # + # @see OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + INCOMPLETE = :incomplete + + # @!method self.values + # @return [Array] + end + end + + class McpListTools < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the list. + # + # @return [String] + required :id, String + + # @!attribute server_label + # The label of the MCP server. + # + # @return [String] + required :server_label, String + + # @!attribute tools + # The tools available on the server. + # + # @return [Array] + required :tools, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseItem::McpListTools::Tool] } + + # @!attribute type + # The type of the item. Always `mcp_list_tools`. + # + # @return [Symbol, :mcp_list_tools] + required :type, const: :mcp_list_tools + + # @!attribute error + # Error message if the server could not list tools. + # + # @return [String, nil] + optional :error, String, nil?: true + + # @!method initialize(id:, server_label:, tools:, error: nil, type: :mcp_list_tools) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseItem::McpListTools} for more details. + # + # A list of tools available on an MCP server. + # + # @param id [String] The unique ID of the list. + # + # @param server_label [String] The label of the MCP server. + # + # @param tools [Array] The tools available on the server. + # + # @param error [String, nil] Error message if the server could not list tools. + # + # @param type [Symbol, :mcp_list_tools] The type of the item. Always `mcp_list_tools`. + + class Tool < OpenAI::Internal::Type::BaseModel + # @!attribute input_schema + # The JSON schema describing the tool's input. + # + # @return [Object] + required :input_schema, OpenAI::Internal::Type::Unknown + + # @!attribute name + # The name of the tool. + # + # @return [String] + required :name, String + + # @!attribute annotations + # Additional annotations about the tool. + # + # @return [Object, nil] + optional :annotations, OpenAI::Internal::Type::Unknown, nil?: true + + # @!attribute description + # The description of the tool. + # + # @return [String, nil] + optional :description, String, nil?: true + + # @!method initialize(input_schema:, name:, annotations: nil, description: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseItem::McpListTools::Tool} for more details. + # + # A tool available on an MCP server. + # + # @param input_schema [Object] The JSON schema describing the tool's input. + # + # @param name [String] The name of the tool. + # + # @param annotations [Object, nil] Additional annotations about the tool. + # + # @param description [String, nil] The description of the tool. + end + end + + class McpApprovalRequest < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the approval request. + # + # @return [String] + required :id, String + + # @!attribute arguments + # A JSON string of arguments for the tool. + # + # @return [String] + required :arguments, String + + # @!attribute name + # The name of the tool to run. + # + # @return [String] + required :name, String + + # @!attribute server_label + # The label of the MCP server making the request. + # + # @return [String] + required :server_label, String + + # @!attribute type + # The type of the item. Always `mcp_approval_request`. 
+ # + # @return [Symbol, :mcp_approval_request] + required :type, const: :mcp_approval_request + + # @!method initialize(id:, arguments:, name:, server_label:, type: :mcp_approval_request) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseItem::McpApprovalRequest} for more details. + # + # A request for human approval of a tool invocation. + # + # @param id [String] The unique ID of the approval request. + # + # @param arguments [String] A JSON string of arguments for the tool. + # + # @param name [String] The name of the tool to run. + # + # @param server_label [String] The label of the MCP server making the request. + # + # @param type [Symbol, :mcp_approval_request] The type of the item. Always `mcp_approval_request`. + end + + class McpApprovalResponse < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the approval response + # + # @return [String] + required :id, String + + # @!attribute approval_request_id + # The ID of the approval request being answered. + # + # @return [String] + required :approval_request_id, String + + # @!attribute approve + # Whether the request was approved. + # + # @return [Boolean] + required :approve, OpenAI::Internal::Type::Boolean + + # @!attribute type + # The type of the item. Always `mcp_approval_response`. + # + # @return [Symbol, :mcp_approval_response] + required :type, const: :mcp_approval_response + + # @!attribute reason + # Optional reason for the decision. + # + # @return [String, nil] + optional :reason, String, nil?: true + + # @!method initialize(id:, approval_request_id:, approve:, reason: nil, type: :mcp_approval_response) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseItem::McpApprovalResponse} for more details. + # + # A response to an MCP approval request. + # + # @param id [String] The unique ID of the approval response + # + # @param approval_request_id [String] The ID of the approval request being answered. + # + # @param approve [Boolean] Whether the request was approved. + # + # @param reason [String, nil] Optional reason for the decision. + # + # @param type [Symbol, :mcp_approval_response] The type of the item. Always `mcp_approval_response`. + end + + class McpCall < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the tool call. + # + # @return [String] + required :id, String + + # @!attribute arguments + # A JSON string of the arguments passed to the tool. + # + # @return [String] + required :arguments, String + + # @!attribute name + # The name of the tool that was run. + # + # @return [String] + required :name, String + + # @!attribute server_label + # The label of the MCP server running the tool. + # + # @return [String] + required :server_label, String + + # @!attribute type + # The type of the item. Always `mcp_call`. + # + # @return [Symbol, :mcp_call] + required :type, const: :mcp_call + + # @!attribute error + # The error from the tool call, if any. + # + # @return [String, nil] + optional :error, String, nil?: true + + # @!attribute output + # The output from the tool call. + # + # @return [String, nil] + optional :output, String, nil?: true + + # @!method initialize(id:, arguments:, name:, server_label:, error: nil, output: nil, type: :mcp_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseItem::McpCall} for more details. + # + # An invocation of a tool on an MCP server. 
+ # + # @param id [String] The unique ID of the tool call. + # + # @param arguments [String] A JSON string of the arguments passed to the tool. + # + # @param name [String] The name of the tool that was run. + # + # @param server_label [String] The label of the MCP server running the tool. + # + # @param error [String, nil] The error from the tool call, if any. + # + # @param output [String, nil] The output from the tool call. + # + # @param type [Symbol, :mcp_call] The type of the item. Always `mcp_call`. + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::ResponseInputMessageItem, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseItem::LocalShellCall, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseItem::McpListTools, OpenAI::Models::Responses::ResponseItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseItem::McpCall)] + end + end + end +end diff --git a/lib/openai/models/responses/response_item_list.rb b/lib/openai/models/responses/response_item_list.rb index 595ec411..ea2b6f1b 100644 --- a/lib/openai/models/responses/response_item_list.rb +++ b/lib/openai/models/responses/response_item_list.rb @@ -3,12 +3,12 @@ module OpenAI module Models module Responses - class ResponseItemList < OpenAI::BaseModel + class ResponseItemList < OpenAI::Internal::Type::BaseModel # @!attribute data # A list of items used to generate this response. # - # @return [Array] - required :data, -> { OpenAI::ArrayOf[union: OpenAI::Models::Responses::ResponseItemList::Data] } + # @return [Array] + required :data, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseItem] } # @!attribute first_id # The ID of the first item in the list. @@ -20,7 +20,7 @@ class ResponseItemList < OpenAI::BaseModel # Whether there are more items available. # # @return [Boolean] - required :has_more, OpenAI::BooleanModel + required :has_more, OpenAI::Internal::Type::Boolean # @!attribute last_id # The ID of the last item in the list. @@ -34,343 +34,18 @@ class ResponseItemList < OpenAI::BaseModel # @return [Symbol, :list] required :object, const: :list - # @!parse - # # A list of Response items. - # # - # # @param data [Array] - # # @param first_id [String] - # # @param has_more [Boolean] - # # @param last_id [String] - # # @param object [Symbol, :list] - # # - # def initialize(data:, first_id:, has_more:, last_id:, object: :list, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(data:, first_id:, has_more:, last_id:, object: :list) + # A list of Response items. # - # Content item used to generate a response. - class Data < OpenAI::Union - discriminator :type - - variant :message, -> { OpenAI::Models::Responses::ResponseItemList::Data::Message } - - # An output message from the model. - variant :message, -> { OpenAI::Models::Responses::ResponseOutputMessage } - - # The results of a file search tool call. 
See the - # [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information. - variant :file_search_call, -> { OpenAI::Models::Responses::ResponseFileSearchToolCall } - - # A tool call to a computer use tool. See the - # [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information. - variant :computer_call, -> { OpenAI::Models::Responses::ResponseComputerToolCall } - - variant :computer_call_output, - -> { OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput } - - # The results of a web search tool call. See the - # [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information. - variant :web_search_call, -> { OpenAI::Models::Responses::ResponseFunctionWebSearch } - - # A tool call to run a function. See the - # [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information. - variant :function_call, -> { OpenAI::Models::Responses::ResponseFunctionToolCall } - - variant :function_call_output, - -> { OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput } - - class Message < OpenAI::BaseModel - # @!attribute id - # The unique ID of the message input. - # - # @return [String] - required :id, String - - # @!attribute content - # A list of one or many input items to the model, containing different content - # types. - # - # @return [Array] - required :content, -> { OpenAI::ArrayOf[union: OpenAI::Models::Responses::ResponseInputContent] } - - # @!attribute role - # The role of the message input. One of `user`, `system`, or `developer`. - # - # @return [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::Message::Role] - required :role, enum: -> { OpenAI::Models::Responses::ResponseItemList::Data::Message::Role } - - # @!attribute [r] status - # The status of item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. - # - # @return [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::Message::Status, nil] - optional :status, enum: -> { OpenAI::Models::Responses::ResponseItemList::Data::Message::Status } - - # @!parse - # # @return [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::Message::Status] - # attr_writer :status - - # @!attribute [r] type - # The type of the message input. Always set to `message`. - # - # @return [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::Message::Type, nil] - optional :type, enum: -> { OpenAI::Models::Responses::ResponseItemList::Data::Message::Type } - - # @!parse - # # @return [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::Message::Type] - # attr_writer :type - - # @!parse - # # @param id [String] - # # @param content [Array] - # # @param role [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::Message::Role] - # # @param status [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::Message::Status] - # # @param type [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::Message::Type] - # # - # def initialize(id:, content:, role:, status: nil, type: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract - # - # The role of the message input. One of `user`, `system`, or `developer`. - class Role < OpenAI::Enum - USER = :user - SYSTEM = :system - DEVELOPER = :developer - - finalize! - end - - # @abstract - # - # The status of item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. 
- class Status < OpenAI::Enum - IN_PROGRESS = :in_progress - COMPLETED = :completed - INCOMPLETE = :incomplete - - finalize! - end - - # @abstract - # - # The type of the message input. Always set to `message`. - class Type < OpenAI::Enum - MESSAGE = :message - - finalize! - end - end - - class ComputerCallOutput < OpenAI::BaseModel - # @!attribute id - # The unique ID of the computer call tool output. - # - # @return [String] - required :id, String - - # @!attribute call_id - # The ID of the computer tool call that produced the output. - # - # @return [String] - required :call_id, String - - # @!attribute output - # A computer screenshot image used with the computer use tool. - # - # @return [OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Output] - required :output, -> { OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Output } - - # @!attribute type - # The type of the computer tool call output. Always `computer_call_output`. - # - # @return [Symbol, :computer_call_output] - required :type, const: :computer_call_output - - # @!attribute [r] acknowledged_safety_checks - # The safety checks reported by the API that have been acknowledged by the - # developer. - # - # @return [Array, nil] - optional :acknowledged_safety_checks, - -> { OpenAI::ArrayOf[OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::AcknowledgedSafetyCheck] } - - # @!parse - # # @return [Array] - # attr_writer :acknowledged_safety_checks - - # @!attribute [r] status - # The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. - # - # @return [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Status, nil] - optional :status, - enum: -> { OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Status } - - # @!parse - # # @return [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Status] - # attr_writer :status - - # @!parse - # # @param id [String] - # # @param call_id [String] - # # @param output [OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Output] - # # @param acknowledged_safety_checks [Array] - # # @param status [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Status] - # # @param type [Symbol, :computer_call_output] - # # - # def initialize(id:, call_id:, output:, acknowledged_safety_checks: nil, status: nil, type: :computer_call_output, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Output < OpenAI::BaseModel - # @!attribute type - # Specifies the event type. For a computer screenshot, this property is always set - # to `computer_screenshot`. - # - # @return [Symbol, :computer_screenshot] - required :type, const: :computer_screenshot - - # @!attribute [r] file_id - # The identifier of an uploaded file that contains the screenshot. - # - # @return [String, nil] - optional :file_id, String - - # @!parse - # # @return [String] - # attr_writer :file_id - - # @!attribute [r] image_url - # The URL of the screenshot image. - # - # @return [String, nil] - optional :image_url, String - - # @!parse - # # @return [String] - # attr_writer :image_url - - # @!parse - # # A computer screenshot image used with the computer use tool. 
- # # - # # @param file_id [String] - # # @param image_url [String] - # # @param type [Symbol, :computer_screenshot] - # # - # def initialize(file_id: nil, image_url: nil, type: :computer_screenshot, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - end - - class AcknowledgedSafetyCheck < OpenAI::BaseModel - # @!attribute id - # The ID of the pending safety check. - # - # @return [String] - required :id, String - - # @!attribute code - # The type of the pending safety check. - # - # @return [String] - required :code, String - - # @!attribute message - # Details about the pending safety check. - # - # @return [String] - required :message, String - - # @!parse - # # A pending safety check for the computer call. - # # - # # @param id [String] - # # @param code [String] - # # @param message [String] - # # - # def initialize(id:, code:, message:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - end - - # @abstract - # - # The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. - class Status < OpenAI::Enum - IN_PROGRESS = :in_progress - COMPLETED = :completed - INCOMPLETE = :incomplete - - finalize! - end - end - - class FunctionCallOutput < OpenAI::BaseModel - # @!attribute id - # The unique ID of the function call tool output. - # - # @return [String] - required :id, String - - # @!attribute call_id - # The unique ID of the function tool call generated by the model. - # - # @return [String] - required :call_id, String - - # @!attribute output - # A JSON string of the output of the function tool call. - # - # @return [String] - required :output, String - - # @!attribute type - # The type of the function tool call output. Always `function_call_output`. - # - # @return [Symbol, :function_call_output] - required :type, const: :function_call_output - - # @!attribute [r] status - # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. - # - # @return [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput::Status, nil] - optional :status, - enum: -> { OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput::Status } - - # @!parse - # # @return [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput::Status] - # attr_writer :status - - # @!parse - # # @param id [String] - # # @param call_id [String] - # # @param output [String] - # # @param status [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput::Status] - # # @param type [Symbol, :function_call_output] - # # - # def initialize(id:, call_id:, output:, status: nil, type: :function_call_output, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract - # - # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. - class Status < OpenAI::Enum - IN_PROGRESS = :in_progress - COMPLETED = :completed - INCOMPLETE = :incomplete - - finalize! - end - end - end + # @param data [Array] A list of items used to generate this response. + # + # @param first_id [String] The ID of the first item in the list. + # + # @param has_more [Boolean] Whether there are more items available. + # + # @param last_id [String] The ID of the last item in the list. + # + # @param object [Symbol, :list] The type of object returned, must be `list`. 
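With the inline `Data` union deleted in favor of the shared `ResponseItem` union, consuming a `ResponseItemList` reduces to a class check per variant. A hedged sketch using only models declared in this patch — the sample IDs and tool names are invented for illustration, and `object: :list` is left to its documented default:

require "openai"

req = OpenAI::Models::Responses::ResponseItem::McpApprovalRequest.new(
  id: "mcpr_123",              # hypothetical ID, for illustration only
  arguments: "{\"sides\":6}",  # a JSON string of arguments for the tool
  name: "roll_dice",
  server_label: "dice_server"
)

page = OpenAI::Models::Responses::ResponseItemList.new(
  data: [req],
  first_id: req.id,
  has_more: false,
  last_id: req.id
)

# Dispatch on the discriminated union: each element of `data` is one of the
# ResponseItem variants listed in the @!method self.variants signature above.
page.data.each do |item|
  case item
  when OpenAI::Models::Responses::ResponseItem::McpApprovalRequest
    puts "MCP server #{item.server_label} wants to run #{item.name}"
  when OpenAI::Models::Responses::ResponseInputMessageItem
    puts "message from #{item.role}"
  end
end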
end end diff --git a/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb b/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb new file mode 100644 index 00000000..c5f7d2f3 --- /dev/null +++ b/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb @@ -0,0 +1,58 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseMcpCallArgumentsDeltaEvent < OpenAI::Internal::Type::BaseModel + # @!attribute delta + # A JSON string containing the partial update to the arguments for the MCP tool + # call. + # + # @return [String] + required :delta, String + + # @!attribute item_id + # The unique identifier of the MCP tool call item being processed. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item in the response's output array. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The type of the event. Always 'response.mcp_call_arguments.delta'. + # + # @return [Symbol, :"response.mcp_call_arguments.delta"] + required :type, const: :"response.mcp_call_arguments.delta" + + # @!method initialize(delta:, item_id:, output_index:, sequence_number:, type: :"response.mcp_call_arguments.delta") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent} for more + # details. + # + # Emitted when there is a delta (partial update) to the arguments of an MCP tool + # call. + # + # @param delta [String] A JSON string containing the partial update to the arguments for the MCP tool ca + # + # @param item_id [String] The unique identifier of the MCP tool call item being processed. + # + # @param output_index [Integer] The index of the output item in the response's output array. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.mcp_call_arguments.delta"] The type of the event. Always 'response.mcp_call_arguments.delta'. + end + end + end +end diff --git a/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb b/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb new file mode 100644 index 00000000..8b52f9db --- /dev/null +++ b/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseMcpCallArgumentsDoneEvent < OpenAI::Internal::Type::BaseModel + # @!attribute arguments + # A JSON string containing the finalized arguments for the MCP tool call. + # + # @return [String] + required :arguments, String + + # @!attribute item_id + # The unique identifier of the MCP tool call item being processed. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item in the response's output array. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The type of the event. Always 'response.mcp_call_arguments.done'. 
+ # + # @return [Symbol, :"response.mcp_call_arguments.done"] + required :type, const: :"response.mcp_call_arguments.done" + + # @!method initialize(arguments:, item_id:, output_index:, sequence_number:, type: :"response.mcp_call_arguments.done") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent} for more details. + # + # Emitted when the arguments for an MCP tool call are finalized. + # + # @param arguments [String] A JSON string containing the finalized arguments for the MCP tool call. + # + # @param item_id [String] The unique identifier of the MCP tool call item being processed. + # + # @param output_index [Integer] The index of the output item in the response's output array. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.mcp_call_arguments.done"] The type of the event. Always 'response.mcp_call_arguments.done'. + end + end + end +end diff --git a/lib/openai/models/responses/response_mcp_call_completed_event.rb b/lib/openai/models/responses/response_mcp_call_completed_event.rb new file mode 100644 index 00000000..217de6ff --- /dev/null +++ b/lib/openai/models/responses/response_mcp_call_completed_event.rb @@ -0,0 +1,44 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseMcpCallCompletedEvent < OpenAI::Internal::Type::BaseModel + # @!attribute item_id + # The ID of the MCP tool call item that completed. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item that completed. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The type of the event. Always 'response.mcp_call.completed'. + # + # @return [Symbol, :"response.mcp_call.completed"] + required :type, const: :"response.mcp_call.completed" + + # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.mcp_call.completed") + # Emitted when an MCP tool call has completed successfully. + # + # @param item_id [String] The ID of the MCP tool call item that completed. + # + # @param output_index [Integer] The index of the output item that completed. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.mcp_call.completed"] The type of the event. Always 'response.mcp_call.completed'. + end + end + end +end diff --git a/lib/openai/models/responses/response_mcp_call_failed_event.rb b/lib/openai/models/responses/response_mcp_call_failed_event.rb new file mode 100644 index 00000000..2ddbc594 --- /dev/null +++ b/lib/openai/models/responses/response_mcp_call_failed_event.rb @@ -0,0 +1,44 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseMcpCallFailedEvent < OpenAI::Internal::Type::BaseModel + # @!attribute item_id + # The ID of the MCP tool call item that failed. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item that failed. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The type of the event. Always 'response.mcp_call.failed'. 
+ # + # @return [Symbol, :"response.mcp_call.failed"] + required :type, const: :"response.mcp_call.failed" + + # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.mcp_call.failed") + # Emitted when an MCP tool call has failed. + # + # @param item_id [String] The ID of the MCP tool call item that failed. + # + # @param output_index [Integer] The index of the output item that failed. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.mcp_call.failed"] The type of the event. Always 'response.mcp_call.failed'. + end + end + end +end diff --git a/lib/openai/models/responses/response_mcp_call_in_progress_event.rb b/lib/openai/models/responses/response_mcp_call_in_progress_event.rb new file mode 100644 index 00000000..4d02f14f --- /dev/null +++ b/lib/openai/models/responses/response_mcp_call_in_progress_event.rb @@ -0,0 +1,44 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseMcpCallInProgressEvent < OpenAI::Internal::Type::BaseModel + # @!attribute item_id + # The unique identifier of the MCP tool call item being processed. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item in the response's output array. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The type of the event. Always 'response.mcp_call.in_progress'. + # + # @return [Symbol, :"response.mcp_call.in_progress"] + required :type, const: :"response.mcp_call.in_progress" + + # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.mcp_call.in_progress") + # Emitted when an MCP tool call is in progress. + # + # @param item_id [String] The unique identifier of the MCP tool call item being processed. + # + # @param output_index [Integer] The index of the output item in the response's output array. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.mcp_call.in_progress"] The type of the event. Always 'response.mcp_call.in_progress'. + end + end + end +end diff --git a/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb b/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb new file mode 100644 index 00000000..fd58a599 --- /dev/null +++ b/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb @@ -0,0 +1,44 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseMcpListToolsCompletedEvent < OpenAI::Internal::Type::BaseModel + # @!attribute item_id + # The ID of the MCP tool call item that produced this output. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item that was processed. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The type of the event. Always 'response.mcp_list_tools.completed'. 
+ # + # @return [Symbol, :"response.mcp_list_tools.completed"] + required :type, const: :"response.mcp_list_tools.completed" + + # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.mcp_list_tools.completed") + # Emitted when the list of available MCP tools has been successfully retrieved. + # + # @param item_id [String] The ID of the MCP tool call item that produced this output. + # + # @param output_index [Integer] The index of the output item that was processed. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.mcp_list_tools.completed"] The type of the event. Always 'response.mcp_list_tools.completed'. + end + end + end +end diff --git a/lib/openai/models/responses/response_mcp_list_tools_failed_event.rb b/lib/openai/models/responses/response_mcp_list_tools_failed_event.rb new file mode 100644 index 00000000..d93f1e94 --- /dev/null +++ b/lib/openai/models/responses/response_mcp_list_tools_failed_event.rb @@ -0,0 +1,44 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseMcpListToolsFailedEvent < OpenAI::Internal::Type::BaseModel + # @!attribute item_id + # The ID of the MCP tool call item that failed. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item that failed. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The type of the event. Always 'response.mcp_list_tools.failed'. + # + # @return [Symbol, :"response.mcp_list_tools.failed"] + required :type, const: :"response.mcp_list_tools.failed" + + # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.mcp_list_tools.failed") + # Emitted when the attempt to list available MCP tools has failed. + # + # @param item_id [String] The ID of the MCP tool call item that failed. + # + # @param output_index [Integer] The index of the output item that failed. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.mcp_list_tools.failed"] The type of the event. Always 'response.mcp_list_tools.failed'. + end + end + end +end diff --git a/lib/openai/models/responses/response_mcp_list_tools_in_progress_event.rb b/lib/openai/models/responses/response_mcp_list_tools_in_progress_event.rb new file mode 100644 index 00000000..5bf708ac --- /dev/null +++ b/lib/openai/models/responses/response_mcp_list_tools_in_progress_event.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseMcpListToolsInProgressEvent < OpenAI::Internal::Type::BaseModel + # @!attribute item_id + # The ID of the MCP tool call item that is being processed. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item that is being processed. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The type of the event. Always 'response.mcp_list_tools.in_progress'. 
+ # + # @return [Symbol, :"response.mcp_list_tools.in_progress"] + required :type, const: :"response.mcp_list_tools.in_progress" + + # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.mcp_list_tools.in_progress") + # Emitted when the system is in the process of retrieving the list of available + # MCP tools. + # + # @param item_id [String] The ID of the MCP tool call item that is being processed. + # + # @param output_index [Integer] The index of the output item that is being processed. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.mcp_list_tools.in_progress"] The type of the event. Always 'response.mcp_list_tools.in_progress'. + end + end + end +end diff --git a/lib/openai/models/responses/response_output_audio.rb b/lib/openai/models/responses/response_output_audio.rb index da147435..811fed00 100644 --- a/lib/openai/models/responses/response_output_audio.rb +++ b/lib/openai/models/responses/response_output_audio.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseOutputAudio < OpenAI::BaseModel + class ResponseOutputAudio < OpenAI::Internal::Type::BaseModel # @!attribute data # Base64-encoded audio data from the model. # @@ -22,16 +22,17 @@ class ResponseOutputAudio < OpenAI::BaseModel # @return [Symbol, :output_audio] required :type, const: :output_audio - # @!parse - # # An audio output from the model. - # # - # # @param data [String] - # # @param transcript [String] - # # @param type [Symbol, :output_audio] - # # - # def initialize(data:, transcript:, type: :output_audio, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, transcript:, type: :output_audio) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseOutputAudio} for more details. + # + # An audio output from the model. + # + # @param data [String] Base64-encoded audio data from the model. + # + # @param transcript [String] The transcript of the audio data from the model. + # + # @param type [Symbol, :output_audio] The type of the output audio. Always `output_audio`. end end end diff --git a/lib/openai/models/responses/response_output_item.rb b/lib/openai/models/responses/response_output_item.rb index 2a71d21e..5e2bd164 100644 --- a/lib/openai/models/responses/response_output_item.rb +++ b/lib/openai/models/responses/response_output_item.rb @@ -3,34 +3,440 @@ module OpenAI module Models module Responses - # @abstract - # # An output message from the model. - class ResponseOutputItem < OpenAI::Union + module ResponseOutputItem + extend OpenAI::Internal::Type::Union + discriminator :type # An output message from the model. - variant :message, -> { OpenAI::Models::Responses::ResponseOutputMessage } + variant :message, -> { OpenAI::Responses::ResponseOutputMessage } # The results of a file search tool call. See the # [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information. - variant :file_search_call, -> { OpenAI::Models::Responses::ResponseFileSearchToolCall } + variant :file_search_call, -> { OpenAI::Responses::ResponseFileSearchToolCall } # A tool call to run a function. See the # [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information. 
- variant :function_call, -> { OpenAI::Models::Responses::ResponseFunctionToolCall } + variant :function_call, -> { OpenAI::Responses::ResponseFunctionToolCall } # The results of a web search tool call. See the # [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information. - variant :web_search_call, -> { OpenAI::Models::Responses::ResponseFunctionWebSearch } + variant :web_search_call, -> { OpenAI::Responses::ResponseFunctionWebSearch } # A tool call to a computer use tool. See the # [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information. - variant :computer_call, -> { OpenAI::Models::Responses::ResponseComputerToolCall } + variant :computer_call, -> { OpenAI::Responses::ResponseComputerToolCall } # A description of the chain of thought used by a reasoning model while generating - # a response. - variant :reasoning, -> { OpenAI::Models::Responses::ResponseReasoningItem } + # a response. Be sure to include these items in your `input` to the Responses API + # for subsequent turns of a conversation if you are manually + # [managing context](https://platform.openai.com/docs/guides/conversation-state). + variant :reasoning, -> { OpenAI::Responses::ResponseReasoningItem } + + # An image generation request made by the model. + variant :image_generation_call, -> { OpenAI::Responses::ResponseOutputItem::ImageGenerationCall } + + # A tool call to run code. + variant :code_interpreter_call, -> { OpenAI::Responses::ResponseCodeInterpreterToolCall } + + # A tool call to run a command on the local shell. + variant :local_shell_call, -> { OpenAI::Responses::ResponseOutputItem::LocalShellCall } + + # An invocation of a tool on an MCP server. + variant :mcp_call, -> { OpenAI::Responses::ResponseOutputItem::McpCall } + + # A list of tools available on an MCP server. + variant :mcp_list_tools, -> { OpenAI::Responses::ResponseOutputItem::McpListTools } + + # A request for human approval of a tool invocation. + variant :mcp_approval_request, -> { OpenAI::Responses::ResponseOutputItem::McpApprovalRequest } + + # A call to a custom tool created by the model. + variant :custom_tool_call, -> { OpenAI::Responses::ResponseCustomToolCall } + + class ImageGenerationCall < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the image generation call. + # + # @return [String] + required :id, String + + # @!attribute result + # The generated image encoded in base64. + # + # @return [String, nil] + required :result, String, nil?: true + + # @!attribute status + # The status of the image generation call. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall::Status] + required :status, enum: -> { OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::Status } + + # @!attribute type + # The type of the image generation call. Always `image_generation_call`. + # + # @return [Symbol, :image_generation_call] + required :type, const: :image_generation_call + + # @!method initialize(id:, result:, status:, type: :image_generation_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall} for more + # details. + # + # An image generation request made by the model. + # + # @param id [String] The unique ID of the image generation call. + # + # @param result [String, nil] The generated image encoded in base64. 
+ # + # @param status [Symbol, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall::Status] The status of the image generation call. + # + # @param type [Symbol, :image_generation_call] The type of the image generation call. Always `image_generation_call`. + + # The status of the image generation call. + # + # @see OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + GENERATING = :generating + FAILED = :failed + + # @!method self.values + # @return [Array] + end + end + + class LocalShellCall < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the local shell call. + # + # @return [String] + required :id, String + + # @!attribute action + # Execute a shell command on the server. + # + # @return [OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall::Action] + required :action, -> { OpenAI::Responses::ResponseOutputItem::LocalShellCall::Action } + + # @!attribute call_id + # The unique ID of the local shell tool call generated by the model. + # + # @return [String] + required :call_id, String + + # @!attribute status + # The status of the local shell call. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall::Status] + required :status, enum: -> { OpenAI::Responses::ResponseOutputItem::LocalShellCall::Status } + + # @!attribute type + # The type of the local shell call. Always `local_shell_call`. + # + # @return [Symbol, :local_shell_call] + required :type, const: :local_shell_call + + # @!method initialize(id:, action:, call_id:, status:, type: :local_shell_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall} for more + # details. + # + # A tool call to run a command on the local shell. + # + # @param id [String] The unique ID of the local shell call. + # + # @param action [OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall::Action] Execute a shell command on the server. + # + # @param call_id [String] The unique ID of the local shell tool call generated by the model. + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall::Status] The status of the local shell call. + # + # @param type [Symbol, :local_shell_call] The type of the local shell call. Always `local_shell_call`. + + # @see OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall#action + class Action < OpenAI::Internal::Type::BaseModel + # @!attribute command + # The command to run. + # + # @return [Array] + required :command, OpenAI::Internal::Type::ArrayOf[String] + + # @!attribute env + # Environment variables to set for the command. + # + # @return [Hash{Symbol=>String}] + required :env, OpenAI::Internal::Type::HashOf[String] + + # @!attribute type + # The type of the local shell action. Always `exec`. + # + # @return [Symbol, :exec] + required :type, const: :exec + + # @!attribute timeout_ms + # Optional timeout in milliseconds for the command. + # + # @return [Integer, nil] + optional :timeout_ms, Integer, nil?: true + + # @!attribute user + # Optional user to run the command as. + # + # @return [String, nil] + optional :user, String, nil?: true + + # @!attribute working_directory + # Optional working directory to run the command in. 
+ # + # @return [String, nil] + optional :working_directory, String, nil?: true + + # @!method initialize(command:, env:, timeout_ms: nil, user: nil, working_directory: nil, type: :exec) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall::Action} for more + # details. + # + # Execute a shell command on the server. + # + # @param command [Array] The command to run. + # + # @param env [Hash{Symbol=>String}] Environment variables to set for the command. + # + # @param timeout_ms [Integer, nil] Optional timeout in milliseconds for the command. + # + # @param user [String, nil] Optional user to run the command as. + # + # @param working_directory [String, nil] Optional working directory to run the command in. + # + # @param type [Symbol, :exec] The type of the local shell action. Always `exec`. + end + + # The status of the local shell call. + # + # @see OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall#status + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS = :in_progress + COMPLETED = :completed + INCOMPLETE = :incomplete + + # @!method self.values + # @return [Array] + end + end + + class McpCall < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the tool call. + # + # @return [String] + required :id, String + + # @!attribute arguments + # A JSON string of the arguments passed to the tool. + # + # @return [String] + required :arguments, String + + # @!attribute name + # The name of the tool that was run. + # + # @return [String] + required :name, String + + # @!attribute server_label + # The label of the MCP server running the tool. + # + # @return [String] + required :server_label, String + + # @!attribute type + # The type of the item. Always `mcp_call`. + # + # @return [Symbol, :mcp_call] + required :type, const: :mcp_call + + # @!attribute error + # The error from the tool call, if any. + # + # @return [String, nil] + optional :error, String, nil?: true + + # @!attribute output + # The output from the tool call. + # + # @return [String, nil] + optional :output, String, nil?: true + + # @!method initialize(id:, arguments:, name:, server_label:, error: nil, output: nil, type: :mcp_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseOutputItem::McpCall} for more details. + # + # An invocation of a tool on an MCP server. + # + # @param id [String] The unique ID of the tool call. + # + # @param arguments [String] A JSON string of the arguments passed to the tool. + # + # @param name [String] The name of the tool that was run. + # + # @param server_label [String] The label of the MCP server running the tool. + # + # @param error [String, nil] The error from the tool call, if any. + # + # @param output [String, nil] The output from the tool call. + # + # @param type [Symbol, :mcp_call] The type of the item. Always `mcp_call`. + end + + class McpListTools < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the list. + # + # @return [String] + required :id, String + + # @!attribute server_label + # The label of the MCP server. + # + # @return [String] + required :server_label, String + + # @!attribute tools + # The tools available on the server. + # + # @return [Array] + required :tools, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputItem::McpListTools::Tool] } + + # @!attribute type + # The type of the item. Always `mcp_list_tools`. 
+ # + # @return [Symbol, :mcp_list_tools] + required :type, const: :mcp_list_tools + + # @!attribute error + # Error message if the server could not list tools. + # + # @return [String, nil] + optional :error, String, nil?: true + + # @!method initialize(id:, server_label:, tools:, error: nil, type: :mcp_list_tools) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseOutputItem::McpListTools} for more details. + # + # A list of tools available on an MCP server. + # + # @param id [String] The unique ID of the list. + # + # @param server_label [String] The label of the MCP server. + # + # @param tools [Array] The tools available on the server. + # + # @param error [String, nil] Error message if the server could not list tools. + # + # @param type [Symbol, :mcp_list_tools] The type of the item. Always `mcp_list_tools`. + + class Tool < OpenAI::Internal::Type::BaseModel + # @!attribute input_schema + # The JSON schema describing the tool's input. + # + # @return [Object] + required :input_schema, OpenAI::Internal::Type::Unknown + + # @!attribute name + # The name of the tool. + # + # @return [String] + required :name, String + + # @!attribute annotations + # Additional annotations about the tool. + # + # @return [Object, nil] + optional :annotations, OpenAI::Internal::Type::Unknown, nil?: true + + # @!attribute description + # The description of the tool. + # + # @return [String, nil] + optional :description, String, nil?: true + + # @!method initialize(input_schema:, name:, annotations: nil, description: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseOutputItem::McpListTools::Tool} for more + # details. + # + # A tool available on an MCP server. + # + # @param input_schema [Object] The JSON schema describing the tool's input. + # + # @param name [String] The name of the tool. + # + # @param annotations [Object, nil] Additional annotations about the tool. + # + # @param description [String, nil] The description of the tool. + end + end + + class McpApprovalRequest < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the approval request. + # + # @return [String] + required :id, String + + # @!attribute arguments + # A JSON string of arguments for the tool. + # + # @return [String] + required :arguments, String + + # @!attribute name + # The name of the tool to run. + # + # @return [String] + required :name, String + + # @!attribute server_label + # The label of the MCP server making the request. + # + # @return [String] + required :server_label, String + + # @!attribute type + # The type of the item. Always `mcp_approval_request`. + # + # @return [Symbol, :mcp_approval_request] + required :type, const: :mcp_approval_request + + # @!method initialize(id:, arguments:, name:, server_label:, type: :mcp_approval_request) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest} for more + # details. + # + # A request for human approval of a tool invocation. + # + # @param id [String] The unique ID of the approval request. + # + # @param arguments [String] A JSON string of arguments for the tool. + # + # @param name [String] The name of the tool to run. + # + # @param server_label [String] The label of the MCP server making the request. + # + # @param type [Symbol, :mcp_approval_request] The type of the item. Always `mcp_approval_request`. 
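+ #
+ # A minimal usage sketch (not part of the upstream change); `item` is assumed
+ # to be an already-parsed response output item.
+ #
+ # @example Surfacing an approval request
+ #   case item
+ #   when OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest
+ #     # `arguments` arrives as a JSON-encoded string produced by the model
+ #     puts "#{item.server_label} wants to call #{item.name}(#{item.arguments})"
+ #   end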
+ end + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall)] end end end diff --git a/lib/openai/models/responses/response_output_item_added_event.rb b/lib/openai/models/responses/response_output_item_added_event.rb index 9885b0a9..1ca221ee 100644 --- a/lib/openai/models/responses/response_output_item_added_event.rb +++ b/lib/openai/models/responses/response_output_item_added_event.rb @@ -3,12 +3,12 @@ module OpenAI module Models module Responses - class ResponseOutputItemAddedEvent < OpenAI::BaseModel + class ResponseOutputItemAddedEvent < OpenAI::Internal::Type::BaseModel # @!attribute item # The output item that was added. # - # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem] - required :item, union: -> { OpenAI::Models::Responses::ResponseOutputItem } + # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] + required :item, union: -> { OpenAI::Responses::ResponseOutputItem } # @!attribute output_index # The index of the output item that was added. @@ -16,22 +16,31 @@ class ResponseOutputItemAddedEvent < OpenAI::BaseModel # @return [Integer] required :output_index, Integer + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute type # The type of the event. Always `response.output_item.added`. # # @return [Symbol, :"response.output_item.added"] required :type, const: :"response.output_item.added" - # @!parse - # # Emitted when a new output item is added. 
- # # - # # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem] - # # @param output_index [Integer] - # # @param type [Symbol, :"response.output_item.added"] - # # - # def initialize(item:, output_index:, type: :"response.output_item.added", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(item:, output_index:, sequence_number:, type: :"response.output_item.added") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseOutputItemAddedEvent} for more details. + # + # Emitted when a new output item is added. + # + # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] The output item that was added. + # + # @param output_index [Integer] The index of the output item that was added. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.output_item.added"] The type of the event. Always `response.output_item.added`. end end end diff --git a/lib/openai/models/responses/response_output_item_done_event.rb b/lib/openai/models/responses/response_output_item_done_event.rb index 6840bbd7..f96435eb 100644 --- a/lib/openai/models/responses/response_output_item_done_event.rb +++ b/lib/openai/models/responses/response_output_item_done_event.rb @@ -3,12 +3,12 @@ module OpenAI module Models module Responses - class ResponseOutputItemDoneEvent < OpenAI::BaseModel + class ResponseOutputItemDoneEvent < OpenAI::Internal::Type::BaseModel # @!attribute item # The output item that was marked done. 
# - # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem] - required :item, union: -> { OpenAI::Models::Responses::ResponseOutputItem } + # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] + required :item, union: -> { OpenAI::Responses::ResponseOutputItem } # @!attribute output_index # The index of the output item that was marked done. @@ -16,22 +16,31 @@ class ResponseOutputItemDoneEvent < OpenAI::BaseModel # @return [Integer] required :output_index, Integer + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute type # The type of the event. Always `response.output_item.done`. # # @return [Symbol, :"response.output_item.done"] required :type, const: :"response.output_item.done" - # @!parse - # # Emitted when an output item is marked done. - # # - # # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem] - # # @param output_index [Integer] - # # @param type [Symbol, :"response.output_item.done"] - # # - # def initialize(item:, output_index:, type: :"response.output_item.done", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(item:, output_index:, sequence_number:, type: :"response.output_item.done") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseOutputItemDoneEvent} for more details. + # + # Emitted when an output item is marked done. + # + # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] The output item that was marked done. 
+ # + # @param output_index [Integer] The index of the output item that was marked done. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.output_item.done"] The type of the event. Always `response.output_item.done`. end end end diff --git a/lib/openai/models/responses/response_output_message.rb b/lib/openai/models/responses/response_output_message.rb index 7fcb4c6a..0f5f43e6 100644 --- a/lib/openai/models/responses/response_output_message.rb +++ b/lib/openai/models/responses/response_output_message.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseOutputMessage < OpenAI::BaseModel + class ResponseOutputMessage < OpenAI::Internal::Type::BaseModel # @!attribute id # The unique ID of the output message. # @@ -15,7 +15,7 @@ class ResponseOutputMessage < OpenAI::BaseModel # # @return [Array] required :content, - -> { OpenAI::ArrayOf[union: OpenAI::Models::Responses::ResponseOutputMessage::Content] } + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputMessage::Content] } # @!attribute role # The role of the output message. Always `assistant`. @@ -25,10 +25,10 @@ class ResponseOutputMessage < OpenAI::BaseModel # @!attribute status # The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. + # `incomplete`. Populated when input items are returned via API. # # @return [Symbol, OpenAI::Models::Responses::ResponseOutputMessage::Status] - required :status, enum: -> { OpenAI::Models::Responses::ResponseOutputMessage::Status } + required :status, enum: -> { OpenAI::Responses::ResponseOutputMessage::Status } # @!attribute type # The type of the output message. Always `message`. @@ -36,42 +36,51 @@ class ResponseOutputMessage < OpenAI::BaseModel # @return [Symbol, :message] required :type, const: :message - # @!parse - # # An output message from the model. - # # - # # @param id [String] - # # @param content [Array] - # # @param status [Symbol, OpenAI::Models::Responses::ResponseOutputMessage::Status] - # # @param role [Symbol, :assistant] - # # @param type [Symbol, :message] - # # - # def initialize(id:, content:, status:, role: :assistant, type: :message, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(id:, content:, status:, role: :assistant, type: :message) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseOutputMessage} for more details. + # + # An output message from the model. + # + # @param id [String] The unique ID of the output message. + # + # @param content [Array] The content of the output message. + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseOutputMessage::Status] The status of the message input. One of `in_progress`, `completed`, or + # + # @param role [Symbol, :assistant] The role of the output message. Always `assistant`. # + # @param type [Symbol, :message] The type of the output message. Always `message`. + # A text output from the model. - class Content < OpenAI::Union + module Content + extend OpenAI::Internal::Type::Union + discriminator :type # A text output from the model. - variant :output_text, -> { OpenAI::Models::Responses::ResponseOutputText } + variant :output_text, -> { OpenAI::Responses::ResponseOutputText } # A refusal from the model. 
- variant :refusal, -> { OpenAI::Models::Responses::ResponseOutputRefusal } + variant :refusal, -> { OpenAI::Responses::ResponseOutputRefusal } + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)] end - # @abstract - # # The status of the message input. One of `in_progress`, `completed`, or - # `incomplete`. Populated when input items are returned via API. - class Status < OpenAI::Enum + # `incomplete`. Populated when input items are returned via API. + # + # @see OpenAI::Models::Responses::ResponseOutputMessage#status + module Status + extend OpenAI::Internal::Type::Enum + IN_PROGRESS = :in_progress COMPLETED = :completed INCOMPLETE = :incomplete - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/responses/response_output_refusal.rb b/lib/openai/models/responses/response_output_refusal.rb index bd44ae1e..bf6272e6 100644 --- a/lib/openai/models/responses/response_output_refusal.rb +++ b/lib/openai/models/responses/response_output_refusal.rb @@ -3,9 +3,9 @@ module OpenAI module Models module Responses - class ResponseOutputRefusal < OpenAI::BaseModel + class ResponseOutputRefusal < OpenAI::Internal::Type::BaseModel # @!attribute refusal - # The refusal explanationfrom the model. + # The refusal explanation from the model. # # @return [String] required :refusal, String @@ -16,15 +16,12 @@ class ResponseOutputRefusal < OpenAI::BaseModel # @return [Symbol, :refusal] required :type, const: :refusal - # @!parse - # # A refusal from the model. - # # - # # @param refusal [String] - # # @param type [Symbol, :refusal] - # # - # def initialize(refusal:, type: :refusal, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(refusal:, type: :refusal) + # A refusal from the model. + # + # @param refusal [String] The refusal explanation from the model. + # + # @param type [Symbol, :refusal] The type of the refusal. Always `refusal`. end end end diff --git a/lib/openai/models/responses/response_output_text.rb b/lib/openai/models/responses/response_output_text.rb index 8e83fef4..b290fe14 100644 --- a/lib/openai/models/responses/response_output_text.rb +++ b/lib/openai/models/responses/response_output_text.rb @@ -3,13 +3,13 @@ module OpenAI module Models module Responses - class ResponseOutputText < OpenAI::BaseModel + class ResponseOutputText < OpenAI::Internal::Type::BaseModel # @!attribute annotations # The annotations of the text output. # - # @return [Array] + # @return [Array] required :annotations, - -> { OpenAI::ArrayOf[union: OpenAI::Models::Responses::ResponseOutputText::Annotation] } + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputText::Annotation] } # @!attribute text # The text output from the model. @@ -23,39 +23,54 @@ class ResponseOutputText < OpenAI::BaseModel # @return [Symbol, :output_text] required :type, const: :output_text - # @!parse - # # A text output from the model. 
- # # - # # @param annotations [Array] - # # @param text [String] - # # @param type [Symbol, :output_text] - # # - # def initialize(annotations:, text:, type: :output_text, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!attribute logprobs + # + # @return [Array, nil] + optional :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob] } - # @abstract + # @!method initialize(annotations:, text:, logprobs: nil, type: :output_text) + # A text output from the model. + # + # @param annotations [Array] The annotations of the text output. + # + # @param text [String] The text output from the model. + # + # @param logprobs [Array] # + # @param type [Symbol, :output_text] The type of the output text. Always `output_text`. + # A citation to a file. - class Annotation < OpenAI::Union + module Annotation + extend OpenAI::Internal::Type::Union + discriminator :type # A citation to a file. - variant :file_citation, -> { OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation } + variant :file_citation, -> { OpenAI::Responses::ResponseOutputText::Annotation::FileCitation } # A citation for a web resource used to generate a model response. - variant :url_citation, -> { OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation } + variant :url_citation, -> { OpenAI::Responses::ResponseOutputText::Annotation::URLCitation } + + # A citation for a container file used to generate a model response. + variant :container_file_citation, + -> { OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation } # A path to a file. - variant :file_path, -> { OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath } + variant :file_path, -> { OpenAI::Responses::ResponseOutputText::Annotation::FilePath } - class FileCitation < OpenAI::BaseModel + class FileCitation < OpenAI::Internal::Type::BaseModel # @!attribute file_id # The ID of the file. # # @return [String] required :file_id, String + # @!attribute filename + # The filename of the file cited. + # + # @return [String] + required :filename, String + # @!attribute index # The index of the file in the list of files. # @@ -68,19 +83,19 @@ class FileCitation < OpenAI::BaseModel # @return [Symbol, :file_citation] required :type, const: :file_citation - # @!parse - # # A citation to a file. - # # - # # @param file_id [String] - # # @param index [Integer] - # # @param type [Symbol, :file_citation] - # # - # def initialize(file_id:, index:, type: :file_citation, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(file_id:, filename:, index:, type: :file_citation) + # A citation to a file. + # + # @param file_id [String] The ID of the file. + # + # @param filename [String] The filename of the file cited. + # + # @param index [Integer] The index of the file in the list of files. + # + # @param type [Symbol, :file_citation] The type of the file citation. Always `file_citation`. end - class URLCitation < OpenAI::BaseModel + class URLCitation < OpenAI::Internal::Type::BaseModel # @!attribute end_index # The index of the last character of the URL citation in the message. # @@ -111,21 +126,74 @@ class URLCitation < OpenAI::BaseModel # @return [String] required :url, String - # @!parse - # # A citation for a web resource used to generate a model response. 
- # # - # # @param end_index [Integer] - # # @param start_index [Integer] - # # @param title [String] - # # @param url [String] - # # @param type [Symbol, :url_citation] - # # - # def initialize(end_index:, start_index:, title:, url:, type: :url_citation, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(end_index:, start_index:, title:, url:, type: :url_citation) + # A citation for a web resource used to generate a model response. + # + # @param end_index [Integer] The index of the last character of the URL citation in the message. + # + # @param start_index [Integer] The index of the first character of the URL citation in the message. + # + # @param title [String] The title of the web resource. + # + # @param url [String] The URL of the web resource. + # + # @param type [Symbol, :url_citation] The type of the URL citation. Always `url_citation`. + end + + class ContainerFileCitation < OpenAI::Internal::Type::BaseModel + # @!attribute container_id + # The ID of the container file. + # + # @return [String] + required :container_id, String + + # @!attribute end_index + # The index of the last character of the container file citation in the message. + # + # @return [Integer] + required :end_index, Integer + + # @!attribute file_id + # The ID of the file. + # + # @return [String] + required :file_id, String + + # @!attribute filename + # The filename of the container file cited. + # + # @return [String] + required :filename, String + + # @!attribute start_index + # The index of the first character of the container file citation in the message. + # + # @return [Integer] + required :start_index, Integer + + # @!attribute type + # The type of the container file citation. Always `container_file_citation`. + # + # @return [Symbol, :container_file_citation] + required :type, const: :container_file_citation + + # @!method initialize(container_id:, end_index:, file_id:, filename:, start_index:, type: :container_file_citation) + # A citation for a container file used to generate a model response. + # + # @param container_id [String] The ID of the container file. + # + # @param end_index [Integer] The index of the last character of the container file citation in the message. + # + # @param file_id [String] The ID of the file. + # + # @param filename [String] The filename of the container file cited. + # + # @param start_index [Integer] The index of the first character of the container file citation in the message. + # + # @param type [Symbol, :container_file_citation] The type of the container file citation. Always `container_file_citation`. end - class FilePath < OpenAI::BaseModel + class FilePath < OpenAI::Internal::Type::BaseModel # @!attribute file_id # The ID of the file. # @@ -144,16 +212,76 @@ class FilePath < OpenAI::BaseModel # @return [Symbol, :file_path] required :type, const: :file_path - # @!parse - # # A path to a file. - # # - # # @param file_id [String] - # # @param index [Integer] - # # @param type [Symbol, :file_path] - # # - # def initialize(file_id:, index:, type: :file_path, **) = super + # @!method initialize(file_id:, index:, type: :file_path) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath} for more + # details. + # + # A path to a file. + # + # @param file_id [String] The ID of the file. + # + # @param index [Integer] The index of the file in the list of files. + # + # @param type [Symbol, :file_path] The type of the file path. Always `file_path`. 
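+ #
+ # A minimal usage sketch (not part of the upstream change); `annotation` is
+ # assumed to be one of the parsed Annotation variants.
+ #
+ # @example Reacting to a file path annotation
+ #   if annotation.is_a?(OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath)
+ #     puts "response cites file #{annotation.file_id} (annotation ##{annotation.index})"
+ #   end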
+ end + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::ContainerFileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath)] + end + + class Logprob < OpenAI::Internal::Type::BaseModel + # @!attribute token + # + # @return [String] + required :token, String + + # @!attribute bytes + # + # @return [Array] + required :bytes, OpenAI::Internal::Type::ArrayOf[Integer] + + # @!attribute logprob + # + # @return [Float] + required :logprob, Float + + # @!attribute top_logprobs + # + # @return [Array] + required :top_logprobs, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob] } - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(token:, bytes:, logprob:, top_logprobs:) + # The log probability of a token. + # + # @param token [String] + # @param bytes [Array] + # @param logprob [Float] + # @param top_logprobs [Array] + + class TopLogprob < OpenAI::Internal::Type::BaseModel + # @!attribute token + # + # @return [String] + required :token, String + + # @!attribute bytes + # + # @return [Array] + required :bytes, OpenAI::Internal::Type::ArrayOf[Integer] + + # @!attribute logprob + # + # @return [Float] + required :logprob, Float + + # @!method initialize(token:, bytes:, logprob:) + # The top log probability of a token. + # + # @param token [String] + # @param bytes [Array] + # @param logprob [Float] end end end diff --git a/lib/openai/models/responses/response_output_text_annotation_added_event.rb b/lib/openai/models/responses/response_output_text_annotation_added_event.rb new file mode 100644 index 00000000..0a113db0 --- /dev/null +++ b/lib/openai/models/responses/response_output_text_annotation_added_event.rb @@ -0,0 +1,68 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseOutputTextAnnotationAddedEvent < OpenAI::Internal::Type::BaseModel + # @!attribute annotation + # The annotation object being added. (See annotation schema for details.) + # + # @return [Object] + required :annotation, OpenAI::Internal::Type::Unknown + + # @!attribute annotation_index + # The index of the annotation within the content part. + # + # @return [Integer] + required :annotation_index, Integer + + # @!attribute content_index + # The index of the content part within the output item. + # + # @return [Integer] + required :content_index, Integer + + # @!attribute item_id + # The unique identifier of the item to which the annotation is being added. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item in the response's output array. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The type of the event. Always 'response.output_text.annotation.added'. + # + # @return [Symbol, :"response.output_text.annotation.added"] + required :type, const: :"response.output_text.annotation.added" + + # @!method initialize(annotation:, annotation_index:, content_index:, item_id:, output_index:, sequence_number:, type: :"response.output_text.annotation.added") + # Emitted when an annotation is added to output text content. 
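+ #
+ # A minimal streaming sketch (not part of the upstream change); `event` is a
+ # hypothetical parsed stream event and `annotations` is a Hash maintained by
+ # the caller.
+ #
+ # @example Accumulating annotations per output item
+ #   if event.type == :"response.output_text.annotation.added"
+ #     (annotations[event.item_id] ||= []) << event.annotation
+ #   end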
+ # + # @param annotation [Object] The annotation object being added. (See annotation schema for details.) + # + # @param annotation_index [Integer] The index of the annotation within the content part. + # + # @param content_index [Integer] The index of the content part within the output item. + # + # @param item_id [String] The unique identifier of the item to which the annotation is being added. + # + # @param output_index [Integer] The index of the output item in the response's output array. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.output_text.annotation.added"] The type of the event. Always 'response.output_text.annotation.added'. + end + end + end +end diff --git a/lib/openai/models/responses/response_prompt.rb b/lib/openai/models/responses/response_prompt.rb new file mode 100644 index 00000000..aa5b7fb0 --- /dev/null +++ b/lib/openai/models/responses/response_prompt.rb @@ -0,0 +1,63 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponsePrompt < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique identifier of the prompt template to use. + # + # @return [String] + required :id, String + + # @!attribute variables + # Optional map of values to substitute in for variables in your prompt. The + # substitution values can either be strings, or other Response input types like + # images or files. + # + # @return [Hash{Symbol=>String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile}, nil] + optional :variables, + -> { OpenAI::Internal::Type::HashOf[union: OpenAI::Responses::ResponsePrompt::Variable] }, + nil?: true + + # @!attribute version + # Optional version of the prompt template. + # + # @return [String, nil] + optional :version, String, nil?: true + + # @!method initialize(id:, variables: nil, version: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponsePrompt} for more details. + # + # Reference to a prompt template and its variables. + # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + # + # @param id [String] The unique identifier of the prompt template to use. + # + # @param variables [Hash{Symbol=>String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile}, nil] Optional map of values to substitute in for variables in your + # + # @param version [String, nil] Optional version of the prompt template. + + # A text input to the model. + module Variable + extend OpenAI::Internal::Type::Union + + variant String + + # A text input to the model. + variant -> { OpenAI::Responses::ResponseInputText } + + # An image input to the model. Learn about [image inputs](https://platform.openai.com/docs/guides/vision). + variant -> { OpenAI::Responses::ResponseInputImage } + + # A file input to the model. 
+ variant -> { OpenAI::Responses::ResponseInputFile } + + # @!method self.variants + # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile)] + end + end + end + end +end diff --git a/lib/openai/models/responses/response_queued_event.rb b/lib/openai/models/responses/response_queued_event.rb new file mode 100644 index 00000000..0625aa8b --- /dev/null +++ b/lib/openai/models/responses/response_queued_event.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseQueuedEvent < OpenAI::Internal::Type::BaseModel + # @!attribute response + # The full response object that is queued. + # + # @return [OpenAI::Models::Responses::Response] + required :response, -> { OpenAI::Responses::Response } + + # @!attribute sequence_number + # The sequence number for this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The type of the event. Always 'response.queued'. + # + # @return [Symbol, :"response.queued"] + required :type, const: :"response.queued" + + # @!method initialize(response:, sequence_number:, type: :"response.queued") + # Emitted when a response is queued and waiting to be processed. + # + # @param response [OpenAI::Models::Responses::Response] The full response object that is queued. + # + # @param sequence_number [Integer] The sequence number for this event. + # + # @param type [Symbol, :"response.queued"] The type of the event. Always 'response.queued'. + end + end + end +end diff --git a/lib/openai/models/responses/response_reasoning_item.rb b/lib/openai/models/responses/response_reasoning_item.rb index c4d60d3f..3c4068fb 100644 --- a/lib/openai/models/responses/response_reasoning_item.rb +++ b/lib/openai/models/responses/response_reasoning_item.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseReasoningItem < OpenAI::BaseModel + class ResponseReasoningItem < OpenAI::Internal::Type::BaseModel # @!attribute id # The unique identifier of the reasoning content. # @@ -11,10 +11,11 @@ class ResponseReasoningItem < OpenAI::BaseModel required :id, String # @!attribute summary - # Reasoning text contents. + # Reasoning summary content. # # @return [Array] - required :summary, -> { OpenAI::ArrayOf[OpenAI::Models::Responses::ResponseReasoningItem::Summary] } + required :summary, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseReasoningItem::Summary] } # @!attribute type # The type of the object. Always `reasoning`. @@ -22,33 +23,51 @@ class ResponseReasoningItem < OpenAI::BaseModel # @return [Symbol, :reasoning] required :type, const: :reasoning - # @!attribute [r] status + # @!attribute content + # Reasoning text content. + # + # @return [Array, nil] + optional :content, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseReasoningItem::Content] } + + # @!attribute encrypted_content + # The encrypted content of the reasoning item - populated when a response is + # generated with `reasoning.encrypted_content` in the `include` parameter. + # + # @return [String, nil] + optional :encrypted_content, String, nil?: true + + # @!attribute status # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. + # Populated when items are returned via API. 
# # @return [Symbol, OpenAI::Models::Responses::ResponseReasoningItem::Status, nil] - optional :status, enum: -> { OpenAI::Models::Responses::ResponseReasoningItem::Status } - - # @!parse - # # @return [Symbol, OpenAI::Models::Responses::ResponseReasoningItem::Status] - # attr_writer :status - - # @!parse - # # A description of the chain of thought used by a reasoning model while generating - # # a response. - # # - # # @param id [String] - # # @param summary [Array] - # # @param status [Symbol, OpenAI::Models::Responses::ResponseReasoningItem::Status] - # # @param type [Symbol, :reasoning] - # # - # def initialize(id:, summary:, status: nil, type: :reasoning, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class Summary < OpenAI::BaseModel + optional :status, enum: -> { OpenAI::Responses::ResponseReasoningItem::Status } + + # @!method initialize(id:, summary:, content: nil, encrypted_content: nil, status: nil, type: :reasoning) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseReasoningItem} for more details. + # + # A description of the chain of thought used by a reasoning model while generating + # a response. Be sure to include these items in your `input` to the Responses API + # for subsequent turns of a conversation if you are manually + # [managing context](https://platform.openai.com/docs/guides/conversation-state). + # + # @param id [String] The unique identifier of the reasoning content. + # + # @param summary [Array] Reasoning summary content. + # + # @param content [Array] Reasoning text content. + # + # @param encrypted_content [String, nil] The encrypted content of the reasoning item - populated when a response is + # + # @param status [Symbol, OpenAI::Models::Responses::ResponseReasoningItem::Status] The status of the item. One of `in_progress`, `completed`, or + # + # @param type [Symbol, :reasoning] The type of the object. Always `reasoning`. + + class Summary < OpenAI::Internal::Type::BaseModel # @!attribute text - # A short summary of the reasoning used by the model when generating the response. + # A summary of the reasoning output from the model so far. # # @return [String] required :text, String @@ -59,25 +78,50 @@ class Summary < OpenAI::BaseModel # @return [Symbol, :summary_text] required :type, const: :summary_text - # @!parse - # # @param text [String] - # # @param type [Symbol, :summary_text] - # # - # def initialize(text:, type: :summary_text, **) = super + # @!method initialize(text:, type: :summary_text) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseReasoningItem::Summary} for more details. + # + # @param text [String] A summary of the reasoning output from the model so far. + # + # @param type [Symbol, :summary_text] The type of the object. Always `summary_text`. + end + + class Content < OpenAI::Internal::Type::BaseModel + # @!attribute text + # Reasoning text output from the model. + # + # @return [String] + required :text, String - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!attribute type + # The type of the object. Always `reasoning_text`. + # + # @return [Symbol, :reasoning_text] + required :type, const: :reasoning_text + + # @!method initialize(text:, type: :reasoning_text) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseReasoningItem::Content} for more details. + # + # @param text [String] Reasoning text output from the model. 
+ # + # @param type [Symbol, :reasoning_text] The type of the object. Always `reasoning_text`. end - # @abstract - # # The status of the item. One of `in_progress`, `completed`, or `incomplete`. - # Populated when items are returned via API. - class Status < OpenAI::Enum + # Populated when items are returned via API. + # + # @see OpenAI::Models::Responses::ResponseReasoningItem#status + module Status + extend OpenAI::Internal::Type::Enum + IN_PROGRESS = :in_progress COMPLETED = :completed INCOMPLETE = :incomplete - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/responses/response_reasoning_summary_part_added_event.rb b/lib/openai/models/responses/response_reasoning_summary_part_added_event.rb new file mode 100644 index 00000000..88b7f071 --- /dev/null +++ b/lib/openai/models/responses/response_reasoning_summary_part_added_event.rb @@ -0,0 +1,86 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseReasoningSummaryPartAddedEvent < OpenAI::Internal::Type::BaseModel + # @!attribute item_id + # The ID of the item this summary part is associated with. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item this summary part is associated with. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute part + # The summary part that was added. + # + # @return [OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent::Part] + required :part, -> { OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent::Part } + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute summary_index + # The index of the summary part within the reasoning summary. + # + # @return [Integer] + required :summary_index, Integer + + # @!attribute type + # The type of the event. Always `response.reasoning_summary_part.added`. + # + # @return [Symbol, :"response.reasoning_summary_part.added"] + required :type, const: :"response.reasoning_summary_part.added" + + # @!method initialize(item_id:, output_index:, part:, sequence_number:, summary_index:, type: :"response.reasoning_summary_part.added") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent} for more + # details. + # + # Emitted when a new reasoning summary part is added. + # + # @param item_id [String] The ID of the item this summary part is associated with. + # + # @param output_index [Integer] The index of the output item this summary part is associated with. + # + # @param part [OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent::Part] The summary part that was added. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param summary_index [Integer] The index of the summary part within the reasoning summary. + # + # @param type [Symbol, :"response.reasoning_summary_part.added"] The type of the event. Always `response.reasoning_summary_part.added`. + + # @see OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent#part + class Part < OpenAI::Internal::Type::BaseModel + # @!attribute text + # The text of the summary part. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the summary part. Always `summary_text`. 
+ # + # @return [Symbol, :summary_text] + required :type, const: :summary_text + + # @!method initialize(text:, type: :summary_text) + # The summary part that was added. + # + # @param text [String] The text of the summary part. + # + # @param type [Symbol, :summary_text] The type of the summary part. Always `summary_text`. + end + end + end + end +end diff --git a/lib/openai/models/responses/response_reasoning_summary_part_done_event.rb b/lib/openai/models/responses/response_reasoning_summary_part_done_event.rb new file mode 100644 index 00000000..c45beaa4 --- /dev/null +++ b/lib/openai/models/responses/response_reasoning_summary_part_done_event.rb @@ -0,0 +1,86 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseReasoningSummaryPartDoneEvent < OpenAI::Internal::Type::BaseModel + # @!attribute item_id + # The ID of the item this summary part is associated with. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item this summary part is associated with. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute part + # The completed summary part. + # + # @return [OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent::Part] + required :part, -> { OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent::Part } + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute summary_index + # The index of the summary part within the reasoning summary. + # + # @return [Integer] + required :summary_index, Integer + + # @!attribute type + # The type of the event. Always `response.reasoning_summary_part.done`. + # + # @return [Symbol, :"response.reasoning_summary_part.done"] + required :type, const: :"response.reasoning_summary_part.done" + + # @!method initialize(item_id:, output_index:, part:, sequence_number:, summary_index:, type: :"response.reasoning_summary_part.done") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent} for more + # details. + # + # Emitted when a reasoning summary part is completed. + # + # @param item_id [String] The ID of the item this summary part is associated with. + # + # @param output_index [Integer] The index of the output item this summary part is associated with. + # + # @param part [OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent::Part] The completed summary part. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param summary_index [Integer] The index of the summary part within the reasoning summary. + # + # @param type [Symbol, :"response.reasoning_summary_part.done"] The type of the event. Always `response.reasoning_summary_part.done`. + + # @see OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent#part + class Part < OpenAI::Internal::Type::BaseModel + # @!attribute text + # The text of the summary part. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the summary part. Always `summary_text`. + # + # @return [Symbol, :summary_text] + required :type, const: :summary_text + + # @!method initialize(text:, type: :summary_text) + # The completed summary part. + # + # @param text [String] The text of the summary part. + # + # @param type [Symbol, :summary_text] The type of the summary part. Always `summary_text`. 
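+ #
+ # A minimal usage sketch (not part of the upstream change); `event` is assumed
+ # to be a parsed ResponseReasoningSummaryPartDoneEvent.
+ #
+ # @example Printing a finished summary part
+ #   puts "reasoning summary: #{event.part.text}" if event.part.type == :summary_text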
+ end + end + end + end +end diff --git a/lib/openai/models/responses/response_reasoning_summary_text_delta_event.rb b/lib/openai/models/responses/response_reasoning_summary_text_delta_event.rb new file mode 100644 index 00000000..9f801ed9 --- /dev/null +++ b/lib/openai/models/responses/response_reasoning_summary_text_delta_event.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseReasoningSummaryTextDeltaEvent < OpenAI::Internal::Type::BaseModel + # @!attribute delta + # The text delta that was added to the summary. + # + # @return [String] + required :delta, String + + # @!attribute item_id + # The ID of the item this summary text delta is associated with. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item this summary text delta is associated with. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute summary_index + # The index of the summary part within the reasoning summary. + # + # @return [Integer] + required :summary_index, Integer + + # @!attribute type + # The type of the event. Always `response.reasoning_summary_text.delta`. + # + # @return [Symbol, :"response.reasoning_summary_text.delta"] + required :type, const: :"response.reasoning_summary_text.delta" + + # @!method initialize(delta:, item_id:, output_index:, sequence_number:, summary_index:, type: :"response.reasoning_summary_text.delta") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent} for more + # details. + # + # Emitted when a delta is added to a reasoning summary text. + # + # @param delta [String] The text delta that was added to the summary. + # + # @param item_id [String] The ID of the item this summary text delta is associated with. + # + # @param output_index [Integer] The index of the output item this summary text delta is associated with. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param summary_index [Integer] The index of the summary part within the reasoning summary. + # + # @param type [Symbol, :"response.reasoning_summary_text.delta"] The type of the event. Always `response.reasoning_summary_text.delta`. + end + end + end +end diff --git a/lib/openai/models/responses/response_reasoning_summary_text_done_event.rb b/lib/openai/models/responses/response_reasoning_summary_text_done_event.rb new file mode 100644 index 00000000..d77661fc --- /dev/null +++ b/lib/openai/models/responses/response_reasoning_summary_text_done_event.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseReasoningSummaryTextDoneEvent < OpenAI::Internal::Type::BaseModel + # @!attribute item_id + # The ID of the item this summary text is associated with. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item this summary text is associated with. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute summary_index + # The index of the summary part within the reasoning summary. 
+ # + # @return [Integer] + required :summary_index, Integer + + # @!attribute text + # The full text of the completed reasoning summary. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the event. Always `response.reasoning_summary_text.done`. + # + # @return [Symbol, :"response.reasoning_summary_text.done"] + required :type, const: :"response.reasoning_summary_text.done" + + # @!method initialize(item_id:, output_index:, sequence_number:, summary_index:, text:, type: :"response.reasoning_summary_text.done") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent} for more + # details. + # + # Emitted when a reasoning summary text is completed. + # + # @param item_id [String] The ID of the item this summary text is associated with. + # + # @param output_index [Integer] The index of the output item this summary text is associated with. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param summary_index [Integer] The index of the summary part within the reasoning summary. + # + # @param text [String] The full text of the completed reasoning summary. + # + # @param type [Symbol, :"response.reasoning_summary_text.done"] The type of the event. Always `response.reasoning_summary_text.done`. + end + end + end +end diff --git a/lib/openai/models/responses/response_reasoning_text_delta_event.rb b/lib/openai/models/responses/response_reasoning_text_delta_event.rb new file mode 100644 index 00000000..fc3380bb --- /dev/null +++ b/lib/openai/models/responses/response_reasoning_text_delta_event.rb @@ -0,0 +1,63 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseReasoningTextDeltaEvent < OpenAI::Internal::Type::BaseModel + # @!attribute content_index + # The index of the reasoning content part this delta is associated with. + # + # @return [Integer] + required :content_index, Integer + + # @!attribute delta + # The text delta that was added to the reasoning content. + # + # @return [String] + required :delta, String + + # @!attribute item_id + # The ID of the item this reasoning text delta is associated with. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item this reasoning text delta is associated with. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The type of the event. Always `response.reasoning_text.delta`. + # + # @return [Symbol, :"response.reasoning_text.delta"] + required :type, const: :"response.reasoning_text.delta" + + # @!method initialize(content_index:, delta:, item_id:, output_index:, sequence_number:, type: :"response.reasoning_text.delta") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseReasoningTextDeltaEvent} for more details. + # + # Emitted when a delta is added to a reasoning text. + # + # @param content_index [Integer] The index of the reasoning content part this delta is associated with. + # + # @param delta [String] The text delta that was added to the reasoning content. + # + # @param item_id [String] The ID of the item this reasoning text delta is associated with. + # + # @param output_index [Integer] The index of the output item this reasoning text delta is associated with. 
+ # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.reasoning_text.delta"] The type of the event. Always `response.reasoning_text.delta`. + end + end + end +end diff --git a/lib/openai/models/responses/response_reasoning_text_done_event.rb b/lib/openai/models/responses/response_reasoning_text_done_event.rb new file mode 100644 index 00000000..e07630bc --- /dev/null +++ b/lib/openai/models/responses/response_reasoning_text_done_event.rb @@ -0,0 +1,63 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseReasoningTextDoneEvent < OpenAI::Internal::Type::BaseModel + # @!attribute content_index + # The index of the reasoning content part. + # + # @return [Integer] + required :content_index, Integer + + # @!attribute item_id + # The ID of the item this reasoning text is associated with. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item this reasoning text is associated with. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute text + # The full text of the completed reasoning content. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the event. Always `response.reasoning_text.done`. + # + # @return [Symbol, :"response.reasoning_text.done"] + required :type, const: :"response.reasoning_text.done" + + # @!method initialize(content_index:, item_id:, output_index:, sequence_number:, text:, type: :"response.reasoning_text.done") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseReasoningTextDoneEvent} for more details. + # + # Emitted when a reasoning text is completed. + # + # @param content_index [Integer] The index of the reasoning content part. + # + # @param item_id [String] The ID of the item this reasoning text is associated with. + # + # @param output_index [Integer] The index of the output item this reasoning text is associated with. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param text [String] The full text of the completed reasoning content. + # + # @param type [Symbol, :"response.reasoning_text.done"] The type of the event. Always `response.reasoning_text.done`. + end + end + end +end diff --git a/lib/openai/models/responses/response_refusal_delta_event.rb b/lib/openai/models/responses/response_refusal_delta_event.rb index b09b8ef2..f912ec9e 100644 --- a/lib/openai/models/responses/response_refusal_delta_event.rb +++ b/lib/openai/models/responses/response_refusal_delta_event.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseRefusalDeltaEvent < OpenAI::BaseModel + class ResponseRefusalDeltaEvent < OpenAI::Internal::Type::BaseModel # @!attribute content_index # The index of the content part that the refusal text is added to. # @@ -28,24 +28,35 @@ class ResponseRefusalDeltaEvent < OpenAI::BaseModel # @return [Integer] required :output_index, Integer + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute type # The type of the event. Always `response.refusal.delta`. 
# # @return [Symbol, :"response.refusal.delta"] required :type, const: :"response.refusal.delta" - # @!parse - # # Emitted when there is a partial refusal text. - # # - # # @param content_index [Integer] - # # @param delta [String] - # # @param item_id [String] - # # @param output_index [Integer] - # # @param type [Symbol, :"response.refusal.delta"] - # # - # def initialize(content_index:, delta:, item_id:, output_index:, type: :"response.refusal.delta", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(content_index:, delta:, item_id:, output_index:, sequence_number:, type: :"response.refusal.delta") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseRefusalDeltaEvent} for more details. + # + # Emitted when there is a partial refusal text. + # + # @param content_index [Integer] The index of the content part that the refusal text is added to. + # + # @param delta [String] The refusal text that is added. + # + # @param item_id [String] The ID of the output item that the refusal text is added to. + # + # @param output_index [Integer] The index of the output item that the refusal text is added to. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.refusal.delta"] The type of the event. Always `response.refusal.delta`. end end end diff --git a/lib/openai/models/responses/response_refusal_done_event.rb b/lib/openai/models/responses/response_refusal_done_event.rb index 662705f7..4d428943 100644 --- a/lib/openai/models/responses/response_refusal_done_event.rb +++ b/lib/openai/models/responses/response_refusal_done_event.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseRefusalDoneEvent < OpenAI::BaseModel + class ResponseRefusalDoneEvent < OpenAI::Internal::Type::BaseModel # @!attribute content_index # The index of the content part that the refusal text is finalized. # @@ -28,24 +28,35 @@ class ResponseRefusalDoneEvent < OpenAI::BaseModel # @return [String] required :refusal, String + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute type # The type of the event. Always `response.refusal.done`. # # @return [Symbol, :"response.refusal.done"] required :type, const: :"response.refusal.done" - # @!parse - # # Emitted when refusal text is finalized. - # # - # # @param content_index [Integer] - # # @param item_id [String] - # # @param output_index [Integer] - # # @param refusal [String] - # # @param type [Symbol, :"response.refusal.done"] - # # - # def initialize(content_index:, item_id:, output_index:, refusal:, type: :"response.refusal.done", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(content_index:, item_id:, output_index:, refusal:, sequence_number:, type: :"response.refusal.done") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseRefusalDoneEvent} for more details. + # + # Emitted when refusal text is finalized. + # + # @param content_index [Integer] The index of the content part that the refusal text is finalized. + # + # @param item_id [String] The ID of the output item that the refusal text is finalized. + # + # @param output_index [Integer] The index of the output item that the refusal text is finalized. + # + # @param refusal [String] The refusal text that is finalized. 
+ # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.refusal.done"] The type of the event. Always `response.refusal.done`. end end end diff --git a/lib/openai/models/responses/response_retrieve_params.rb b/lib/openai/models/responses/response_retrieve_params.rb index 4ca65d89..67aa9110 100644 --- a/lib/openai/models/responses/response_retrieve_params.rb +++ b/lib/openai/models/responses/response_retrieve_params.rb @@ -3,29 +3,48 @@ module OpenAI module Models module Responses - class ResponseRetrieveParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Responses#retrieve + # + # @see OpenAI::Resources::Responses#retrieve_streaming + class ResponseRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!attribute [r] include + # @!attribute include # Additional fields to include in the response. See the `include` parameter for - # Response creation above for more information. + # Response creation above for more information. # # @return [Array, nil] - optional :include, -> { OpenAI::ArrayOf[enum: OpenAI::Models::Responses::ResponseIncludable] } + optional :include, -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Responses::ResponseIncludable] } - # @!parse - # # @return [Array] - # attr_writer :include + # @!attribute include_obfuscation + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + # + # @return [Boolean, nil] + optional :include_obfuscation, OpenAI::Internal::Type::Boolean - # @!parse - # # @param include [Array] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(include: nil, request_options: {}, **) = super + # @!attribute starting_after + # The sequence number of the event after which to start streaming. + # + # @return [Integer, nil] + optional :starting_after, Integer - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(include: nil, include_obfuscation: nil, starting_after: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseRetrieveParams} for more details. + # + # @param include [Array] Additional fields to include in the response. See the `include` + # + # @param include_obfuscation [Boolean] When true, stream obfuscation will be enabled. Stream obfuscation adds + # + # @param starting_after [Integer] The sequence number of the event after which to start streaming. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/responses/response_status.rb b/lib/openai/models/responses/response_status.rb index e15a7a6e..20e16a85 100644 --- a/lib/openai/models/responses/response_status.rb +++ b/lib/openai/models/responses/response_status.rb @@ -3,17 +3,20 @@ module OpenAI module Models module Responses - # @abstract - # # The status of the response generation. 
One of `completed`, `failed`, - # `in_progress`, or `incomplete`. - class ResponseStatus < OpenAI::Enum + # `in_progress`, `cancelled`, `queued`, or `incomplete`. + module ResponseStatus + extend OpenAI::Internal::Type::Enum + COMPLETED = :completed FAILED = :failed IN_PROGRESS = :in_progress + CANCELLED = :cancelled + QUEUED = :queued INCOMPLETE = :incomplete - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/responses/response_stream_event.rb b/lib/openai/models/responses/response_stream_event.rb index 571c3d7a..c5eeca54 100644 --- a/lib/openai/models/responses/response_stream_event.rb +++ b/lib/openai/models/responses/response_stream_event.rb @@ -3,123 +3,198 @@ module OpenAI module Models module Responses - # @abstract - # # Emitted when there is a partial audio response. - class ResponseStreamEvent < OpenAI::Union + module ResponseStreamEvent + extend OpenAI::Internal::Type::Union + discriminator :type # Emitted when there is a partial audio response. - variant :"response.audio.delta", -> { OpenAI::Models::Responses::ResponseAudioDeltaEvent } + variant :"response.audio.delta", -> { OpenAI::Responses::ResponseAudioDeltaEvent } # Emitted when the audio response is complete. - variant :"response.audio.done", -> { OpenAI::Models::Responses::ResponseAudioDoneEvent } + variant :"response.audio.done", -> { OpenAI::Responses::ResponseAudioDoneEvent } # Emitted when there is a partial transcript of audio. - variant :"response.audio.transcript.delta", - -> { OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent } + variant :"response.audio.transcript.delta", -> { OpenAI::Responses::ResponseAudioTranscriptDeltaEvent } # Emitted when the full audio transcript is completed. - variant :"response.audio.transcript.done", - -> { OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent } + variant :"response.audio.transcript.done", -> { OpenAI::Responses::ResponseAudioTranscriptDoneEvent } - # Emitted when a partial code snippet is added by the code interpreter. - variant :"response.code_interpreter_call.code.delta", - -> { OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent } + # Emitted when a partial code snippet is streamed by the code interpreter. + variant :"response.code_interpreter_call_code.delta", + -> { OpenAI::Responses::ResponseCodeInterpreterCallCodeDeltaEvent } - # Emitted when code snippet output is finalized by the code interpreter. - variant :"response.code_interpreter_call.code.done", - -> { OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent } + # Emitted when the code snippet is finalized by the code interpreter. + variant :"response.code_interpreter_call_code.done", + -> { OpenAI::Responses::ResponseCodeInterpreterCallCodeDoneEvent } # Emitted when the code interpreter call is completed. variant :"response.code_interpreter_call.completed", - -> { OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent } + -> { OpenAI::Responses::ResponseCodeInterpreterCallCompletedEvent } # Emitted when a code interpreter call is in progress. variant :"response.code_interpreter_call.in_progress", - -> { OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent } + -> { OpenAI::Responses::ResponseCodeInterpreterCallInProgressEvent } # Emitted when the code interpreter is actively interpreting the code snippet. 
variant :"response.code_interpreter_call.interpreting", - -> { OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent } + -> { OpenAI::Responses::ResponseCodeInterpreterCallInterpretingEvent } # Emitted when the model response is complete. - variant :"response.completed", -> { OpenAI::Models::Responses::ResponseCompletedEvent } + variant :"response.completed", -> { OpenAI::Responses::ResponseCompletedEvent } # Emitted when a new content part is added. - variant :"response.content_part.added", -> { OpenAI::Models::Responses::ResponseContentPartAddedEvent } + variant :"response.content_part.added", -> { OpenAI::Responses::ResponseContentPartAddedEvent } # Emitted when a content part is done. - variant :"response.content_part.done", -> { OpenAI::Models::Responses::ResponseContentPartDoneEvent } + variant :"response.content_part.done", -> { OpenAI::Responses::ResponseContentPartDoneEvent } # An event that is emitted when a response is created. - variant :"response.created", -> { OpenAI::Models::Responses::ResponseCreatedEvent } + variant :"response.created", -> { OpenAI::Responses::ResponseCreatedEvent } # Emitted when an error occurs. - variant :error, -> { OpenAI::Models::Responses::ResponseErrorEvent } + variant :error, -> { OpenAI::Responses::ResponseErrorEvent } # Emitted when a file search call is completed (results found). variant :"response.file_search_call.completed", - -> { OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent } + -> { OpenAI::Responses::ResponseFileSearchCallCompletedEvent } # Emitted when a file search call is initiated. variant :"response.file_search_call.in_progress", - -> { OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent } + -> { OpenAI::Responses::ResponseFileSearchCallInProgressEvent } # Emitted when a file search is currently searching. variant :"response.file_search_call.searching", - -> { OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent } + -> { OpenAI::Responses::ResponseFileSearchCallSearchingEvent } # Emitted when there is a partial function-call arguments delta. variant :"response.function_call_arguments.delta", - -> { OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent } + -> { OpenAI::Responses::ResponseFunctionCallArgumentsDeltaEvent } # Emitted when function-call arguments are finalized. variant :"response.function_call_arguments.done", - -> { OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent } + -> { OpenAI::Responses::ResponseFunctionCallArgumentsDoneEvent } # Emitted when the response is in progress. - variant :"response.in_progress", -> { OpenAI::Models::Responses::ResponseInProgressEvent } + variant :"response.in_progress", -> { OpenAI::Responses::ResponseInProgressEvent } # An event that is emitted when a response fails. - variant :"response.failed", -> { OpenAI::Models::Responses::ResponseFailedEvent } + variant :"response.failed", -> { OpenAI::Responses::ResponseFailedEvent } # An event that is emitted when a response finishes as incomplete. - variant :"response.incomplete", -> { OpenAI::Models::Responses::ResponseIncompleteEvent } + variant :"response.incomplete", -> { OpenAI::Responses::ResponseIncompleteEvent } # Emitted when a new output item is added. - variant :"response.output_item.added", -> { OpenAI::Models::Responses::ResponseOutputItemAddedEvent } + variant :"response.output_item.added", -> { OpenAI::Responses::ResponseOutputItemAddedEvent } # Emitted when an output item is marked done. 
- variant :"response.output_item.done", -> { OpenAI::Models::Responses::ResponseOutputItemDoneEvent } + variant :"response.output_item.done", -> { OpenAI::Responses::ResponseOutputItemDoneEvent } + + # Emitted when a new reasoning summary part is added. + variant :"response.reasoning_summary_part.added", + -> { OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent } + + # Emitted when a reasoning summary part is completed. + variant :"response.reasoning_summary_part.done", + -> { OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent } + + # Emitted when a delta is added to a reasoning summary text. + variant :"response.reasoning_summary_text.delta", + -> { OpenAI::Responses::ResponseReasoningSummaryTextDeltaEvent } + + # Emitted when a reasoning summary text is completed. + variant :"response.reasoning_summary_text.done", + -> { OpenAI::Responses::ResponseReasoningSummaryTextDoneEvent } + + # Emitted when a delta is added to a reasoning text. + variant :"response.reasoning_text.delta", -> { OpenAI::Responses::ResponseReasoningTextDeltaEvent } + + # Emitted when a reasoning text is completed. + variant :"response.reasoning_text.done", -> { OpenAI::Responses::ResponseReasoningTextDoneEvent } # Emitted when there is a partial refusal text. - variant :"response.refusal.delta", -> { OpenAI::Models::Responses::ResponseRefusalDeltaEvent } + variant :"response.refusal.delta", -> { OpenAI::Responses::ResponseRefusalDeltaEvent } # Emitted when refusal text is finalized. - variant :"response.refusal.done", -> { OpenAI::Models::Responses::ResponseRefusalDoneEvent } - - # Emitted when a text annotation is added. - variant :"response.output_text.annotation.added", - -> { OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent } + variant :"response.refusal.done", -> { OpenAI::Responses::ResponseRefusalDoneEvent } # Emitted when there is an additional text delta. - variant :"response.output_text.delta", -> { OpenAI::Models::Responses::ResponseTextDeltaEvent } + variant :"response.output_text.delta", -> { OpenAI::Responses::ResponseTextDeltaEvent } # Emitted when text content is finalized. - variant :"response.output_text.done", -> { OpenAI::Models::Responses::ResponseTextDoneEvent } + variant :"response.output_text.done", -> { OpenAI::Responses::ResponseTextDoneEvent } # Emitted when a web search call is completed. variant :"response.web_search_call.completed", - -> { OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent } + -> { OpenAI::Responses::ResponseWebSearchCallCompletedEvent } # Emitted when a web search call is initiated. variant :"response.web_search_call.in_progress", - -> { OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent } + -> { OpenAI::Responses::ResponseWebSearchCallInProgressEvent } # Emitted when a web search call is executing. variant :"response.web_search_call.searching", - -> { OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent } + -> { OpenAI::Responses::ResponseWebSearchCallSearchingEvent } + + # Emitted when an image generation tool call has completed and the final image is available. + variant :"response.image_generation_call.completed", + -> { OpenAI::Responses::ResponseImageGenCallCompletedEvent } + + # Emitted when an image generation tool call is actively generating an image (intermediate state). + variant :"response.image_generation_call.generating", + -> { OpenAI::Responses::ResponseImageGenCallGeneratingEvent } + + # Emitted when an image generation tool call is in progress. 
+ variant :"response.image_generation_call.in_progress", + -> { OpenAI::Responses::ResponseImageGenCallInProgressEvent } + + # Emitted when a partial image is available during image generation streaming. + variant :"response.image_generation_call.partial_image", + -> { OpenAI::Responses::ResponseImageGenCallPartialImageEvent } + + # Emitted when there is a delta (partial update) to the arguments of an MCP tool call. + variant :"response.mcp_call_arguments.delta", -> { OpenAI::Responses::ResponseMcpCallArgumentsDeltaEvent } + + # Emitted when the arguments for an MCP tool call are finalized. + variant :"response.mcp_call_arguments.done", -> { OpenAI::Responses::ResponseMcpCallArgumentsDoneEvent } + + # Emitted when an MCP tool call has completed successfully. + variant :"response.mcp_call.completed", -> { OpenAI::Responses::ResponseMcpCallCompletedEvent } + + # Emitted when an MCP tool call has failed. + variant :"response.mcp_call.failed", -> { OpenAI::Responses::ResponseMcpCallFailedEvent } + + # Emitted when an MCP tool call is in progress. + variant :"response.mcp_call.in_progress", -> { OpenAI::Responses::ResponseMcpCallInProgressEvent } + + # Emitted when the list of available MCP tools has been successfully retrieved. + variant :"response.mcp_list_tools.completed", -> { OpenAI::Responses::ResponseMcpListToolsCompletedEvent } + + # Emitted when the attempt to list available MCP tools has failed. + variant :"response.mcp_list_tools.failed", -> { OpenAI::Responses::ResponseMcpListToolsFailedEvent } + + # Emitted when the system is in the process of retrieving the list of available MCP tools. + variant :"response.mcp_list_tools.in_progress", + -> { OpenAI::Responses::ResponseMcpListToolsInProgressEvent } + + # Emitted when an annotation is added to output text content. + variant :"response.output_text.annotation.added", + -> { OpenAI::Responses::ResponseOutputTextAnnotationAddedEvent } + + # Emitted when a response is queued and waiting to be processed. + variant :"response.queued", -> { OpenAI::Responses::ResponseQueuedEvent } + + # Event representing a delta (partial update) to the input of a custom tool call. + variant :"response.custom_tool_call_input.delta", + -> { OpenAI::Responses::ResponseCustomToolCallInputDeltaEvent } + + # Event indicating that input for a custom tool call is complete. 
+ variant :"response.custom_tool_call_input.done", + -> { OpenAI::Responses::ResponseCustomToolCallInputDoneEvent } + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseReasoningTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseCustomToolCallInputDeltaEvent, OpenAI::Models::Responses::ResponseCustomToolCallInputDoneEvent)] end end end diff --git a/lib/openai/models/responses/response_text_annotation_delta_event.rb b/lib/openai/models/responses/response_text_annotation_delta_event.rb deleted file 
mode 100644 index 27041443..00000000 --- a/lib/openai/models/responses/response_text_annotation_delta_event.rb +++ /dev/null @@ -1,196 +0,0 @@ -# frozen_string_literal: true - -module OpenAI - module Models - module Responses - class ResponseTextAnnotationDeltaEvent < OpenAI::BaseModel - # @!attribute annotation - # A citation to a file. - # - # @return [OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath] - required :annotation, - union: -> { OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation } - - # @!attribute annotation_index - # The index of the annotation that was added. - # - # @return [Integer] - required :annotation_index, Integer - - # @!attribute content_index - # The index of the content part that the text annotation was added to. - # - # @return [Integer] - required :content_index, Integer - - # @!attribute item_id - # The ID of the output item that the text annotation was added to. - # - # @return [String] - required :item_id, String - - # @!attribute output_index - # The index of the output item that the text annotation was added to. - # - # @return [Integer] - required :output_index, Integer - - # @!attribute type - # The type of the event. Always `response.output_text.annotation.added`. - # - # @return [Symbol, :"response.output_text.annotation.added"] - required :type, const: :"response.output_text.annotation.added" - - # @!parse - # # Emitted when a text annotation is added. - # # - # # @param annotation [OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath] - # # @param annotation_index [Integer] - # # @param content_index [Integer] - # # @param item_id [String] - # # @param output_index [Integer] - # # @param type [Symbol, :"response.output_text.annotation.added"] - # # - # def initialize( - # annotation:, - # annotation_index:, - # content_index:, - # item_id:, - # output_index:, - # type: :"response.output_text.annotation.added", - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract - # - # A citation to a file. - class Annotation < OpenAI::Union - discriminator :type - - # A citation to a file. - variant :file_citation, - -> { OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation } - - # A citation for a web resource used to generate a model response. - variant :url_citation, - -> { OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation } - - # A path to a file. - variant :file_path, - -> { OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath } - - class FileCitation < OpenAI::BaseModel - # @!attribute file_id - # The ID of the file. - # - # @return [String] - required :file_id, String - - # @!attribute index - # The index of the file in the list of files. - # - # @return [Integer] - required :index, Integer - - # @!attribute type - # The type of the file citation. Always `file_citation`. - # - # @return [Symbol, :file_citation] - required :type, const: :file_citation - - # @!parse - # # A citation to a file. 
- # # - # # @param file_id [String] - # # @param index [Integer] - # # @param type [Symbol, :file_citation] - # # - # def initialize(file_id:, index:, type: :file_citation, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - end - - class URLCitation < OpenAI::BaseModel - # @!attribute end_index - # The index of the last character of the URL citation in the message. - # - # @return [Integer] - required :end_index, Integer - - # @!attribute start_index - # The index of the first character of the URL citation in the message. - # - # @return [Integer] - required :start_index, Integer - - # @!attribute title - # The title of the web resource. - # - # @return [String] - required :title, String - - # @!attribute type - # The type of the URL citation. Always `url_citation`. - # - # @return [Symbol, :url_citation] - required :type, const: :url_citation - - # @!attribute url - # The URL of the web resource. - # - # @return [String] - required :url, String - - # @!parse - # # A citation for a web resource used to generate a model response. - # # - # # @param end_index [Integer] - # # @param start_index [Integer] - # # @param title [String] - # # @param url [String] - # # @param type [Symbol, :url_citation] - # # - # def initialize(end_index:, start_index:, title:, url:, type: :url_citation, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - end - - class FilePath < OpenAI::BaseModel - # @!attribute file_id - # The ID of the file. - # - # @return [String] - required :file_id, String - - # @!attribute index - # The index of the file in the list of files. - # - # @return [Integer] - required :index, Integer - - # @!attribute type - # The type of the file path. Always `file_path`. - # - # @return [Symbol, :file_path] - required :type, const: :file_path - - # @!parse - # # A path to a file. - # # - # # @param file_id [String] - # # @param index [Integer] - # # @param type [Symbol, :file_path] - # # - # def initialize(file_id:, index:, type: :file_path, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - end - end - end - end - end -end diff --git a/lib/openai/models/responses/response_text_config.rb b/lib/openai/models/responses/response_text_config.rb index 77a2d9b0..7017ea8f 100644 --- a/lib/openai/models/responses/response_text_config.rb +++ b/lib/openai/models/responses/response_text_config.rb @@ -3,41 +3,62 @@ module OpenAI module Models module Responses - class ResponseTextConfig < OpenAI::BaseModel - # @!attribute [r] format_ + class ResponseTextConfig < OpenAI::Internal::Type::BaseModel + # @!attribute format_ # An object specifying the format that the model must output. # - # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - # ensures the model will match your supplied JSON schema. Learn more in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). # - # The default format is `{ "type": "text" }` with no additional options. + # The default format is `{ "type": "text" }` with no additional options. 
# - # **Not recommended for gpt-4o and newer models:** + # **Not recommended for gpt-4o and newer models:** # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. # # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil] - optional :format_, union: -> { OpenAI::Models::Responses::ResponseFormatTextConfig }, api_name: :format + optional :format_, union: -> { OpenAI::Responses::ResponseFormatTextConfig }, api_name: :format - # @!parse - # # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] - # attr_writer :format_ + # @!attribute verbosity + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseTextConfig::Verbosity, nil] + optional :verbosity, enum: -> { OpenAI::Responses::ResponseTextConfig::Verbosity }, nil?: true + + # @!method initialize(format_: nil, verbosity: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseTextConfig} for more details. + # + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # + # @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. + # + # @param verbosity [Symbol, OpenAI::Models::Responses::ResponseTextConfig::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in + + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + # + # @see OpenAI::Models::Responses::ResponseTextConfig#verbosity + module Verbosity + extend OpenAI::Internal::Type::Enum - # @!parse - # # Configuration options for a text response from the model. Can be plain text or - # # structured JSON data. 
Learn more: - # # - # # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - # # - # # @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] - # # - # def initialize(format_: nil, **) = super + LOW = :low + MEDIUM = :medium + HIGH = :high - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method self.values + # @return [Array] + end end end end diff --git a/lib/openai/models/responses/response_text_delta_event.rb b/lib/openai/models/responses/response_text_delta_event.rb index 5dee2ee1..9701d97c 100644 --- a/lib/openai/models/responses/response_text_delta_event.rb +++ b/lib/openai/models/responses/response_text_delta_event.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseTextDeltaEvent < OpenAI::BaseModel + class ResponseTextDeltaEvent < OpenAI::Internal::Type::BaseModel # @!attribute content_index # The index of the content part that the text delta was added to. # @@ -22,30 +22,104 @@ class ResponseTextDeltaEvent < OpenAI::BaseModel # @return [String] required :item_id, String + # @!attribute logprobs + # The log probabilities of the tokens in the delta. + # + # @return [Array] + required :logprobs, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDeltaEvent::Logprob] } + # @!attribute output_index # The index of the output item that the text delta was added to. # # @return [Integer] required :output_index, Integer + # @!attribute sequence_number + # The sequence number for this event. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute type # The type of the event. Always `response.output_text.delta`. # # @return [Symbol, :"response.output_text.delta"] required :type, const: :"response.output_text.delta" - # @!parse - # # Emitted when there is an additional text delta. - # # - # # @param content_index [Integer] - # # @param delta [String] - # # @param item_id [String] - # # @param output_index [Integer] - # # @param type [Symbol, :"response.output_text.delta"] - # # - # def initialize(content_index:, delta:, item_id:, output_index:, type: :"response.output_text.delta", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(content_index:, delta:, item_id:, logprobs:, output_index:, sequence_number:, type: :"response.output_text.delta") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseTextDeltaEvent} for more details. + # + # Emitted when there is an additional text delta. + # + # @param content_index [Integer] The index of the content part that the text delta was added to. + # + # @param delta [String] The text delta that was added. + # + # @param item_id [String] The ID of the output item that the text delta was added to. + # + # @param logprobs [Array] The log probabilities of the tokens in the delta. + # + # @param output_index [Integer] The index of the output item that the text delta was added to. + # + # @param sequence_number [Integer] The sequence number for this event. + # + # @param type [Symbol, :"response.output_text.delta"] The type of the event. Always `response.output_text.delta`. + + class Logprob < OpenAI::Internal::Type::BaseModel + # @!attribute token + # A possible text token. 
+ # + # @return [String] + required :token, String + + # @!attribute logprob + # The log probability of this token. + # + # @return [Float] + required :logprob, Float + + # @!attribute top_logprobs + # The log probability of the top 20 most likely tokens. + # + # @return [Array, nil] + optional :top_logprobs, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob] } + + # @!method initialize(token:, logprob:, top_logprobs: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob} for more details. + # + # A logprob is the logarithmic probability that the model assigns to producing a + # particular token at a given position in the sequence. Less-negative (higher) + # logprob values indicate greater model confidence in that token choice. + # + # @param token [String] A possible text token. + # + # @param logprob [Float] The log probability of this token. + # + # @param top_logprobs [Array] The log probability of the top 20 most likely tokens. + + class TopLogprob < OpenAI::Internal::Type::BaseModel + # @!attribute token + # A possible text token. + # + # @return [String, nil] + optional :token, String + + # @!attribute logprob + # The log probability of this token. + # + # @return [Float, nil] + optional :logprob, Float + + # @!method initialize(token: nil, logprob: nil) + # @param token [String] A possible text token. + # + # @param logprob [Float] The log probability of this token. + end + end end end end diff --git a/lib/openai/models/responses/response_text_done_event.rb b/lib/openai/models/responses/response_text_done_event.rb index cd3d0faf..77110a2a 100644 --- a/lib/openai/models/responses/response_text_done_event.rb +++ b/lib/openai/models/responses/response_text_done_event.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseTextDoneEvent < OpenAI::BaseModel + class ResponseTextDoneEvent < OpenAI::Internal::Type::BaseModel # @!attribute content_index # The index of the content part that the text content is finalized. # @@ -16,12 +16,25 @@ class ResponseTextDoneEvent < OpenAI::BaseModel # @return [String] required :item_id, String + # @!attribute logprobs + # The log probabilities of the tokens in the delta. + # + # @return [Array] + required :logprobs, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDoneEvent::Logprob] } + # @!attribute output_index # The index of the output item that the text content is finalized. # # @return [Integer] required :output_index, Integer + # @!attribute sequence_number + # The sequence number for this event. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute text # The text content that is finalized. # @@ -34,18 +47,79 @@ class ResponseTextDoneEvent < OpenAI::BaseModel # @return [Symbol, :"response.output_text.done"] required :type, const: :"response.output_text.done" - # @!parse - # # Emitted when text content is finalized. 
- # # - # # @param content_index [Integer] - # # @param item_id [String] - # # @param output_index [Integer] - # # @param text [String] - # # @param type [Symbol, :"response.output_text.done"] - # # - # def initialize(content_index:, item_id:, output_index:, text:, type: :"response.output_text.done", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(content_index:, item_id:, logprobs:, output_index:, sequence_number:, text:, type: :"response.output_text.done") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseTextDoneEvent} for more details. + # + # Emitted when text content is finalized. + # + # @param content_index [Integer] The index of the content part that the text content is finalized. + # + # @param item_id [String] The ID of the output item that the text content is finalized. + # + # @param logprobs [Array] The log probabilities of the tokens in the delta. + # + # @param output_index [Integer] The index of the output item that the text content is finalized. + # + # @param sequence_number [Integer] The sequence number for this event. + # + # @param text [String] The text content that is finalized. + # + # @param type [Symbol, :"response.output_text.done"] The type of the event. Always `response.output_text.done`. + + class Logprob < OpenAI::Internal::Type::BaseModel + # @!attribute token + # A possible text token. + # + # @return [String] + required :token, String + + # @!attribute logprob + # The log probability of this token. + # + # @return [Float] + required :logprob, Float + + # @!attribute top_logprobs + # The log probability of the top 20 most likely tokens. + # + # @return [Array, nil] + optional :top_logprobs, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob] } + + # @!method initialize(token:, logprob:, top_logprobs: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob} for more details. + # + # A logprob is the logarithmic probability that the model assigns to producing a + # particular token at a given position in the sequence. Less-negative (higher) + # logprob values indicate greater model confidence in that token choice. + # + # @param token [String] A possible text token. + # + # @param logprob [Float] The log probability of this token. + # + # @param top_logprobs [Array] The log probability of the top 20 most likely tokens. + + class TopLogprob < OpenAI::Internal::Type::BaseModel + # @!attribute token + # A possible text token. + # + # @return [String, nil] + optional :token, String + + # @!attribute logprob + # The log probability of this token. + # + # @return [Float, nil] + optional :logprob, Float + + # @!method initialize(token: nil, logprob: nil) + # @param token [String] A possible text token. + # + # @param logprob [Float] The log probability of this token. + end + end end end end diff --git a/lib/openai/models/responses/response_usage.rb b/lib/openai/models/responses/response_usage.rb index 198bb4bd..bd574dfa 100644 --- a/lib/openai/models/responses/response_usage.rb +++ b/lib/openai/models/responses/response_usage.rb @@ -3,13 +3,19 @@ module OpenAI module Models module Responses - class ResponseUsage < OpenAI::BaseModel + class ResponseUsage < OpenAI::Internal::Type::BaseModel # @!attribute input_tokens # The number of input tokens. 
# # @return [Integer] required :input_tokens, Integer + # @!attribute input_tokens_details + # A detailed breakdown of the input tokens. + # + # @return [OpenAI::Models::Responses::ResponseUsage::InputTokensDetails] + required :input_tokens_details, -> { OpenAI::Responses::ResponseUsage::InputTokensDetails } + # @!attribute output_tokens # The number of output tokens. # @@ -20,7 +26,7 @@ class ResponseUsage < OpenAI::BaseModel # A detailed breakdown of the output tokens. # # @return [OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails] - required :output_tokens_details, -> { OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails } + required :output_tokens_details, -> { OpenAI::Responses::ResponseUsage::OutputTokensDetails } # @!attribute total_tokens # The total number of tokens used. @@ -28,34 +34,50 @@ class ResponseUsage < OpenAI::BaseModel # @return [Integer] required :total_tokens, Integer - # @!parse - # # Represents token usage details including input tokens, output tokens, a - # # breakdown of output tokens, and the total tokens used. - # # - # # @param input_tokens [Integer] - # # @param output_tokens [Integer] - # # @param output_tokens_details [OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails] - # # @param total_tokens [Integer] - # # - # def initialize(input_tokens:, output_tokens:, output_tokens_details:, total_tokens:, **) = super + # @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, output_tokens_details:, total_tokens:) + # Represents token usage details including input tokens, output tokens, a + # breakdown of output tokens, and the total tokens used. + # + # @param input_tokens [Integer] The number of input tokens. + # + # @param input_tokens_details [OpenAI::Models::Responses::ResponseUsage::InputTokensDetails] A detailed breakdown of the input tokens. + # + # @param output_tokens [Integer] The number of output tokens. + # + # @param output_tokens_details [OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails] A detailed breakdown of the output tokens. + # + # @param total_tokens [Integer] The total number of tokens used. - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @see OpenAI::Models::Responses::ResponseUsage#input_tokens_details + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + # @!attribute cached_tokens + # The number of tokens that were retrieved from the cache. + # [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). + # + # @return [Integer] + required :cached_tokens, Integer - class OutputTokensDetails < OpenAI::BaseModel + # @!method initialize(cached_tokens:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseUsage::InputTokensDetails} for more details. + # + # A detailed breakdown of the input tokens. + # + # @param cached_tokens [Integer] The number of tokens that were retrieved from the cache. + end + + # @see OpenAI::Models::Responses::ResponseUsage#output_tokens_details + class OutputTokensDetails < OpenAI::Internal::Type::BaseModel # @!attribute reasoning_tokens # The number of reasoning tokens. # # @return [Integer] required :reasoning_tokens, Integer - # @!parse - # # A detailed breakdown of the output tokens. - # # - # # @param reasoning_tokens [Integer] - # # - # def initialize(reasoning_tokens:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(reasoning_tokens:) + # A detailed breakdown of the output tokens. 
+ # + # @param reasoning_tokens [Integer] The number of reasoning tokens. end end end diff --git a/lib/openai/models/responses/response_web_search_call_completed_event.rb b/lib/openai/models/responses/response_web_search_call_completed_event.rb index 08386de1..91ae2f37 100644 --- a/lib/openai/models/responses/response_web_search_call_completed_event.rb +++ b/lib/openai/models/responses/response_web_search_call_completed_event.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseWebSearchCallCompletedEvent < OpenAI::BaseModel + class ResponseWebSearchCallCompletedEvent < OpenAI::Internal::Type::BaseModel # @!attribute item_id # Unique ID for the output item associated with the web search call. # @@ -16,22 +16,32 @@ class ResponseWebSearchCallCompletedEvent < OpenAI::BaseModel # @return [Integer] required :output_index, Integer + # @!attribute sequence_number + # The sequence number of the web search call being processed. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute type # The type of the event. Always `response.web_search_call.completed`. # # @return [Symbol, :"response.web_search_call.completed"] required :type, const: :"response.web_search_call.completed" - # @!parse - # # Emitted when a web search call is completed. - # # - # # @param item_id [String] - # # @param output_index [Integer] - # # @param type [Symbol, :"response.web_search_call.completed"] - # # - # def initialize(item_id:, output_index:, type: :"response.web_search_call.completed", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.web_search_call.completed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent} for more + # details. + # + # Emitted when a web search call is completed. + # + # @param item_id [String] Unique ID for the output item associated with the web search call. + # + # @param output_index [Integer] The index of the output item that the web search call is associated with. + # + # @param sequence_number [Integer] The sequence number of the web search call being processed. + # + # @param type [Symbol, :"response.web_search_call.completed"] The type of the event. Always `response.web_search_call.completed`. end end end diff --git a/lib/openai/models/responses/response_web_search_call_in_progress_event.rb b/lib/openai/models/responses/response_web_search_call_in_progress_event.rb index 0770e80c..f00f81c0 100644 --- a/lib/openai/models/responses/response_web_search_call_in_progress_event.rb +++ b/lib/openai/models/responses/response_web_search_call_in_progress_event.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseWebSearchCallInProgressEvent < OpenAI::BaseModel + class ResponseWebSearchCallInProgressEvent < OpenAI::Internal::Type::BaseModel # @!attribute item_id # Unique ID for the output item associated with the web search call. # @@ -16,22 +16,32 @@ class ResponseWebSearchCallInProgressEvent < OpenAI::BaseModel # @return [Integer] required :output_index, Integer + # @!attribute sequence_number + # The sequence number of the web search call being processed. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute type # The type of the event. Always `response.web_search_call.in_progress`. 
# # @return [Symbol, :"response.web_search_call.in_progress"] required :type, const: :"response.web_search_call.in_progress" - # @!parse - # # Emitted when a web search call is initiated. - # # - # # @param item_id [String] - # # @param output_index [Integer] - # # @param type [Symbol, :"response.web_search_call.in_progress"] - # # - # def initialize(item_id:, output_index:, type: :"response.web_search_call.in_progress", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.web_search_call.in_progress") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent} for more + # details. + # + # Emitted when a web search call is initiated. + # + # @param item_id [String] Unique ID for the output item associated with the web search call. + # + # @param output_index [Integer] The index of the output item that the web search call is associated with. + # + # @param sequence_number [Integer] The sequence number of the web search call being processed. + # + # @param type [Symbol, :"response.web_search_call.in_progress"] The type of the event. Always `response.web_search_call.in_progress`. end end end diff --git a/lib/openai/models/responses/response_web_search_call_searching_event.rb b/lib/openai/models/responses/response_web_search_call_searching_event.rb index 6d037de9..d1552a07 100644 --- a/lib/openai/models/responses/response_web_search_call_searching_event.rb +++ b/lib/openai/models/responses/response_web_search_call_searching_event.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ResponseWebSearchCallSearchingEvent < OpenAI::BaseModel + class ResponseWebSearchCallSearchingEvent < OpenAI::Internal::Type::BaseModel # @!attribute item_id # Unique ID for the output item associated with the web search call. # @@ -16,22 +16,32 @@ class ResponseWebSearchCallSearchingEvent < OpenAI::BaseModel # @return [Integer] required :output_index, Integer + # @!attribute sequence_number + # The sequence number of the web search call being processed. + # + # @return [Integer] + required :sequence_number, Integer + # @!attribute type # The type of the event. Always `response.web_search_call.searching`. # # @return [Symbol, :"response.web_search_call.searching"] required :type, const: :"response.web_search_call.searching" - # @!parse - # # Emitted when a web search call is executing. - # # - # # @param item_id [String] - # # @param output_index [Integer] - # # @param type [Symbol, :"response.web_search_call.searching"] - # # - # def initialize(item_id:, output_index:, type: :"response.web_search_call.searching", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.web_search_call.searching") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent} for more + # details. + # + # Emitted when a web search call is executing. + # + # @param item_id [String] Unique ID for the output item associated with the web search call. + # + # @param output_index [Integer] The index of the output item that the web search call is associated with. + # + # @param sequence_number [Integer] The sequence number of the web search call being processed. + # + # @param type [Symbol, :"response.web_search_call.searching"] The type of the event. Always `response.web_search_call.searching`. 
end end end diff --git a/lib/openai/models/responses/tool.rb b/lib/openai/models/responses/tool.rb index a97afcb0..37feec46 100644 --- a/lib/openai/models/responses/tool.rb +++ b/lib/openai/models/responses/tool.rb @@ -3,29 +3,659 @@ module OpenAI module Models module Responses - # @abstract - # - # A tool that searches for relevant content from uploaded files. Learn more about - # the - # [file search tool](https://platform.openai.com/docs/guides/tools-file-search). - class Tool < OpenAI::Union + # A tool that can be used to generate a response. + module Tool + extend OpenAI::Internal::Type::Union + discriminator :type - # A tool that searches for relevant content from uploaded files. - # Learn more about the [file search tool](https://platform.openai.com/docs/guides/tools-file-search). - variant :file_search, -> { OpenAI::Models::Responses::FileSearchTool } + # Defines a function in your own code the model can choose to call. Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + variant :function, -> { OpenAI::Responses::FunctionTool } + + # A tool that searches for relevant content from uploaded files. Learn more about the [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + variant :file_search, -> { OpenAI::Responses::FileSearchTool } + + # A tool that controls a virtual computer. Learn more about the [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). + variant :computer_use_preview, -> { OpenAI::Responses::ComputerTool } + + # Give the model access to additional tools via remote Model Context Protocol + # (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). + variant :mcp, -> { OpenAI::Responses::Tool::Mcp } + + # A tool that runs Python code to help generate a response to a prompt. + variant :code_interpreter, -> { OpenAI::Responses::Tool::CodeInterpreter } + + # A tool that generates images using a model like `gpt-image-1`. + variant :image_generation, -> { OpenAI::Responses::Tool::ImageGeneration } + + # A tool that allows the model to execute shell commands in a local environment. + variant :local_shell, -> { OpenAI::Responses::Tool::LocalShell } + + # A custom tool that processes input using a specified format. Learn more about + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools). + variant :custom, -> { OpenAI::Responses::CustomTool } + + # This tool searches the web for relevant results to use in a response. Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search). + variant -> { OpenAI::Responses::WebSearchTool } + + class Mcp < OpenAI::Internal::Type::BaseModel + # @!attribute server_label + # A label for this MCP server, used to identify it in tool calls. + # + # @return [String] + required :server_label, String + + # @!attribute type + # The type of the MCP tool. Always `mcp`. + # + # @return [Symbol, :mcp] + required :type, const: :mcp + + # @!attribute allowed_tools + # List of allowed tool names or a filter object. + # + # @return [Array, OpenAI::Models::Responses::Tool::Mcp::AllowedTools::McpToolFilter, nil] + optional :allowed_tools, union: -> { OpenAI::Responses::Tool::Mcp::AllowedTools }, nil?: true + + # @!attribute authorization + # An OAuth access token that can be used with a remote MCP server, either with a + # custom MCP server URL or a service connector. Your application must handle the + # OAuth authorization flow and provide the token here. 
+ # + # @return [String, nil] + optional :authorization, String + + # @!attribute connector_id + # Identifier for service connectors, like those available in ChatGPT. One of + # `server_url` or `connector_id` must be provided. Learn more about service + # connectors + # [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). + # + # Currently supported `connector_id` values are: + # + # - Dropbox: `connector_dropbox` + # - Gmail: `connector_gmail` + # - Google Calendar: `connector_googlecalendar` + # - Google Drive: `connector_googledrive` + # - Microsoft Teams: `connector_microsoftteams` + # - Outlook Calendar: `connector_outlookcalendar` + # - Outlook Email: `connector_outlookemail` + # - SharePoint: `connector_sharepoint` + # + # @return [Symbol, OpenAI::Models::Responses::Tool::Mcp::ConnectorID, nil] + optional :connector_id, enum: -> { OpenAI::Responses::Tool::Mcp::ConnectorID } + + # @!attribute headers + # Optional HTTP headers to send to the MCP server. Use for authentication or other + # purposes. + # + # @return [Hash{Symbol=>String}, nil] + optional :headers, OpenAI::Internal::Type::HashOf[String], nil?: true + + # @!attribute require_approval + # Specify which of the MCP server's tools require approval. + # + # @return [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter, Symbol, OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting, nil] + optional :require_approval, union: -> { OpenAI::Responses::Tool::Mcp::RequireApproval }, nil?: true + + # @!attribute server_description + # Optional description of the MCP server, used to provide more context. + # + # @return [String, nil] + optional :server_description, String + + # @!attribute server_url + # The URL for the MCP server. One of `server_url` or `connector_id` must be + # provided. + # + # @return [String, nil] + optional :server_url, String + + # @!method initialize(server_label:, allowed_tools: nil, authorization: nil, connector_id: nil, headers: nil, require_approval: nil, server_description: nil, server_url: nil, type: :mcp) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::Tool::Mcp} for more details. + # + # Give the model access to additional tools via remote Model Context Protocol + # (MCP) servers. + # [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). + # + # @param server_label [String] A label for this MCP server, used to identify it in tool calls. + # + # @param allowed_tools [Array, OpenAI::Models::Responses::Tool::Mcp::AllowedTools::McpToolFilter, nil] List of allowed tool names or a filter object. + # + # @param authorization [String] An OAuth access token that can be used with a remote MCP server, either + # + # @param connector_id [Symbol, OpenAI::Models::Responses::Tool::Mcp::ConnectorID] Identifier for service connectors, like those available in ChatGPT. One of + # + # @param headers [Hash{Symbol=>String}, nil] Optional HTTP headers to send to the MCP server. Use for authentication + # + # @param require_approval [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter, Symbol, OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting, nil] Specify which of the MCP server's tools require approval. + # + # @param server_description [String] Optional description of the MCP server, used to provide more context. + # + # @param server_url [String] The URL for the MCP server.
One of `server_url` or `connector_id` must be + # + # @param type [Symbol, :mcp] The type of the MCP tool. Always `mcp`. + + # List of allowed tool names or a filter object. + # + # @see OpenAI::Models::Responses::Tool::Mcp#allowed_tools + module AllowedTools + extend OpenAI::Internal::Type::Union + + # A string array of allowed tool names + variant -> { OpenAI::Models::Responses::Tool::Mcp::AllowedTools::StringArray } + + # A filter object to specify which tools are allowed. + variant -> { OpenAI::Responses::Tool::Mcp::AllowedTools::McpToolFilter } + + class McpToolFilter < OpenAI::Internal::Type::BaseModel + # @!attribute read_only + # Indicates whether or not a tool modifies data or is read-only. If an MCP server + # is + # [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + # it will match this filter. + # + # @return [Boolean, nil] + optional :read_only, OpenAI::Internal::Type::Boolean + + # @!attribute tool_names + # List of allowed tool names. + # + # @return [Array, nil] + optional :tool_names, OpenAI::Internal::Type::ArrayOf[String] + + # @!method initialize(read_only: nil, tool_names: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::Tool::Mcp::AllowedTools::McpToolFilter} for more + # details. + # + # A filter object to specify which tools are allowed. + # + # @param read_only [Boolean] Indicates whether or not a tool modifies data or is read-only. If an + # + # @param tool_names [Array] List of allowed tool names. + end + + # @!method self.variants + # @return [Array(Array, OpenAI::Models::Responses::Tool::Mcp::AllowedTools::McpToolFilter)] + + # @type [OpenAI::Internal::Type::Converter] + StringArray = OpenAI::Internal::Type::ArrayOf[String] + end + + # Identifier for service connectors, like those available in ChatGPT. One of + # `server_url` or `connector_id` must be provided. Learn more about service + # connectors + # [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). + # + # Currently supported `connector_id` values are: + # + # - Dropbox: `connector_dropbox` + # - Gmail: `connector_gmail` + # - Google Calendar: `connector_googlecalendar` + # - Google Drive: `connector_googledrive` + # - Microsoft Teams: `connector_microsoftteams` + # - Outlook Calendar: `connector_outlookcalendar` + # - Outlook Email: `connector_outlookemail` + # - SharePoint: `connector_sharepoint` + # + # @see OpenAI::Models::Responses::Tool::Mcp#connector_id + module ConnectorID + extend OpenAI::Internal::Type::Enum + + CONNECTOR_DROPBOX = :connector_dropbox + CONNECTOR_GMAIL = :connector_gmail + CONNECTOR_GOOGLECALENDAR = :connector_googlecalendar + CONNECTOR_GOOGLEDRIVE = :connector_googledrive + CONNECTOR_MICROSOFTTEAMS = :connector_microsoftteams + CONNECTOR_OUTLOOKCALENDAR = :connector_outlookcalendar + CONNECTOR_OUTLOOKEMAIL = :connector_outlookemail + CONNECTOR_SHAREPOINT = :connector_sharepoint + + # @!method self.values + # @return [Array] + end + + # Specify which of the MCP server's tools require approval. + # + # @see OpenAI::Models::Responses::Tool::Mcp#require_approval + module RequireApproval + extend OpenAI::Internal::Type::Union + + # Specify which of the MCP server's tools require approval. Can be + # `always`, `never`, or a filter object associated with tools + # that require approval. + variant -> { OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter } + + # Specify a single approval policy for all tools.
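+ # The `Mcp` attributes above combine into a single `tools` entry. A minimal
+ # sketch, assuming a placeholder server URL (`https://example.com/mcp`) and
+ # the `:never` approval setting defined just below:
+ #
+ #   mcp_tool = OpenAI::Models::Responses::Tool::Mcp.new(
+ #     server_label: "deepwiki",
+ #     server_url: "https://example.com/mcp",  # or pass connector_id: instead
+ #     allowed_tools: ["search", "summarize"], # the StringArray variant
+ #     require_approval: :never                # McpToolApprovalSetting
+ #   )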
One of `always` or + # `never`. When set to `always`, all tools will require approval. When + # set to `never`, all tools will not require approval. + variant enum: -> { OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting } + + class McpToolApprovalFilter < OpenAI::Internal::Type::BaseModel + # @!attribute always + # A filter object to specify which tools are allowed. + # + # @return [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always, nil] + optional :always, -> { OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always } + + # @!attribute never + # A filter object to specify which tools are allowed. + # + # @return [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never, nil] + optional :never, -> { OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never } + + # @!method initialize(always: nil, never: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter} + # for more details. + # + # Specify which of the MCP server's tools require approval. Can be `always`, + # `never`, or a filter object associated with tools that require approval. + # + # @param always [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always] A filter object to specify which tools are allowed. + # + # @param never [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never] A filter object to specify which tools are allowed. + + # @see OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter#always + class Always < OpenAI::Internal::Type::BaseModel + # @!attribute read_only + # Indicates whether or not a tool modifies data or is read-only. If an MCP server + # is + # [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + # it will match this filter. + # + # @return [Boolean, nil] + optional :read_only, OpenAI::Internal::Type::Boolean + + # @!attribute tool_names + # List of allowed tool names. + # + # @return [Array, nil] + optional :tool_names, OpenAI::Internal::Type::ArrayOf[String] + + # @!method initialize(read_only: nil, tool_names: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always} + # for more details. + # + # A filter object to specify which tools are allowed. + # + # @param read_only [Boolean] Indicates whether or not a tool modifies data or is read-only. If an + # + # @param tool_names [Array] List of allowed tool names. + end + + # @see OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter#never + class Never < OpenAI::Internal::Type::BaseModel + # @!attribute read_only + # Indicates whether or not a tool modifies data or is read-only. If an MCP server + # is + # [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + # it will match this filter. + # + # @return [Boolean, nil] + optional :read_only, OpenAI::Internal::Type::Boolean + + # @!attribute tool_names + # List of allowed tool names.
+ # + # @return [Array, nil] + optional :tool_names, OpenAI::Internal::Type::ArrayOf[String] + + # @!method initialize(read_only: nil, tool_names: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never} + # for more details. + # + # A filter object to specify which tools are allowed. + # + # @param read_only [Boolean] Indicates whether or not a tool modifies data or is read-only. If an + # + # @param tool_names [Array] List of allowed tool names. + end + end + + # Specify a single approval policy for all tools. One of `always` or `never`. When + # set to `always`, all tools will require approval. When set to `never`, all tools + # will not require approval. + module McpToolApprovalSetting + extend OpenAI::Internal::Type::Enum + + ALWAYS = :always + NEVER = :never + + # @!method self.values + # @return [Array] + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter, Symbol, OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting)] + end + end + + class CodeInterpreter < OpenAI::Internal::Type::BaseModel + # @!attribute container + # The code interpreter container. Can be a container ID or an object that + # specifies uploaded file IDs to make available to your code. + # + # @return [String, OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto] + required :container, union: -> { OpenAI::Responses::Tool::CodeInterpreter::Container } + + # @!attribute type + # The type of the code interpreter tool. Always `code_interpreter`. + # + # @return [Symbol, :code_interpreter] + required :type, const: :code_interpreter + + # @!method initialize(container:, type: :code_interpreter) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::Tool::CodeInterpreter} for more details. + # + # A tool that runs Python code to help generate a response to a prompt. + # + # @param container [String, OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto] The code interpreter container. Can be a container ID or an object that + # + # @param type [Symbol, :code_interpreter] The type of the code interpreter tool. Always `code_interpreter`. + + # The code interpreter container. Can be a container ID or an object that + # specifies uploaded file IDs to make available to your code. + # + # @see OpenAI::Models::Responses::Tool::CodeInterpreter#container + module Container + extend OpenAI::Internal::Type::Union + + # The container ID. + variant String + + # Configuration for a code interpreter container. Optionally specify the IDs + # of the files to run the code on. + variant -> { OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto } + + class CodeInterpreterToolAuto < OpenAI::Internal::Type::BaseModel + # @!attribute type + # Always `auto`. + # + # @return [Symbol, :auto] + required :type, const: :auto + + # @!attribute file_ids + # An optional list of uploaded files to make available to your code. + # + # @return [Array, nil] + optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] + + # @!method initialize(file_ids: nil, type: :auto) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto} + # for more details. + # + # Configuration for a code interpreter container.
Optionally specify the IDs of + # the files to run the code on. + # + # @param file_ids [Array] An optional list of uploaded files to make available to your code. + # + # @param type [Symbol, :auto] Always `auto`. + end + + # @!method self.variants + # @return [Array(String, OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto)] + end + end + + class ImageGeneration < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The type of the image generation tool. Always `image_generation`. + # + # @return [Symbol, :image_generation] + required :type, const: :image_generation + + # @!attribute background + # Background type for the generated image. One of `transparent`, `opaque`, or + # `auto`. Default: `auto`. + # + # @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Background, nil] + optional :background, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Background } + + # @!attribute input_fidelity + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + # + # @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::InputFidelity, nil] + optional :input_fidelity, + enum: -> { + OpenAI::Responses::Tool::ImageGeneration::InputFidelity + }, + nil?: true + + # @!attribute input_image_mask + # Optional mask for inpainting. Contains `image_url` (string, optional) and + # `file_id` (string, optional). + # + # @return [OpenAI::Models::Responses::Tool::ImageGeneration::InputImageMask, nil] + optional :input_image_mask, -> { OpenAI::Responses::Tool::ImageGeneration::InputImageMask } + + # @!attribute model + # The image generation model to use. Default: `gpt-image-1`. + # + # @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Model, nil] + optional :model, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Model } + + # @!attribute moderation + # Moderation level for the generated image. Default: `auto`. + # + # @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Moderation, nil] + optional :moderation, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Moderation } + + # @!attribute output_compression + # Compression level for the output image. Default: 100. + # + # @return [Integer, nil] + optional :output_compression, Integer + + # @!attribute output_format + # The output format of the generated image. One of `png`, `webp`, or `jpeg`. + # Default: `png`. + # + # @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::OutputFormat, nil] + optional :output_format, enum: -> { OpenAI::Responses::Tool::ImageGeneration::OutputFormat } + + # @!attribute partial_images + # Number of partial images to generate in streaming mode, from 0 (default value) + # to 3. + # + # @return [Integer, nil] + optional :partial_images, Integer + + # @!attribute quality + # The quality of the generated image. One of `low`, `medium`, `high`, or `auto`. + # Default: `auto`. + # + # @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Quality, nil] + optional :quality, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Quality } + + # @!attribute size + # The size of the generated image. One of `1024x1024`, `1024x1536`, `1536x1024`, + # or `auto`. Default: `auto`. 
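+ # A sketch of the image generation tool using a few of the knobs documented
+ # above; every parameter shown is optional, and `type` defaults to
+ # :image_generation in the initializer below:
+ #
+ #   image_tool = OpenAI::Models::Responses::Tool::ImageGeneration.new(
+ #     background: :transparent,
+ #     output_format: :png,
+ #     quality: :high,
+ #     size: :"1024x1024"
+ #   )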
+ # + # @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Size, nil] + optional :size, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Size } + + # @!method initialize(background: nil, input_fidelity: nil, input_image_mask: nil, model: nil, moderation: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, size: nil, type: :image_generation) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::Tool::ImageGeneration} for more details. + # + # A tool that generates images using a model like `gpt-image-1`. + # + # @param background [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Background] Background type for the generated image. One of `transparent`, + # + # @param input_fidelity [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::InputFidelity, nil] Control how much effort the model will exert to match the style and features, + # + # @param input_image_mask [OpenAI::Models::Responses::Tool::ImageGeneration::InputImageMask] Optional mask for inpainting. Contains `image_url` + # + # @param model [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Model] The image generation model to use. Default: `gpt-image-1`. + # + # @param moderation [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Moderation] Moderation level for the generated image. Default: `auto`. + # + # @param output_compression [Integer] Compression level for the output image. Default: 100. + # + # @param output_format [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::OutputFormat] The output format of the generated image. One of `png`, `webp`, or + # + # @param partial_images [Integer] Number of partial images to generate in streaming mode, from 0 (default value) t + # + # @param quality [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Quality] The quality of the generated image. One of `low`, `medium`, `high`, + # + # @param size [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Size] The size of the generated image. One of `1024x1024`, `1024x1536`, + # + # @param type [Symbol, :image_generation] The type of the image generation tool. Always `image_generation`. + + # Background type for the generated image. One of `transparent`, `opaque`, or + # `auto`. Default: `auto`. + # + # @see OpenAI::Models::Responses::Tool::ImageGeneration#background + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT = :transparent + OPAQUE = :opaque + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + # + # @see OpenAI::Models::Responses::Tool::ImageGeneration#input_fidelity + module InputFidelity + extend OpenAI::Internal::Type::Enum + + HIGH = :high + LOW = :low + + # @!method self.values + # @return [Array] + end + + # @see OpenAI::Models::Responses::Tool::ImageGeneration#input_image_mask + class InputImageMask < OpenAI::Internal::Type::BaseModel + # @!attribute file_id + # File ID for the mask image. + # + # @return [String, nil] + optional :file_id, String + + # @!attribute image_url + # Base64-encoded mask image.
+ # + # @return [String, nil] + optional :image_url, String + + # @!method initialize(file_id: nil, image_url: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::Tool::ImageGeneration::InputImageMask} for more + # details. + # + # Optional mask for inpainting. Contains `image_url` (string, optional) and + # `file_id` (string, optional). + # + # @param file_id [String] File ID for the mask image. + # + # @param image_url [String] Base64-encoded mask image. + end + + # The image generation model to use. Default: `gpt-image-1`. + # + # @see OpenAI::Models::Responses::Tool::ImageGeneration#model + module Model + extend OpenAI::Internal::Type::Enum + + GPT_IMAGE_1 = :"gpt-image-1" + + # @!method self.values + # @return [Array] + end + + # Moderation level for the generated image. Default: `auto`. + # + # @see OpenAI::Models::Responses::Tool::ImageGeneration#moderation + module Moderation + extend OpenAI::Internal::Type::Enum + + AUTO = :auto + LOW = :low + + # @!method self.values + # @return [Array] + end + + # The output format of the generated image. One of `png`, `webp`, or `jpeg`. + # Default: `png`. + # + # @see OpenAI::Models::Responses::Tool::ImageGeneration#output_format + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG = :png + WEBP = :webp + JPEG = :jpeg + + # @!method self.values + # @return [Array] + end + + # The quality of the generated image. One of `low`, `medium`, `high`, or `auto`. + # Default: `auto`. + # + # @see OpenAI::Models::Responses::Tool::ImageGeneration#quality + module Quality + extend OpenAI::Internal::Type::Enum + + LOW = :low + MEDIUM = :medium + HIGH = :high + AUTO = :auto + + # @!method self.values + # @return [Array] + end + + # The size of the generated image. One of `1024x1024`, `1024x1536`, `1536x1024`, + # or `auto`. Default: `auto`. + # + # @see OpenAI::Models::Responses::Tool::ImageGeneration#size + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024 = :"1024x1024" + SIZE_1024X1536 = :"1024x1536" + SIZE_1536X1024 = :"1536x1024" + AUTO = :auto + + # @!method self.values + # @return [Array] + end + end - # Defines a function in your own code the model can choose to call. Learn more - # about [function calling](https://platform.openai.com/docs/guides/function-calling). - variant :function, -> { OpenAI::Models::Responses::FunctionTool } + class LocalShell < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The type of the local shell tool. Always `local_shell`. + # + # @return [Symbol, :local_shell] + required :type, const: :local_shell - # A tool that controls a virtual computer. Learn more about the - # [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). - variant :computer_use_preview, -> { OpenAI::Models::Responses::ComputerTool } + # @!method initialize(type: :local_shell) + # A tool that allows the model to execute shell commands in a local environment. + # + # @param type [Symbol, :local_shell] The type of the local shell tool. Always `local_shell`. + end - # This tool searches the web for relevant results to use in a response. - # Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
- variant -> { OpenAI::Models::Responses::WebSearchTool } + # @!method self.variants + # @return [Array(OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool)] end end end diff --git a/lib/openai/models/responses/tool_choice_allowed.rb b/lib/openai/models/responses/tool_choice_allowed.rb new file mode 100644 index 00000000..228e4611 --- /dev/null +++ b/lib/openai/models/responses/tool_choice_allowed.rb @@ -0,0 +1,73 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ToolChoiceAllowed < OpenAI::Internal::Type::BaseModel + # @!attribute mode + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + # + # @return [Symbol, OpenAI::Models::Responses::ToolChoiceAllowed::Mode] + required :mode, enum: -> { OpenAI::Responses::ToolChoiceAllowed::Mode } + + # @!attribute tools + # A list of tool definitions that the model should be allowed to call. + # + # For the Responses API, the list of tool definitions might look like: + # + # ```json + # [ + # { "type": "function", "name": "get_weather" }, + # { "type": "mcp", "server_label": "deepwiki" }, + # { "type": "image_generation" } + # ] + # ``` + # + # @return [ArrayObject}>] + required :tools, + OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]] + + # @!attribute type + # Allowed tool configuration type. Always `allowed_tools`. + # + # @return [Symbol, :allowed_tools] + required :type, const: :allowed_tools + + # @!method initialize(mode:, tools:, type: :allowed_tools) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::ToolChoiceAllowed} for more details. + # + # Constrains the tools available to the model to a pre-defined set. + # + # @param mode [Symbol, OpenAI::Models::Responses::ToolChoiceAllowed::Mode] Constrains the tools available to the model to a pre-defined set. + # + # @param tools [ArrayObject}>] A list of tool definitions that the model should be allowed to call. + # + # @param type [Symbol, :allowed_tools] Allowed tool configuration type. Always `allowed_tools`. + + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + # + # @see OpenAI::Models::Responses::ToolChoiceAllowed#mode + module Mode + extend OpenAI::Internal::Type::Enum + + AUTO = :auto + REQUIRED = :required + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/responses/tool_choice_custom.rb b/lib/openai/models/responses/tool_choice_custom.rb new file mode 100644 index 00000000..310413ed --- /dev/null +++ b/lib/openai/models/responses/tool_choice_custom.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ToolChoiceCustom < OpenAI::Internal::Type::BaseModel + # @!attribute name + # The name of the custom tool to call.
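+ # `ToolChoiceAllowed` (the new file above) narrows the model to a subset of
+ # the request's tools. A hedged sketch of building one and passing it as a
+ # request's `tool_choice` value:
+ #
+ #   tool_choice = OpenAI::Models::Responses::ToolChoiceAllowed.new(
+ #     mode: :required,
+ #     tools: [
+ #       {type: "function", name: "get_weather"},
+ #       {type: "image_generation"}
+ #     ]
+ #   )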
+ # + # @return [String] + required :name, String + + # @!attribute type + # For custom tool calling, the type is always `custom`. + # + # @return [Symbol, :custom] + required :type, const: :custom + + # @!method initialize(name:, type: :custom) + # Use this option to force the model to call a specific custom tool. + # + # @param name [String] The name of the custom tool to call. + # + # @param type [Symbol, :custom] For custom tool calling, the type is always `custom`. + end + end + end +end diff --git a/lib/openai/models/responses/tool_choice_function.rb b/lib/openai/models/responses/tool_choice_function.rb index 08f9d77e..67f00750 100644 --- a/lib/openai/models/responses/tool_choice_function.rb +++ b/lib/openai/models/responses/tool_choice_function.rb @@ -3,7 +3,7 @@ module OpenAI module Models module Responses - class ToolChoiceFunction < OpenAI::BaseModel + class ToolChoiceFunction < OpenAI::Internal::Type::BaseModel # @!attribute name # The name of the function to call. # @@ -16,15 +16,12 @@ class ToolChoiceFunction < OpenAI::BaseModel # @return [Symbol, :function] required :type, const: :function - # @!parse - # # Use this option to force the model to call a specific function. - # # - # # @param name [String] - # # @param type [Symbol, :function] - # # - # def initialize(name:, type: :function, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(name:, type: :function) + # Use this option to force the model to call a specific function. + # + # @param name [String] The name of the function to call. + # + # @param type [Symbol, :function] For function calling, the type is always `function`. end end end diff --git a/lib/openai/models/responses/tool_choice_mcp.rb b/lib/openai/models/responses/tool_choice_mcp.rb new file mode 100644 index 00000000..1ced8a01 --- /dev/null +++ b/lib/openai/models/responses/tool_choice_mcp.rb @@ -0,0 +1,40 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ToolChoiceMcp < OpenAI::Internal::Type::BaseModel + # @!attribute server_label + # The label of the MCP server to use. + # + # @return [String] + required :server_label, String + + # @!attribute type + # For MCP tools, the type is always `mcp`. + # + # @return [Symbol, :mcp] + required :type, const: :mcp + + # @!attribute name + # The name of the tool to call on the server. + # + # @return [String, nil] + optional :name, String, nil?: true + + # @!method initialize(server_label:, name: nil, type: :mcp) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::ToolChoiceMcp} for more details. + # + # Use this option to force the model to call a specific tool on a remote MCP + # server. + # + # @param server_label [String] The label of the MCP server to use. + # + # @param name [String, nil] The name of the tool to call on the server. + # + # @param type [Symbol, :mcp] For MCP tools, the type is always `mcp`. + end + end + end +end diff --git a/lib/openai/models/responses/tool_choice_options.rb b/lib/openai/models/responses/tool_choice_options.rb index e72582b9..f43db682 100644 --- a/lib/openai/models/responses/tool_choice_options.rb +++ b/lib/openai/models/responses/tool_choice_options.rb @@ -3,22 +3,23 @@ module OpenAI module Models module Responses - # @abstract - # # Controls which (if any) tool is called by the model. # - # `none` means the model will not call any tool and instead generates a message. + # `none` means the model will not call any tool and instead generates a message.
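+ # Alongside the `ToolChoiceOptions` enum being converted here, the classes
+ # above force a specific tool rather than a general policy. A sketch of each
+ # as a `tool_choice` value (`get_weather`, `code_grader`, and `deepwiki` are
+ # placeholders):
+ #
+ #   OpenAI::Models::Responses::ToolChoiceFunction.new(name: "get_weather")
+ #   OpenAI::Models::Responses::ToolChoiceCustom.new(name: "code_grader")
+ #   OpenAI::Models::Responses::ToolChoiceMcp.new(server_label: "deepwiki", name: "search")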
# - # `auto` means the model can pick between generating a message or calling one or - # more tools. + # `auto` means the model can pick between generating a message or calling one or + # more tools. # - # `required` means the model must call one or more tools. - class ToolChoiceOptions < OpenAI::Enum + # `required` means the model must call one or more tools. + module ToolChoiceOptions + extend OpenAI::Internal::Type::Enum + NONE = :none AUTO = :auto REQUIRED = :required - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/responses/tool_choice_types.rb b/lib/openai/models/responses/tool_choice_types.rb index 3398854c..8b526e65 100644 --- a/lib/openai/models/responses/tool_choice_types.rb +++ b/lib/openai/models/responses/tool_choice_types.rb @@ -3,47 +3,55 @@ module OpenAI module Models module Responses - class ToolChoiceTypes < OpenAI::BaseModel + class ToolChoiceTypes < OpenAI::Internal::Type::BaseModel # @!attribute type # The type of hosted tool the model should use. Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). + # [built-in tools](https://platform.openai.com/docs/guides/tools). # - # Allowed values are: + # Allowed values are: # - # - `file_search` - # - `web_search_preview` - # - `computer_use_preview` + # - `file_search` + # - `web_search_preview` + # - `computer_use_preview` + # - `code_interpreter` + # - `image_generation` # # @return [Symbol, OpenAI::Models::Responses::ToolChoiceTypes::Type] - required :type, enum: -> { OpenAI::Models::Responses::ToolChoiceTypes::Type } - - # @!parse - # # Indicates that the model should use a built-in tool to generate a response. - # # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). - # # - # # @param type [Symbol, OpenAI::Models::Responses::ToolChoiceTypes::Type] - # # - # def initialize(type:, **) = super + required :type, enum: -> { OpenAI::Responses::ToolChoiceTypes::Type } - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(type:) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::ToolChoiceTypes} for more details. + # + # Indicates that the model should use a built-in tool to generate a response. + # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). + # + # @param type [Symbol, OpenAI::Models::Responses::ToolChoiceTypes::Type] The type of hosted tool the model should use. Learn more about + # The type of hosted tool the model should use. Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). + # [built-in tools](https://platform.openai.com/docs/guides/tools). # - # Allowed values are: + # Allowed values are: # - # - `file_search` - # - `web_search_preview` - # - `computer_use_preview` - class Type < OpenAI::Enum + # - `file_search` + # - `web_search_preview` + # - `computer_use_preview` + # - `code_interpreter` + # - `image_generation` + # + # @see OpenAI::Models::Responses::ToolChoiceTypes#type + module Type + extend OpenAI::Internal::Type::Enum + FILE_SEARCH = :file_search WEB_SEARCH_PREVIEW = :web_search_preview COMPUTER_USE_PREVIEW = :computer_use_preview WEB_SEARCH_PREVIEW_2025_03_11 = :web_search_preview_2025_03_11 + IMAGE_GENERATION = :image_generation + CODE_INTERPRETER = :code_interpreter - finalize!
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/responses/web_search_tool.rb b/lib/openai/models/responses/web_search_tool.rb index 6a2acdc0..4dd6b2ec 100644 --- a/lib/openai/models/responses/web_search_tool.rb +++ b/lib/openai/models/responses/web_search_tool.rb @@ -3,129 +3,119 @@ module OpenAI module Models module Responses - class WebSearchTool < OpenAI::BaseModel + class WebSearchTool < OpenAI::Internal::Type::BaseModel # @!attribute type - # The type of the web search tool. One of: - # - # - `web_search_preview` - # - `web_search_preview_2025_03_11` + # The type of the web search tool. One of `web_search_preview` or + # `web_search_preview_2025_03_11`. # # @return [Symbol, OpenAI::Models::Responses::WebSearchTool::Type] - required :type, enum: -> { OpenAI::Models::Responses::WebSearchTool::Type } + required :type, enum: -> { OpenAI::Responses::WebSearchTool::Type } - # @!attribute [r] search_context_size + # @!attribute search_context_size # High level guidance for the amount of context window space to use for the - # search. One of `low`, `medium`, or `high`. `medium` is the default. + # search. One of `low`, `medium`, or `high`. `medium` is the default. # # @return [Symbol, OpenAI::Models::Responses::WebSearchTool::SearchContextSize, nil] - optional :search_context_size, enum: -> { OpenAI::Models::Responses::WebSearchTool::SearchContextSize } - - # @!parse - # # @return [Symbol, OpenAI::Models::Responses::WebSearchTool::SearchContextSize] - # attr_writer :search_context_size + optional :search_context_size, enum: -> { OpenAI::Responses::WebSearchTool::SearchContextSize } # @!attribute user_location + # The user's location. # # @return [OpenAI::Models::Responses::WebSearchTool::UserLocation, nil] - optional :user_location, -> { OpenAI::Models::Responses::WebSearchTool::UserLocation }, nil?: true - - # @!parse - # # This tool searches the web for relevant results to use in a response. Learn more - # # about the - # # [web search tool](https://platform.openai.com/docs/guides/tools-web-search). - # # - # # @param type [Symbol, OpenAI::Models::Responses::WebSearchTool::Type] - # # @param search_context_size [Symbol, OpenAI::Models::Responses::WebSearchTool::SearchContextSize] - # # @param user_location [OpenAI::Models::Responses::WebSearchTool::UserLocation, nil] - # # - # def initialize(type:, search_context_size: nil, user_location: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + optional :user_location, -> { OpenAI::Responses::WebSearchTool::UserLocation }, nil?: true + + # @!method initialize(type:, search_context_size: nil, user_location: nil) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::WebSearchTool} for more details. + # + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search). + # + # @param type [Symbol, OpenAI::Models::Responses::WebSearchTool::Type] The type of the web search tool. One of `web_search_preview` or `web_search_prev # - # The type of the web search tool.
One of: + # @param search_context_size [Symbol, OpenAI::Models::Responses::WebSearchTool::SearchContextSize] High level guidance for the amount of context window space to use for the search # - # - `web_search_preview` - # - `web_search_preview_2025_03_11` - class Type < OpenAI::Enum + # @param user_location [OpenAI::Models::Responses::WebSearchTool::UserLocation, nil] The user's location. + + # The type of the web search tool. One of `web_search_preview` or + # `web_search_preview_2025_03_11`. + # + # @see OpenAI::Models::Responses::WebSearchTool#type + module Type + extend OpenAI::Internal::Type::Enum + WEB_SEARCH_PREVIEW = :web_search_preview WEB_SEARCH_PREVIEW_2025_03_11 = :web_search_preview_2025_03_11 - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # # High level guidance for the amount of context window space to use for the - # search. One of `low`, `medium`, or `high`. `medium` is the default. - class SearchContextSize < OpenAI::Enum + # search. One of `low`, `medium`, or `high`. `medium` is the default. + # + # @see OpenAI::Models::Responses::WebSearchTool#search_context_size + module SearchContextSize + extend OpenAI::Internal::Type::Enum + LOW = :low MEDIUM = :medium HIGH = :high - finalize! + # @!method self.values + # @return [Array] end - class UserLocation < OpenAI::BaseModel + # @see OpenAI::Models::Responses::WebSearchTool#user_location + class UserLocation < OpenAI::Internal::Type::BaseModel # @!attribute type # The type of location approximation. Always `approximate`. # # @return [Symbol, :approximate] required :type, const: :approximate - # @!attribute [r] city + # @!attribute city # Free text input for the city of the user, e.g. `San Francisco`. # # @return [String, nil] - optional :city, String - - # @!parse - # # @return [String] - # attr_writer :city + optional :city, String, nil?: true - # @!attribute [r] country + # @!attribute country # The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of - # the user, e.g. `US`. + # the user, e.g. `US`. # # @return [String, nil] - optional :country, String - - # @!parse - # # @return [String] - # attr_writer :country + optional :country, String, nil?: true - # @!attribute [r] region + # @!attribute region # Free text input for the region of the user, e.g. `California`. # # @return [String, nil] - optional :region, String + optional :region, String, nil?: true - # @!parse - # # @return [String] - # attr_writer :region - - # @!attribute [r] timezone + # @!attribute timezone # The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the - # user, e.g. `America/Los_Angeles`. + # user, e.g. `America/Los_Angeles`. # # @return [String, nil] - optional :timezone, String - - # @!parse - # # @return [String] - # attr_writer :timezone - - # @!parse - # # @param city [String] - # # @param country [String] - # # @param region [String] - # # @param timezone [String] - # # @param type [Symbol, :approximate] - # # - # def initialize(city: nil, country: nil, region: nil, timezone: nil, type: :approximate, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + optional :timezone, String, nil?: true + + # @!method initialize(city: nil, country: nil, region: nil, timezone: nil, type: :approximate) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::Responses::WebSearchTool::UserLocation} for more details. + # + # The user's location. + # + # @param city [String, nil] Free text input for the city of the user, e.g. `San Francisco`.
+ # + # @param country [String, nil] The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of t + # + # @param region [String, nil] Free text input for the region of the user, e.g. `California`. + # + # @param timezone [String, nil] The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user + # + # @param type [Symbol, :approximate] The type of location approximation. Always `approximate`. end end end diff --git a/lib/openai/models/responses_model.rb b/lib/openai/models/responses_model.rb new file mode 100644 index 00000000..e86bc725 --- /dev/null +++ b/lib/openai/models/responses_model.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module ResponsesModel + extend OpenAI::Internal::Type::Union + + variant String + + variant enum: -> { OpenAI::ChatModel } + + variant enum: -> { OpenAI::ResponsesModel::ResponsesOnlyModel } + + module ResponsesOnlyModel + extend OpenAI::Internal::Type::Enum + + O1_PRO = :"o1-pro" + O1_PRO_2025_03_19 = :"o1-pro-2025-03-19" + O3_PRO = :"o3-pro" + O3_PRO_2025_06_10 = :"o3-pro-2025-06-10" + O3_DEEP_RESEARCH = :"o3-deep-research" + O3_DEEP_RESEARCH_2025_06_26 = :"o3-deep-research-2025-06-26" + O4_MINI_DEEP_RESEARCH = :"o4-mini-deep-research" + O4_MINI_DEEP_RESEARCH_2025_06_26 = :"o4-mini-deep-research-2025-06-26" + COMPUTER_USE_PREVIEW = :"computer-use-preview" + COMPUTER_USE_PREVIEW_2025_03_11 = :"computer-use-preview-2025-03-11" + + # @!method self.values + # @return [Array] + end + + # @!method self.variants + # @return [Array(String, Symbol, OpenAI::Models::ChatModel, Symbol, OpenAI::Models::ResponsesModel::ResponsesOnlyModel)] + end + end +end diff --git a/lib/openai/models/static_file_chunking_strategy.rb b/lib/openai/models/static_file_chunking_strategy.rb index 730e687e..ac676b5d 100644 --- a/lib/openai/models/static_file_chunking_strategy.rb +++ b/lib/openai/models/static_file_chunking_strategy.rb @@ -2,29 +2,29 @@ module OpenAI module Models - class StaticFileChunkingStrategy < OpenAI::BaseModel + class StaticFileChunkingStrategy < OpenAI::Internal::Type::BaseModel # @!attribute chunk_overlap_tokens # The number of tokens that overlap between chunks. The default value is `400`. # - # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. # # @return [Integer] required :chunk_overlap_tokens, Integer # @!attribute max_chunk_size_tokens # The maximum number of tokens in each chunk. The default value is `800`. The - # minimum value is `100` and the maximum value is `4096`. + # minimum value is `100` and the maximum value is `4096`. # # @return [Integer] required :max_chunk_size_tokens, Integer - # @!parse - # # @param chunk_overlap_tokens [Integer] - # # @param max_chunk_size_tokens [Integer] - # # - # def initialize(chunk_overlap_tokens:, max_chunk_size_tokens:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(chunk_overlap_tokens:, max_chunk_size_tokens:) + # Some parameter documentation has been truncated, see + # {OpenAI::Models::StaticFileChunkingStrategy} for more details. + # + # @param chunk_overlap_tokens [Integer] The number of tokens that overlap between chunks. The default value is `400`. + # + # @param max_chunk_size_tokens [Integer] The maximum number of tokens in each chunk. The default value is `800`.
The mini end end end diff --git a/lib/openai/models/static_file_chunking_strategy_object.rb b/lib/openai/models/static_file_chunking_strategy_object.rb index be6299c5..43c0f303 100644 --- a/lib/openai/models/static_file_chunking_strategy_object.rb +++ b/lib/openai/models/static_file_chunking_strategy_object.rb @@ -2,11 +2,11 @@ module OpenAI module Models - class StaticFileChunkingStrategyObject < OpenAI::BaseModel + class StaticFileChunkingStrategyObject < OpenAI::Internal::Type::BaseModel # @!attribute static # # @return [OpenAI::Models::StaticFileChunkingStrategy] - required :static, -> { OpenAI::Models::StaticFileChunkingStrategy } + required :static, -> { OpenAI::StaticFileChunkingStrategy } # @!attribute type # Always `static`. @@ -14,13 +14,10 @@ class StaticFileChunkingStrategyObject < OpenAI::BaseModel # @return [Symbol, :static] required :type, const: :static - # @!parse - # # @param static [OpenAI::Models::StaticFileChunkingStrategy] - # # @param type [Symbol, :static] - # # - # def initialize(static:, type: :static, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(static:, type: :static) + # @param static [OpenAI::Models::StaticFileChunkingStrategy] + # + # @param type [Symbol, :static] Always `static`. end end end diff --git a/lib/openai/models/static_file_chunking_strategy_object_param.rb b/lib/openai/models/static_file_chunking_strategy_object_param.rb index 21b6d0ac..3368e144 100644 --- a/lib/openai/models/static_file_chunking_strategy_object_param.rb +++ b/lib/openai/models/static_file_chunking_strategy_object_param.rb @@ -2,11 +2,11 @@ module OpenAI module Models - class StaticFileChunkingStrategyObjectParam < OpenAI::BaseModel + class StaticFileChunkingStrategyObjectParam < OpenAI::Internal::Type::BaseModel # @!attribute static # # @return [OpenAI::Models::StaticFileChunkingStrategy] - required :static, -> { OpenAI::Models::StaticFileChunkingStrategy } + required :static, -> { OpenAI::StaticFileChunkingStrategy } # @!attribute type # Always `static`. @@ -14,15 +14,12 @@ class StaticFileChunkingStrategyObjectParam < OpenAI::BaseModel # @return [Symbol, :static] required :type, const: :static - # @!parse - # # Customize your own chunking strategy by setting chunk size and chunk overlap. - # # - # # @param static [OpenAI::Models::StaticFileChunkingStrategy] - # # @param type [Symbol, :static] - # # - # def initialize(static:, type: :static, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(static:, type: :static) + # Customize your own chunking strategy by setting chunk size and chunk overlap. + # + # @param static [OpenAI::Models::StaticFileChunkingStrategy] + # + # @param type [Symbol, :static] Always `static`. end end end diff --git a/lib/openai/models/upload.rb b/lib/openai/models/upload.rb index 0e6b2be5..e51974b2 100644 --- a/lib/openai/models/upload.rb +++ b/lib/openai/models/upload.rb @@ -2,7 +2,8 @@ module OpenAI module Models - class Upload < OpenAI::BaseModel + # @see OpenAI::Resources::Uploads#create + class Upload < OpenAI::Internal::Type::BaseModel # @!attribute id # The Upload unique identifier, which can be referenced in API endpoints. # @@ -41,8 +42,8 @@ class Upload < OpenAI::BaseModel # @!attribute purpose # The intended purpose of the file. - # [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose) - # for acceptable values. 
+ # [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose) + # for acceptable values. # # @return [String] required :purpose, String @@ -51,41 +52,51 @@ class Upload < OpenAI::BaseModel # The status of the Upload. # # @return [Symbol, OpenAI::Models::Upload::Status] - required :status, enum: -> { OpenAI::Models::Upload::Status } + required :status, enum: -> { OpenAI::Upload::Status } # @!attribute file # The `File` object represents a document that has been uploaded to OpenAI. # # @return [OpenAI::Models::FileObject, nil] - optional :file, -> { OpenAI::Models::FileObject }, nil?: true + optional :file, -> { OpenAI::FileObject }, nil?: true - # @!parse - # # The Upload object can accept byte chunks in the form of Parts. - # # - # # @param id [String] - # # @param bytes [Integer] - # # @param created_at [Integer] - # # @param expires_at [Integer] - # # @param filename [String] - # # @param purpose [String] - # # @param status [Symbol, OpenAI::Models::Upload::Status] - # # @param file [OpenAI::Models::FileObject, nil] - # # @param object [Symbol, :upload] - # # - # def initialize(id:, bytes:, created_at:, expires_at:, filename:, purpose:, status:, file: nil, object: :upload, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(id:, bytes:, created_at:, expires_at:, filename:, purpose:, status:, file: nil, object: :upload) + # Some parameter documentation has been truncated, see {OpenAI::Models::Upload} + # for more details. + # + # The Upload object can accept byte chunks in the form of Parts. + # + # @param id [String] The Upload unique identifier, which can be referenced in API endpoints. + # + # @param bytes [Integer] The intended number of bytes to be uploaded. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the Upload was created. + # + # @param expires_at [Integer] The Unix timestamp (in seconds) for when the Upload will expire. # + # @param filename [String] The name of the file to be uploaded. + # + # @param purpose [String] The intended purpose of the file. [Please refer here](https://platform.openai.co + # + # @param status [Symbol, OpenAI::Models::Upload::Status] The status of the Upload. + # + # @param file [OpenAI::Models::FileObject, nil] The `File` object represents a document that has been uploaded to OpenAI. + # + # @param object [Symbol, :upload] The object type, which is always "upload". + # The status of the Upload. - class Status < OpenAI::Enum + # + # @see OpenAI::Models::Upload#status + module Status + extend OpenAI::Internal::Type::Enum + PENDING = :pending COMPLETED = :completed CANCELLED = :cancelled EXPIRED = :expired - finalize!
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/upload_cancel_params.rb b/lib/openai/models/upload_cancel_params.rb index 164c052a..717800ca 100644 --- a/lib/openai/models/upload_cancel_params.rb +++ b/lib/openai/models/upload_cancel_params.rb @@ -2,17 +2,13 @@ module OpenAI module Models - class UploadCancelParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Uploads#cancel + class UploadCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/upload_complete_params.rb b/lib/openai/models/upload_complete_params.rb index 1a2b22be..2d67440d 100644 --- a/lib/openai/models/upload_complete_params.rb +++ b/lib/openai/models/upload_complete_params.rb @@ -2,36 +2,33 @@ module OpenAI module Models - class UploadCompleteParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Uploads#complete + class UploadCompleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute part_ids # The ordered list of Part IDs. # # @return [Array] - required :part_ids, OpenAI::ArrayOf[String] + required :part_ids, OpenAI::Internal::Type::ArrayOf[String] - # @!attribute [r] md5 + # @!attribute md5 # The optional md5 checksum for the file contents to verify if the bytes uploaded - # matches what you expect. + # matches what you expect. # # @return [String, nil] optional :md5, String - # @!parse - # # @return [String] - # attr_writer :md5 - - # @!parse - # # @param part_ids [Array] - # # @param md5 [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(part_ids:, md5: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(part_ids:, md5: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::UploadCompleteParams} for more details. + # + # @param part_ids [Array] The ordered list of Part IDs. 
+ # + # @param md5 [String] The optional md5 checksum for the file contents to verify if the bytes uploaded + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/upload_create_params.rb b/lib/openai/models/upload_create_params.rb index 6e279c41..2431059e 100644 --- a/lib/openai/models/upload_create_params.rb +++ b/lib/openai/models/upload_create_params.rb @@ -2,10 +2,10 @@ module OpenAI module Models - class UploadCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Uploads#create + class UploadCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute bytes # The number of bytes in the file you are uploading. @@ -22,8 +22,8 @@ class UploadCreateParams < OpenAI::BaseModel # @!attribute mime_type # The MIME type of the file. # - # This must fall within the supported MIME types for your file purpose. See the - # supported MIME types for assistants and vision. + # This must fall within the supported MIME types for your file purpose. See the + # supported MIME types for assistants and vision. # # @return [String] required :mime_type, String @@ -31,22 +31,61 @@ class UploadCreateParams < OpenAI::BaseModel # @!attribute purpose # The intended purpose of the uploaded file. # - # See the - # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + # See the + # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). # # @return [Symbol, OpenAI::Models::FilePurpose] - required :purpose, enum: -> { OpenAI::Models::FilePurpose } - - # @!parse - # # @param bytes [Integer] - # # @param filename [String] - # # @param mime_type [String] - # # @param purpose [Symbol, OpenAI::Models::FilePurpose] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(bytes:, filename:, mime_type:, purpose:, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + required :purpose, enum: -> { OpenAI::FilePurpose } + + # @!attribute expires_after + # The expiration policy for a file. By default, files with `purpose=batch` expire + # after 30 days and all other files are persisted until they are manually deleted. + # + # @return [OpenAI::Models::UploadCreateParams::ExpiresAfter, nil] + optional :expires_after, -> { OpenAI::UploadCreateParams::ExpiresAfter } + + # @!method initialize(bytes:, filename:, mime_type:, purpose:, expires_after: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::UploadCreateParams} for more details. + # + # @param bytes [Integer] The number of bytes in the file you are uploading. + # + # @param filename [String] The name of the file to upload. + # + # @param mime_type [String] The MIME type of the file. + # + # @param purpose [Symbol, OpenAI::Models::FilePurpose] The intended purpose of the uploaded file. + # + # @param expires_after [OpenAI::Models::UploadCreateParams::ExpiresAfter] The expiration policy for a file. 
By default, files with `purpose=batch` expire + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + # @!attribute anchor + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `created_at`. + # + # @return [Symbol, :created_at] + required :anchor, const: :created_at + + # @!attribute seconds + # The number of seconds after the anchor time that the file will expire. Must be + # between 3600 (1 hour) and 2592000 (30 days). + # + # @return [Integer] + required :seconds, Integer + + # @!method initialize(seconds:, anchor: :created_at) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::UploadCreateParams::ExpiresAfter} for more details. + # + # The expiration policy for a file. By default, files with `purpose=batch` expire + # after 30 days and all other files are persisted until they are manually deleted. + # + # @param seconds [Integer] The number of seconds after the anchor time that the file will expire. Must be b + # + # @param anchor [Symbol, :created_at] Anchor timestamp after which the expiration policy applies. Supported anchors: ` + end end end end diff --git a/lib/openai/models/uploads/part_create_params.rb b/lib/openai/models/uploads/part_create_params.rb index a25b50cc..1df047de 100644 --- a/lib/openai/models/uploads/part_create_params.rb +++ b/lib/openai/models/uploads/part_create_params.rb @@ -3,24 +3,24 @@ module OpenAI module Models module Uploads - class PartCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::Uploads::Parts#create + class PartCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute data # The chunk of bytes for this Part. # - # @return [IO, StringIO] - required :data, IO + # @return [Pathname, StringIO, IO, String, OpenAI::FilePart] + required :data, OpenAI::Internal::Type::FileInput - # @!parse - # # @param data [IO, StringIO] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(data:, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(data:, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Uploads::PartCreateParams} for more details. + # + # @param data [Pathname, StringIO, IO, String, OpenAI::FilePart] The chunk of bytes for this Part. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/uploads/upload_part.rb b/lib/openai/models/uploads/upload_part.rb index 8edf74ce..f0b61875 100644 --- a/lib/openai/models/uploads/upload_part.rb +++ b/lib/openai/models/uploads/upload_part.rb @@ -3,7 +3,8 @@ module OpenAI module Models module Uploads - class UploadPart < OpenAI::BaseModel + # @see OpenAI::Resources::Uploads::Parts#create + class UploadPart < OpenAI::Internal::Type::BaseModel # @!attribute id # The upload Part unique identifier, which can be referenced in API endpoints. # @@ -28,17 +29,16 @@ class UploadPart < OpenAI::BaseModel # @return [String] required :upload_id, String - # @!parse - # # The upload Part represents a chunk of bytes we can add to an Upload object. 
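
Taken together, the Upload, UploadCreateParams, UploadCompleteParams, and Uploads::PartCreateParams hunks above describe a three-step chunked-upload flow: create an Upload shell, add byte ranges as Parts, then complete the Upload with the ordered Part IDs. A minimal sketch of that flow follows. The resource methods come from the `@see` tags in the diff (`OpenAI::Resources::Uploads#create`, `OpenAI::Resources::Uploads::Parts#create`, `OpenAI::Resources::Uploads#complete`); the client construction, the local file name, and the use of plain hashes for nested params are assumptions, not part of this patch.

require "openai"
require "pathname"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"]) # assumed client setup

# 1. Create the Upload shell. `expires_after` is the policy introduced in this
#    patch: anchor must be :created_at, seconds between 3600 and 2592000.
upload = client.uploads.create(
  bytes: File.size("training.jsonl"), # illustrative local file
  filename: "training.jsonl",
  mime_type: "application/jsonl",
  purpose: :"fine-tune",
  expires_after: {anchor: :created_at, seconds: 3600}
)

# 2. Add the bytes as a Part. Per the FileInput change above, `data` may be a
#    Pathname, StringIO, IO, String, or OpenAI::FilePart.
part = client.uploads.parts.create(upload.id, data: Pathname("training.jsonl"))

# 3. Complete the Upload, listing the Part IDs in order.
client.uploads.complete(upload.id, part_ids: [part.id])
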
- # # - # # @param id [String] - # # @param created_at [Integer] - # # @param upload_id [String] - # # @param object [Symbol, :"upload.part"] - # # - # def initialize(id:, created_at:, upload_id:, object: :"upload.part", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, created_at:, upload_id:, object: :"upload.part") + # The upload Part represents a chunk of bytes we can add to an Upload object. + # + # @param id [String] The upload Part unique identifier, which can be referenced in API endpoints. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the Part was created. + # + # @param upload_id [String] The ID of the Upload object that this Part was added to. + # + # @param object [Symbol, :"upload.part"] The object type, which is always `upload.part`. end end diff --git a/lib/openai/models/vector_store.rb b/lib/openai/models/vector_store.rb index 7593bb95..6a930067 100644 --- a/lib/openai/models/vector_store.rb +++ b/lib/openai/models/vector_store.rb @@ -2,7 +2,8 @@ module OpenAI module Models - class VectorStore < OpenAI::BaseModel + # @see OpenAI::Resources::VectorStores#create + class VectorStore < OpenAI::Internal::Type::BaseModel # @!attribute id # The identifier, which can be referenced in API endpoints. # @@ -18,7 +19,7 @@ class VectorStore < OpenAI::BaseModel # @!attribute file_counts # # @return [OpenAI::Models::VectorStore::FileCounts] - required :file_counts, -> { OpenAI::Models::VectorStore::FileCounts } + required :file_counts, -> { OpenAI::VectorStore::FileCounts } # @!attribute last_active_at # The Unix timestamp (in seconds) for when the vector store was last active. @@ -28,14 +29,14 @@ class VectorStore < OpenAI::BaseModel # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - required :metadata, OpenAI::HashOf[String], nil?: true + required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute name # The name of the vector store. @@ -51,11 +52,11 @@ class VectorStore < OpenAI::BaseModel # @!attribute status # The status of the vector store, which can be either `expired`, `in_progress`, or - # `completed`. A status of `completed` indicates that the vector store is ready - # for use. + # `completed`. A status of `completed` indicates that the vector store is ready + # for use. # # @return [Symbol, OpenAI::Models::VectorStore::Status] - required :status, enum: -> { OpenAI::Models::VectorStore::Status } + required :status, enum: -> { OpenAI::VectorStore::Status } # @!attribute usage_bytes # The total number of bytes used by the files in the vector store. @@ -63,15 +64,11 @@ class VectorStore < OpenAI::BaseModel # @return [Integer] required :usage_bytes, Integer - # @!attribute [r] expires_after + # @!attribute expires_after # The expiration policy for a vector store. 
# # @return [OpenAI::Models::VectorStore::ExpiresAfter, nil] - optional :expires_after, -> { OpenAI::Models::VectorStore::ExpiresAfter } - - # @!parse - # # @return [OpenAI::Models::VectorStore::ExpiresAfter] - # attr_writer :expires_after + optional :expires_after, -> { OpenAI::VectorStore::ExpiresAfter } # @!attribute expires_at # The Unix timestamp (in seconds) for when the vector store will expire. @@ -79,42 +76,37 @@ class VectorStore < OpenAI::BaseModel # @return [Integer, nil] optional :expires_at, Integer, nil?: true - # @!parse - # # A vector store is a collection of processed files can be used by the - # # `file_search` tool. - # # - # # @param id [String] - # # @param created_at [Integer] - # # @param file_counts [OpenAI::Models::VectorStore::FileCounts] - # # @param last_active_at [Integer, nil] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param name [String] - # # @param status [Symbol, OpenAI::Models::VectorStore::Status] - # # @param usage_bytes [Integer] - # # @param expires_after [OpenAI::Models::VectorStore::ExpiresAfter] - # # @param expires_at [Integer, nil] - # # @param object [Symbol, :vector_store] - # # - # def initialize( - # id:, - # created_at:, - # file_counts:, - # last_active_at:, - # metadata:, - # name:, - # status:, - # usage_bytes:, - # expires_after: nil, - # expires_at: nil, - # object: :vector_store, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class FileCounts < OpenAI::BaseModel + # @!method initialize(id:, created_at:, file_counts:, last_active_at:, metadata:, name:, status:, usage_bytes:, expires_after: nil, expires_at: nil, object: :vector_store) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStore} for more details. + # + # A vector store is a collection of processed files can be used by the + # `file_search` tool. + # + # @param id [String] The identifier, which can be referenced in API endpoints. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the vector store was created. + # + # @param file_counts [OpenAI::Models::VectorStore::FileCounts] + # + # @param last_active_at [Integer, nil] The Unix timestamp (in seconds) for when the vector store was last active. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param name [String] The name of the vector store. + # + # @param status [Symbol, OpenAI::Models::VectorStore::Status] The status of the vector store, which can be either `expired`, `in_progress`, or + # + # @param usage_bytes [Integer] The total number of bytes used by the files in the vector store. + # + # @param expires_after [OpenAI::Models::VectorStore::ExpiresAfter] The expiration policy for a vector store. + # + # @param expires_at [Integer, nil] The Unix timestamp (in seconds) for when the vector store will expire. + # + # @param object [Symbol, :vector_store] The object type, which is always `vector_store`. + + # @see OpenAI::Models::VectorStore#file_counts + class FileCounts < OpenAI::Internal::Type::BaseModel # @!attribute cancelled # The number of files that were cancelled. 
# @@ -145,35 +137,39 @@ class FileCounts < OpenAI::BaseModel # @return [Integer] required :total, Integer - # @!parse - # # @param cancelled [Integer] - # # @param completed [Integer] - # # @param failed [Integer] - # # @param in_progress [Integer] - # # @param total [Integer] - # # - # def initialize(cancelled:, completed:, failed:, in_progress:, total:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(cancelled:, completed:, failed:, in_progress:, total:) + # @param cancelled [Integer] The number of files that were cancelled. + # + # @param completed [Integer] The number of files that have been successfully processed. + # + # @param failed [Integer] The number of files that have failed to process. + # + # @param in_progress [Integer] The number of files that are currently being processed. + # + # @param total [Integer] The total number of files. end - # @abstract - # # The status of the vector store, which can be either `expired`, `in_progress`, or - # `completed`. A status of `completed` indicates that the vector store is ready - # for use. - class Status < OpenAI::Enum + # `completed`. A status of `completed` indicates that the vector store is ready + # for use. + # + # @see OpenAI::Models::VectorStore#status + module Status + extend OpenAI::Internal::Type::Enum + EXPIRED = :expired IN_PROGRESS = :in_progress COMPLETED = :completed - finalize! + # @!method self.values + # @return [Array] end - class ExpiresAfter < OpenAI::BaseModel + # @see OpenAI::Models::VectorStore#expires_after + class ExpiresAfter < OpenAI::Internal::Type::BaseModel # @!attribute anchor # Anchor timestamp after which the expiration policy applies. Supported anchors: - # `last_active_at`. + # `last_active_at`. # # @return [Symbol, :last_active_at] required :anchor, const: :last_active_at @@ -184,15 +180,15 @@ class ExpiresAfter < OpenAI::BaseModel # @return [Integer] required :days, Integer - # @!parse - # # The expiration policy for a vector store. - # # - # # @param days [Integer] - # # @param anchor [Symbol, :last_active_at] - # # - # def initialize(days:, anchor: :last_active_at, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(days:, anchor: :last_active_at) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStore::ExpiresAfter} for more details. + # + # The expiration policy for a vector store. + # + # @param days [Integer] The number of days after the anchor time that the vector store will expire. + # + # @param anchor [Symbol, :last_active_at] Anchor timestamp after which the expiration policy applies. Supported anchors: ` end end end diff --git a/lib/openai/models/vector_store_create_params.rb b/lib/openai/models/vector_store_create_params.rb index 9197db12..dfe50418 100644 --- a/lib/openai/models/vector_store_create_params.rb +++ b/lib/openai/models/vector_store_create_params.rb @@ -2,91 +2,69 @@ module OpenAI module Models - class VectorStoreCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores#create + class VectorStoreCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!attribute [r] chunking_strategy + # @!attribute chunking_strategy # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. 
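
One practical consequence of the VectorStore hunks above: `Status` is now a module-based enum whose members are plain symbols (`:expired`, `:in_progress`, `:completed`), so readiness checks are simple symbol comparisons. A polling sketch, reusing the assumed client from the earlier example and a hypothetical store ID; `retrieve` is wired up as `OpenAI::Resources::VectorStores#retrieve` later in this diff:

vs = client.vector_stores.retrieve("vs_abc123") # hypothetical ID

until vs.status == :completed
  raise "vector store expired before indexing finished" if vs.status == :expired
  sleep 1
  vs = client.vector_stores.retrieve(vs.id)
end

# FileCounts is the nested model shown above.
puts "#{vs.file_counts.completed}/#{vs.file_counts.total} files ready"
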
Only applicable if `file_ids` is non-empty. + # strategy. Only applicable if `file_ids` is non-empty. # # @return [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam, nil] - optional :chunking_strategy, union: -> { OpenAI::Models::FileChunkingStrategyParam } + optional :chunking_strategy, union: -> { OpenAI::FileChunkingStrategyParam } - # @!parse - # # @return [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] - # attr_writer :chunking_strategy - - # @!attribute [r] expires_after + # @!attribute expires_after # The expiration policy for a vector store. # # @return [OpenAI::Models::VectorStoreCreateParams::ExpiresAfter, nil] - optional :expires_after, -> { OpenAI::Models::VectorStoreCreateParams::ExpiresAfter } - - # @!parse - # # @return [OpenAI::Models::VectorStoreCreateParams::ExpiresAfter] - # attr_writer :expires_after + optional :expires_after, -> { OpenAI::VectorStoreCreateParams::ExpiresAfter } - # @!attribute [r] file_ids + # @!attribute file_ids # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # the vector store should use. Useful for tools like `file_search` that can access - # files. + # the vector store should use. Useful for tools like `file_search` that can access + # files. # # @return [Array, nil] - optional :file_ids, OpenAI::ArrayOf[String] - - # @!parse - # # @return [Array] - # attr_writer :file_ids + optional :file_ids, OpenAI::Internal::Type::ArrayOf[String] # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true - # @!attribute [r] name + # @!attribute name # The name of the vector store. # # @return [String, nil] optional :name, String - # @!parse - # # @return [String] - # attr_writer :name - - # @!parse - # # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] - # # @param expires_after [OpenAI::Models::VectorStoreCreateParams::ExpiresAfter] - # # @param file_ids [Array] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param name [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize( - # chunking_strategy: nil, - # expires_after: nil, - # file_ids: nil, - # metadata: nil, - # name: nil, - # request_options: {}, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(chunking_strategy: nil, expires_after: nil, file_ids: nil, metadata: nil, name: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStoreCreateParams} for more details. 
+ # + # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # + # @param expires_after [OpenAI::Models::VectorStoreCreateParams::ExpiresAfter] The expiration policy for a vector store. + # + # @param file_ids [Array] A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param name [String] The name of the vector store. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - class ExpiresAfter < OpenAI::BaseModel + class ExpiresAfter < OpenAI::Internal::Type::BaseModel # @!attribute anchor # Anchor timestamp after which the expiration policy applies. Supported anchors: - # `last_active_at`. + # `last_active_at`. # # @return [Symbol, :last_active_at] required :anchor, const: :last_active_at @@ -97,15 +75,15 @@ class ExpiresAfter < OpenAI::BaseModel # @return [Integer] required :days, Integer - # @!parse - # # The expiration policy for a vector store. - # # - # # @param days [Integer] - # # @param anchor [Symbol, :last_active_at] - # # - # def initialize(days:, anchor: :last_active_at, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(days:, anchor: :last_active_at) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStoreCreateParams::ExpiresAfter} for more details. + # + # The expiration policy for a vector store. + # + # @param days [Integer] The number of days after the anchor time that the vector store will expire. + # + # @param anchor [Symbol, :last_active_at] Anchor timestamp after which the expiration policy applies. 
Supported anchors: ` end end end diff --git a/lib/openai/models/vector_store_delete_params.rb b/lib/openai/models/vector_store_delete_params.rb index 0d599c50..11a788e6 100644 --- a/lib/openai/models/vector_store_delete_params.rb +++ b/lib/openai/models/vector_store_delete_params.rb @@ -2,17 +2,13 @@ module OpenAI module Models - class VectorStoreDeleteParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores#delete + class VectorStoreDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/vector_store_deleted.rb b/lib/openai/models/vector_store_deleted.rb index 018837f0..ecc812bc 100644 --- a/lib/openai/models/vector_store_deleted.rb +++ b/lib/openai/models/vector_store_deleted.rb @@ -2,7 +2,8 @@ module OpenAI module Models - class VectorStoreDeleted < OpenAI::BaseModel + # @see OpenAI::Resources::VectorStores#delete + class VectorStoreDeleted < OpenAI::Internal::Type::BaseModel # @!attribute id # # @return [String] @@ -11,21 +12,17 @@ class VectorStoreDeleted < OpenAI::BaseModel # @!attribute deleted # # @return [Boolean] - required :deleted, OpenAI::BooleanModel + required :deleted, OpenAI::Internal::Type::Boolean # @!attribute object # # @return [Symbol, :"vector_store.deleted"] required :object, const: :"vector_store.deleted" - # @!parse - # # @param id [String] - # # @param deleted [Boolean] - # # @param object [Symbol, :"vector_store.deleted"] - # # - # def initialize(id:, deleted:, object: :"vector_store.deleted", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, deleted:, object: :"vector_store.deleted") + # @param id [String] + # @param deleted [Boolean] + # @param object [Symbol, :"vector_store.deleted"] end end end diff --git a/lib/openai/models/vector_store_list_params.rb b/lib/openai/models/vector_store_list_params.rb index ee2a79cf..fcdc7d40 100644 --- a/lib/openai/models/vector_store_list_params.rb +++ b/lib/openai/models/vector_store_list_params.rb @@ -2,79 +2,67 @@ module OpenAI module Models - class VectorStoreListParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores#list + class VectorStoreListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!attribute [r] after + # @!attribute after # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. 
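
With the VectorStoreCreateParams shape above, creating a store that expires after a week of inactivity combines `file_ids`, `metadata`, and the `last_active_at`-anchored `expires_after`. A sketch under the same assumptions (configured client, hash coercion for nested params); the File ID is hypothetical:

vector_store = client.vector_stores.create(
  name: "support-docs",
  file_ids: ["file-abc123"],                         # hypothetical File ID
  metadata: {team: "support"},                       # up to 16 string pairs
  expires_after: {anchor: :last_active_at, days: 7}
)

`chunking_strategy` is left unset here, which per the docs above falls back to the `auto` strategy and only applies when `file_ids` is non-empty.
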
# # @return [String, nil] optional :after, String - # @!parse - # # @return [String] - # attr_writer :after - - # @!attribute [r] before + # @!attribute before # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. # # @return [String, nil] optional :before, String - # @!parse - # # @return [String] - # attr_writer :before - - # @!attribute [r] limit + # @!attribute limit # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. # # @return [Integer, nil] optional :limit, Integer - # @!parse - # # @return [Integer] - # attr_writer :limit - - # @!attribute [r] order + # @!attribute order # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. # # @return [Symbol, OpenAI::Models::VectorStoreListParams::Order, nil] - optional :order, enum: -> { OpenAI::Models::VectorStoreListParams::Order } - - # @!parse - # # @return [Symbol, OpenAI::Models::VectorStoreListParams::Order] - # attr_writer :order - - # @!parse - # # @param after [String] - # # @param before [String] - # # @param limit [Integer] - # # @param order [Symbol, OpenAI::Models::VectorStoreListParams::Order] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(after: nil, before: nil, limit: nil, order: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + optional :order, enum: -> { OpenAI::VectorStoreListParams::Order } - # @abstract + # @!method initialize(after: nil, before: nil, limit: nil, order: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStoreListParams} for more details. # + # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place + # + # @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place + # + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1 + # + # @param order [Symbol, OpenAI::Models::VectorStoreListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. - class Order < OpenAI::Enum + # order and `desc` for descending order. + module Order + extend OpenAI::Internal::Type::Enum + ASC = :asc DESC = :desc - finalize! 
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/vector_store_retrieve_params.rb b/lib/openai/models/vector_store_retrieve_params.rb index 8051213c..004d1047 100644 --- a/lib/openai/models/vector_store_retrieve_params.rb +++ b/lib/openai/models/vector_store_retrieve_params.rb @@ -2,17 +2,13 @@ module OpenAI module Models - class VectorStoreRetrieveParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores#retrieve + class VectorStoreRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!parse - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/vector_store_search_params.rb b/lib/openai/models/vector_store_search_params.rb index ff451753..fdf4e91f 100644 --- a/lib/openai/models/vector_store_search_params.rb +++ b/lib/openai/models/vector_store_search_params.rb @@ -2,138 +2,118 @@ module OpenAI module Models - class VectorStoreSearchParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores#search + class VectorStoreSearchParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute query # A query string for a search # # @return [String, Array] - required :query, union: -> { OpenAI::Models::VectorStoreSearchParams::Query } + required :query, union: -> { OpenAI::VectorStoreSearchParams::Query } - # @!attribute [r] filters + # @!attribute filters # A filter to apply based on file attributes. # # @return [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter, nil] - optional :filters, union: -> { OpenAI::Models::VectorStoreSearchParams::Filters } + optional :filters, union: -> { OpenAI::VectorStoreSearchParams::Filters } - # @!parse - # # @return [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter] - # attr_writer :filters - - # @!attribute [r] max_num_results + # @!attribute max_num_results # The maximum number of results to return. This number should be between 1 and 50 - # inclusive. + # inclusive. # # @return [Integer, nil] optional :max_num_results, Integer - # @!parse - # # @return [Integer] - # attr_writer :max_num_results - - # @!attribute [r] ranking_options + # @!attribute ranking_options # Ranking options for search. # # @return [OpenAI::Models::VectorStoreSearchParams::RankingOptions, nil] - optional :ranking_options, -> { OpenAI::Models::VectorStoreSearchParams::RankingOptions } - - # @!parse - # # @return [OpenAI::Models::VectorStoreSearchParams::RankingOptions] - # attr_writer :ranking_options + optional :ranking_options, -> { OpenAI::VectorStoreSearchParams::RankingOptions } - # @!attribute [r] rewrite_query + # @!attribute rewrite_query # Whether to rewrite the natural language query for vector search. 
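
VectorStoreListParams above is a standard cursor pager: `after`/`before` take object IDs, `limit` ranges from 1 to 100 (default 20), and `order` sorts by `created_at`. A sketch of walking pages by hand; that the returned page exposes its items as `data` is an assumption, since the page class is outside this diff:

page = client.vector_stores.list(limit: 20, order: :desc)
page.data.each { |vs| puts "#{vs.id} #{vs.name}" }

# Feed the last ID seen back in as `after` to fetch the next page.
next_page = client.vector_stores.list(limit: 20, order: :desc, after: page.data.last.id)
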
# # @return [Boolean, nil] - optional :rewrite_query, OpenAI::BooleanModel - - # @!parse - # # @return [Boolean] - # attr_writer :rewrite_query - - # @!parse - # # @param query [String, Array] - # # @param filters [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter] - # # @param max_num_results [Integer] - # # @param ranking_options [OpenAI::Models::VectorStoreSearchParams::RankingOptions] - # # @param rewrite_query [Boolean] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize( - # query:, - # filters: nil, - # max_num_results: nil, - # ranking_options: nil, - # rewrite_query: nil, - # request_options: {}, - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + optional :rewrite_query, OpenAI::Internal::Type::Boolean + + # @!method initialize(query:, filters: nil, max_num_results: nil, ranking_options: nil, rewrite_query: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStoreSearchParams} for more details. + # + # @param query [String, Array] A query string for a search + # + # @param filters [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter] A filter to apply based on file attributes. # + # @param max_num_results [Integer] The maximum number of results to return. This number should be between 1 and 50 + # + # @param ranking_options [OpenAI::Models::VectorStoreSearchParams::RankingOptions] Ranking options for search. + # + # @param rewrite_query [Boolean] Whether to rewrite the natural language query for vector search. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # A query string for a search - class Query < OpenAI::Union - StringArray = OpenAI::ArrayOf[String] + module Query + extend OpenAI::Internal::Type::Union variant String - variant OpenAI::Models::VectorStoreSearchParams::Query::StringArray + variant -> { OpenAI::Models::VectorStoreSearchParams::Query::StringArray } + + # @!method self.variants + # @return [Array(String, Array)] + + # @type [OpenAI::Internal::Type::Converter] + StringArray = OpenAI::Internal::Type::ArrayOf[String] end - # @abstract - # # A filter to apply based on file attributes. - class Filters < OpenAI::Union + module Filters + extend OpenAI::Internal::Type::Union + # A filter used to compare a specified attribute key to a given value using a defined comparison operation. - variant -> { OpenAI::Models::ComparisonFilter } + variant -> { OpenAI::ComparisonFilter } # Combine multiple filters using `and` or `or`. - variant -> { OpenAI::Models::CompoundFilter } + variant -> { OpenAI::CompoundFilter } + + # @!method self.variants + # @return [Array(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter)] end - class RankingOptions < OpenAI::BaseModel - # @!attribute [r] ranker + class RankingOptions < OpenAI::Internal::Type::BaseModel + # @!attribute ranker + # Enable re-ranking; set to `none` to disable, which can help reduce latency. 
# # @return [Symbol, OpenAI::Models::VectorStoreSearchParams::RankingOptions::Ranker, nil] - optional :ranker, enum: -> { OpenAI::Models::VectorStoreSearchParams::RankingOptions::Ranker } - - # @!parse - # # @return [Symbol, OpenAI::Models::VectorStoreSearchParams::RankingOptions::Ranker] - # attr_writer :ranker + optional :ranker, enum: -> { OpenAI::VectorStoreSearchParams::RankingOptions::Ranker } - # @!attribute [r] score_threshold + # @!attribute score_threshold # # @return [Float, nil] optional :score_threshold, Float - # @!parse - # # @return [Float] - # attr_writer :score_threshold - - # @!parse - # # Ranking options for search. - # # - # # @param ranker [Symbol, OpenAI::Models::VectorStoreSearchParams::RankingOptions::Ranker] - # # @param score_threshold [Float] - # # - # def initialize(ranker: nil, score_threshold: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(ranker: nil, score_threshold: nil) + # Ranking options for search. + # + # @param ranker [Symbol, OpenAI::Models::VectorStoreSearchParams::RankingOptions::Ranker] Enable re-ranking; set to `none` to disable, which can help reduce latency. + # + # @param score_threshold [Float] - # @abstract + # Enable re-ranking; set to `none` to disable, which can help reduce latency. # - class Ranker < OpenAI::Enum + # @see OpenAI::Models::VectorStoreSearchParams::RankingOptions#ranker + module Ranker + extend OpenAI::Internal::Type::Enum + + NONE = :none AUTO = :auto DEFAULT_2024_11_15 = :"default-2024-11-15" - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/vector_store_search_response.rb b/lib/openai/models/vector_store_search_response.rb index aa09a30a..5b623829 100644 --- a/lib/openai/models/vector_store_search_response.rb +++ b/lib/openai/models/vector_store_search_response.rb @@ -2,24 +2,26 @@ module OpenAI module Models - class VectorStoreSearchResponse < OpenAI::BaseModel + # @see OpenAI::Resources::VectorStores#search + class VectorStoreSearchResponse < OpenAI::Internal::Type::BaseModel # @!attribute attributes # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. # # @return [Hash{Symbol=>String, Float, Boolean}, nil] required :attributes, - -> { OpenAI::HashOf[union: OpenAI::Models::VectorStoreSearchResponse::Attribute] }, + -> { OpenAI::Internal::Type::HashOf[union: OpenAI::Models::VectorStoreSearchResponse::Attribute] }, nil?: true # @!attribute content # Content chunks from the file. # # @return [Array] - required :content, -> { OpenAI::ArrayOf[OpenAI::Models::VectorStoreSearchResponse::Content] } + required :content, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::VectorStoreSearchResponse::Content] } # @!attribute file_id # The ID of the vector store file. 
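
VectorStoreSearchParams above takes a `query` that may be a single string or an array of strings, an optional `filters` union of ComparisonFilter or CompoundFilter, and ranking options that now include a `:none` ranker for disabling re-ranking. A search sketch; the `{key:, type:, value:}` filter hash mirrors the ComparisonFilter variant named in the union, and the IDs are hypothetical:

results = client.vector_stores.search(
  "vs_abc123",                                          # hypothetical vector store ID
  query: "How do refunds work?",
  max_num_results: 5,
  filters: {key: "category", type: :eq, value: "faq"},
  ranking_options: {ranker: :auto, score_threshold: 0.5},
  rewrite_query: true
)

# Assuming the search page exposes `data`, each hit is the
# VectorStoreSearchResponse model shown in the following hunks.
results.data.each do |hit|
  puts "#{hit.filename} (score: #{hit.score})"
  hit.content.each { |chunk| puts chunk.text }
end
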
@@ -39,28 +41,34 @@ class VectorStoreSearchResponse < OpenAI::BaseModel # @return [Float] required :score, Float - # @!parse - # # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] - # # @param content [Array] - # # @param file_id [String] - # # @param filename [String] - # # @param score [Float] - # # - # def initialize(attributes:, content:, file_id:, filename:, score:, **) = super + # @!method initialize(attributes:, content:, file_id:, filename:, score:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStoreSearchResponse} for more details. + # + # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param content [Array] Content chunks from the file. + # + # @param file_id [String] The ID of the vector store file. + # + # @param filename [String] The name of the vector store file. + # + # @param score [Float] The similarity score for the result. - # def initialize: (Hash | OpenAI::BaseModel) -> void + module Attribute + extend OpenAI::Internal::Type::Union - # @abstract - # - class Attribute < OpenAI::Union variant String variant Float - variant OpenAI::BooleanModel + variant OpenAI::Internal::Type::Boolean + + # @!method self.variants + # @return [Array(String, Float, Boolean)] end - class Content < OpenAI::BaseModel + class Content < OpenAI::Internal::Type::BaseModel # @!attribute text # The text content returned from search. # @@ -73,21 +81,21 @@ class Content < OpenAI::BaseModel # @return [Symbol, OpenAI::Models::VectorStoreSearchResponse::Content::Type] required :type, enum: -> { OpenAI::Models::VectorStoreSearchResponse::Content::Type } - # @!parse - # # @param text [String] - # # @param type [Symbol, OpenAI::Models::VectorStoreSearchResponse::Content::Type] - # # - # def initialize(text:, type:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(text:, type:) + # @param text [String] The text content returned from search. # + # @param type [Symbol, OpenAI::Models::VectorStoreSearchResponse::Content::Type] The type of content. + # The type of content. - class Type < OpenAI::Enum + # + # @see OpenAI::Models::VectorStoreSearchResponse::Content#type + module Type + extend OpenAI::Internal::Type::Enum + TEXT = :text - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/vector_store_update_params.rb b/lib/openai/models/vector_store_update_params.rb index 8dcdbb59..81557821 100644 --- a/lib/openai/models/vector_store_update_params.rb +++ b/lib/openai/models/vector_store_update_params.rb @@ -2,27 +2,27 @@ module OpenAI module Models - class VectorStoreUpdateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores#update + class VectorStoreUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute expires_after # The expiration policy for a vector store. # # @return [OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter, nil] - optional :expires_after, -> { OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter }, nil?: true + optional :expires_after, -> { OpenAI::VectorStoreUpdateParams::ExpiresAfter }, nil?: true # @!attribute metadata # Set of 16 key-value pairs that can be attached to an object. 
This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. # # @return [Hash{Symbol=>String}, nil] - optional :metadata, OpenAI::HashOf[String], nil?: true + optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true # @!attribute name # The name of the vector store. @@ -30,20 +30,22 @@ class VectorStoreUpdateParams < OpenAI::BaseModel # @return [String, nil] optional :name, String, nil?: true - # @!parse - # # @param expires_after [OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter, nil] - # # @param metadata [Hash{Symbol=>String}, nil] - # # @param name [String, nil] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(expires_after: nil, metadata: nil, name: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(expires_after: nil, metadata: nil, name: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStoreUpdateParams} for more details. + # + # @param expires_after [OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter, nil] The expiration policy for a vector store. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param name [String, nil] The name of the vector store. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - class ExpiresAfter < OpenAI::BaseModel + class ExpiresAfter < OpenAI::Internal::Type::BaseModel # @!attribute anchor # Anchor timestamp after which the expiration policy applies. Supported anchors: - # `last_active_at`. + # `last_active_at`. # # @return [Symbol, :last_active_at] required :anchor, const: :last_active_at @@ -54,15 +56,15 @@ class ExpiresAfter < OpenAI::BaseModel # @return [Integer] required :days, Integer - # @!parse - # # The expiration policy for a vector store. - # # - # # @param days [Integer] - # # @param anchor [Symbol, :last_active_at] - # # - # def initialize(days:, anchor: :last_active_at, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(days:, anchor: :last_active_at) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter} for more details. + # + # The expiration policy for a vector store. + # + # @param days [Integer] The number of days after the anchor time that the vector store will expire. + # + # @param anchor [Symbol, :last_active_at] Anchor timestamp after which the expiration policy applies. 
Supported anchors: ` end end end diff --git a/lib/openai/models/vector_stores/file_batch_cancel_params.rb b/lib/openai/models/vector_stores/file_batch_cancel_params.rb index 045c91ff..c6f2f182 100644 --- a/lib/openai/models/vector_stores/file_batch_cancel_params.rb +++ b/lib/openai/models/vector_stores/file_batch_cancel_params.rb @@ -3,23 +3,19 @@ module OpenAI module Models module VectorStores - class FileBatchCancelParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores::FileBatches#cancel + class FileBatchCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute vector_store_id # # @return [String] required :vector_store_id, String - # @!parse - # # @param vector_store_id [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(vector_store_id:, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(vector_store_id:, request_options: {}) + # @param vector_store_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/vector_stores/file_batch_create_params.rb b/lib/openai/models/vector_stores/file_batch_create_params.rb index f5859e61..0815e0f1 100644 --- a/lib/openai/models/vector_stores/file_batch_create_params.rb +++ b/lib/openai/models/vector_stores/file_batch_create_params.rb @@ -3,60 +3,63 @@ module OpenAI module Models module VectorStores - class FileBatchCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores::FileBatches#create + class FileBatchCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute file_ids # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # the vector store should use. Useful for tools like `file_search` that can access - # files. + # the vector store should use. Useful for tools like `file_search` that can access + # files. # # @return [Array] - required :file_ids, OpenAI::ArrayOf[String] + required :file_ids, OpenAI::Internal::Type::ArrayOf[String] # @!attribute attributes # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. 
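
Because VectorStoreUpdateParams above declares `expires_after`, `metadata`, and `name` with `nil?: true`, the same `update` call can either set or clear each field. A short sketch with a hypothetical ID:

client.vector_stores.update(
  "vs_abc123",
  name: "support-docs-v2",
  expires_after: {anchor: :last_active_at, days: 30}
)

# Passing nil explicitly clears the expiration policy rather than leaving it untouched.
client.vector_stores.update("vs_abc123", expires_after: nil)
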
# # @return [Hash{Symbol=>String, Float, Boolean}, nil] optional :attributes, - -> { OpenAI::HashOf[union: OpenAI::Models::VectorStores::FileBatchCreateParams::Attribute] }, + -> { + OpenAI::Internal::Type::HashOf[union: OpenAI::VectorStores::FileBatchCreateParams::Attribute] + }, nil?: true - # @!attribute [r] chunking_strategy + # @!attribute chunking_strategy # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. + # strategy. Only applicable if `file_ids` is non-empty. # # @return [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam, nil] - optional :chunking_strategy, union: -> { OpenAI::Models::FileChunkingStrategyParam } + optional :chunking_strategy, union: -> { OpenAI::FileChunkingStrategyParam } - # @!parse - # # @return [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] - # attr_writer :chunking_strategy - - # @!parse - # # @param file_ids [Array] - # # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] - # # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(file_ids:, attributes: nil, chunking_strategy: nil, request_options: {}, **) = super + # @!method initialize(file_ids:, attributes: nil, chunking_strategy: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStores::FileBatchCreateParams} for more details. + # + # @param file_ids [Array] A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # + # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # def initialize: (Hash | OpenAI::BaseModel) -> void + module Attribute + extend OpenAI::Internal::Type::Union - # @abstract - # - class Attribute < OpenAI::Union variant String variant Float - variant OpenAI::BooleanModel + variant OpenAI::Internal::Type::Boolean + + # @!method self.variants + # @return [Array(String, Float, Boolean)] end end end diff --git a/lib/openai/models/vector_stores/file_batch_list_files_params.rb b/lib/openai/models/vector_stores/file_batch_list_files_params.rb index 40629c3c..d9f5bb5c 100644 --- a/lib/openai/models/vector_stores/file_batch_list_files_params.rb +++ b/lib/openai/models/vector_stores/file_batch_list_files_params.rb @@ -3,108 +3,95 @@ module OpenAI module Models module VectorStores - class FileBatchListFilesParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores::FileBatches#list_files + class FileBatchListFilesParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute vector_store_id # # @return [String] required :vector_store_id, String - # @!attribute [r] after + # @!attribute after # A cursor for use in pagination. 
`after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. # # @return [String, nil] optional :after, String - # @!parse - # # @return [String] - # attr_writer :after - - # @!attribute [r] before + # @!attribute before # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. # # @return [String, nil] optional :before, String - # @!parse - # # @return [String] - # attr_writer :before - - # @!attribute [r] filter + # @!attribute filter # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. # # @return [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Filter, nil] - optional :filter, enum: -> { OpenAI::Models::VectorStores::FileBatchListFilesParams::Filter } - - # @!parse - # # @return [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Filter] - # attr_writer :filter + optional :filter, enum: -> { OpenAI::VectorStores::FileBatchListFilesParams::Filter } - # @!attribute [r] limit + # @!attribute limit # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. # # @return [Integer, nil] optional :limit, Integer - # @!parse - # # @return [Integer] - # attr_writer :limit - - # @!attribute [r] order + # @!attribute order # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. # # @return [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Order, nil] - optional :order, enum: -> { OpenAI::Models::VectorStores::FileBatchListFilesParams::Order } + optional :order, enum: -> { OpenAI::VectorStores::FileBatchListFilesParams::Order } - # @!parse - # # @return [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Order] - # attr_writer :order - - # @!parse - # # @param vector_store_id [String] - # # @param after [String] - # # @param before [String] - # # @param filter [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Filter] - # # @param limit [Integer] - # # @param order [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Order] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(vector_store_id:, after: nil, before: nil, filter: nil, limit: nil, order: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(vector_store_id:, after: nil, before: nil, filter: nil, limit: nil, order: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStores::FileBatchListFilesParams} for more details. 
+ # + # @param vector_store_id [String] + # + # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place # + # @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place + # + # @param filter [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Filter] Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. + # + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1 + # + # @param order [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. - class Filter < OpenAI::Enum + module Filter + extend OpenAI::Internal::Type::Enum + IN_PROGRESS = :in_progress COMPLETED = :completed FAILED = :failed CANCELLED = :cancelled - finalize! + # @!method self.values + # @return [Array] end - # @abstract - # # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. - class Order < OpenAI::Enum + # order and `desc` for descending order. + module Order + extend OpenAI::Internal::Type::Enum + ASC = :asc DESC = :desc - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/vector_stores/file_batch_retrieve_params.rb b/lib/openai/models/vector_stores/file_batch_retrieve_params.rb index 9856a452..3ec39a5b 100644 --- a/lib/openai/models/vector_stores/file_batch_retrieve_params.rb +++ b/lib/openai/models/vector_stores/file_batch_retrieve_params.rb @@ -3,23 +3,19 @@ module OpenAI module Models module VectorStores - class FileBatchRetrieveParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores::FileBatches#retrieve + class FileBatchRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute vector_store_id # # @return [String] required :vector_store_id, String - # @!parse - # # @param vector_store_id [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(vector_store_id:, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(vector_store_id:, request_options: {}) + # @param vector_store_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/vector_stores/file_content_params.rb b/lib/openai/models/vector_stores/file_content_params.rb index c5d82123..0dbc1139 100644 --- a/lib/openai/models/vector_stores/file_content_params.rb +++ b/lib/openai/models/vector_stores/file_content_params.rb @@ -3,23 +3,19 @@ module OpenAI module Models module VectorStores - class FileContentParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores::Files#content + class FileContentParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute vector_store_id # # @return [String] 
required :vector_store_id, String - # @!parse - # # @param vector_store_id [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(vector_store_id:, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(vector_store_id:, request_options: {}) + # @param vector_store_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/vector_stores/file_content_response.rb b/lib/openai/models/vector_stores/file_content_response.rb index a2450a19..8c9f595a 100644 --- a/lib/openai/models/vector_stores/file_content_response.rb +++ b/lib/openai/models/vector_stores/file_content_response.rb @@ -3,34 +3,24 @@ module OpenAI module Models module VectorStores - class FileContentResponse < OpenAI::BaseModel - # @!attribute [r] text + # @see OpenAI::Resources::VectorStores::Files#content + class FileContentResponse < OpenAI::Internal::Type::BaseModel + # @!attribute text # The text content # # @return [String, nil] optional :text, String - # @!parse - # # @return [String] - # attr_writer :text - - # @!attribute [r] type + # @!attribute type # The content type (currently only `"text"`) # # @return [String, nil] optional :type, String - # @!parse - # # @return [String] - # attr_writer :type - - # @!parse - # # @param text [String] - # # @param type [String] - # # - # def initialize(text: nil, type: nil, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(text: nil, type: nil) + # @param text [String] The text content + # + # @param type [String] The content type (currently only `"text"`) end end end diff --git a/lib/openai/models/vector_stores/file_create_params.rb b/lib/openai/models/vector_stores/file_create_params.rb index c91648aa..e4bfafa9 100644 --- a/lib/openai/models/vector_stores/file_create_params.rb +++ b/lib/openai/models/vector_stores/file_create_params.rb @@ -3,60 +3,63 @@ module OpenAI module Models module VectorStores - class FileCreateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores::Files#create + class FileCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute file_id # A [File](https://platform.openai.com/docs/api-reference/files) ID that the - # vector store should use. Useful for tools like `file_search` that can access - # files. + # vector store should use. Useful for tools like `file_search` that can access + # files. # # @return [String] required :file_id, String # @!attribute attributes # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. 
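The attributes hash accepts the String/Float/Boolean union declared further down. A hedged sketch of a create call against OpenAI::Resources::VectorStores::Files#create (the @see target above); the client and both IDs are assumptions:

    client.vector_stores.files.create(
      "vs_abc123",            # hypothetical vector store ID
      file_id: "file-abc123", # hypothetical File ID
      attributes: {
        author: "jane", # String variant
        revision: 2.0,  # Float variant
        internal: true  # Boolean variant
      }
    )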
# # @return [Hash{Symbol=>String, Float, Boolean}, nil] optional :attributes, - -> { OpenAI::HashOf[union: OpenAI::Models::VectorStores::FileCreateParams::Attribute] }, + -> { + OpenAI::Internal::Type::HashOf[union: OpenAI::VectorStores::FileCreateParams::Attribute] + }, nil?: true - # @!attribute [r] chunking_strategy + # @!attribute chunking_strategy # The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. + # strategy. Only applicable if `file_ids` is non-empty. # # @return [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam, nil] - optional :chunking_strategy, union: -> { OpenAI::Models::FileChunkingStrategyParam } + optional :chunking_strategy, union: -> { OpenAI::FileChunkingStrategyParam } - # @!parse - # # @return [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] - # attr_writer :chunking_strategy - - # @!parse - # # @param file_id [String] - # # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] - # # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(file_id:, attributes: nil, chunking_strategy: nil, request_options: {}, **) = super + # @!method initialize(file_id:, attributes: nil, chunking_strategy: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStores::FileCreateParams} for more details. + # + # @param file_id [String] A [File](https://platform.openai.com/docs/api-reference/files) ID that the vecto + # + # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] The chunking strategy used to chunk the file(s). 
If not set, will use the `auto` + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # def initialize: (Hash | OpenAI::BaseModel) -> void + module Attribute + extend OpenAI::Internal::Type::Union - # @abstract - # - class Attribute < OpenAI::Union variant String variant Float - variant OpenAI::BooleanModel + variant OpenAI::Internal::Type::Boolean + + # @!method self.variants + # @return [Array(String, Float, Boolean)] end end end diff --git a/lib/openai/models/vector_stores/file_delete_params.rb b/lib/openai/models/vector_stores/file_delete_params.rb index 3a22fe46..ef1c9179 100644 --- a/lib/openai/models/vector_stores/file_delete_params.rb +++ b/lib/openai/models/vector_stores/file_delete_params.rb @@ -3,23 +3,19 @@ module OpenAI module Models module VectorStores - class FileDeleteParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores::Files#delete + class FileDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute vector_store_id # # @return [String] required :vector_store_id, String - # @!parse - # # @param vector_store_id [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(vector_store_id:, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(vector_store_id:, request_options: {}) + # @param vector_store_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/vector_stores/file_list_params.rb b/lib/openai/models/vector_stores/file_list_params.rb index c227d297..2540afea 100644 --- a/lib/openai/models/vector_stores/file_list_params.rb +++ b/lib/openai/models/vector_stores/file_list_params.rb @@ -3,102 +3,88 @@ module OpenAI module Models module VectorStores - class FileListParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores::Files#list + class FileListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - # @!attribute [r] after + # @!attribute after # A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. # # @return [String, nil] optional :after, String - # @!parse - # # @return [String] - # attr_writer :after - - # @!attribute [r] before + # @!attribute before # A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # in the list. 
For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. # # @return [String, nil] optional :before, String - # @!parse - # # @return [String] - # attr_writer :before - - # @!attribute [r] filter + # @!attribute filter # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. # # @return [Symbol, OpenAI::Models::VectorStores::FileListParams::Filter, nil] - optional :filter, enum: -> { OpenAI::Models::VectorStores::FileListParams::Filter } - - # @!parse - # # @return [Symbol, OpenAI::Models::VectorStores::FileListParams::Filter] - # attr_writer :filter + optional :filter, enum: -> { OpenAI::VectorStores::FileListParams::Filter } - # @!attribute [r] limit + # @!attribute limit # A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # 100, and the default is 20. # # @return [Integer, nil] optional :limit, Integer - # @!parse - # # @return [Integer] - # attr_writer :limit - - # @!attribute [r] order + # @!attribute order # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # order and `desc` for descending order. # # @return [Symbol, OpenAI::Models::VectorStores::FileListParams::Order, nil] - optional :order, enum: -> { OpenAI::Models::VectorStores::FileListParams::Order } + optional :order, enum: -> { OpenAI::VectorStores::FileListParams::Order } - # @!parse - # # @return [Symbol, OpenAI::Models::VectorStores::FileListParams::Order] - # attr_writer :order - - # @!parse - # # @param after [String] - # # @param before [String] - # # @param filter [Symbol, OpenAI::Models::VectorStores::FileListParams::Filter] - # # @param limit [Integer] - # # @param order [Symbol, OpenAI::Models::VectorStores::FileListParams::Order] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(after: nil, before: nil, filter: nil, limit: nil, order: nil, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(after: nil, before: nil, filter: nil, limit: nil, order: nil, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStores::FileListParams} for more details. + # + # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place # + # @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place + # + # @param filter [Symbol, OpenAI::Models::VectorStores::FileListParams::Filter] Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. + # + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1 + # + # @param order [Symbol, OpenAI::Models::VectorStores::FileListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. - class Filter < OpenAI::Enum + module Filter + extend OpenAI::Internal::Type::Enum + IN_PROGRESS = :in_progress COMPLETED = :completed FAILED = :failed CANCELLED = :cancelled - finalize! 
+ # @!method self.values + # @return [Array] end - # @abstract - # # Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. - class Order < OpenAI::Enum + # order and `desc` for descending order. + module Order + extend OpenAI::Internal::Type::Enum + ASC = :asc DESC = :desc - finalize! + # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/vector_stores/file_retrieve_params.rb b/lib/openai/models/vector_stores/file_retrieve_params.rb index af408c6c..3a301cdb 100644 --- a/lib/openai/models/vector_stores/file_retrieve_params.rb +++ b/lib/openai/models/vector_stores/file_retrieve_params.rb @@ -3,23 +3,19 @@ module OpenAI module Models module VectorStores - class FileRetrieveParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores::Files#retrieve + class FileRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute vector_store_id # # @return [String] required :vector_store_id, String - # @!parse - # # @param vector_store_id [String] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(vector_store_id:, request_options: {}, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(vector_store_id:, request_options: {}) + # @param vector_store_id [String] + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] end end end diff --git a/lib/openai/models/vector_stores/file_update_params.rb b/lib/openai/models/vector_stores/file_update_params.rb index 047e821f..be3d5d7b 100644 --- a/lib/openai/models/vector_stores/file_update_params.rb +++ b/lib/openai/models/vector_stores/file_update_params.rb @@ -3,10 +3,10 @@ module OpenAI module Models module VectorStores - class FileUpdateParams < OpenAI::BaseModel - # @!parse - # extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + # @see OpenAI::Resources::VectorStores::Files#update + class FileUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters # @!attribute vector_store_id # @@ -15,33 +15,39 @@ class FileUpdateParams < OpenAI::BaseModel # @!attribute attributes # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. 
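Unlike create, update declares attributes as required (though it may be nil to clear metadata, per the nil?: true flag below). A sketch under the same assumptions about client setup and calling convention:

    client.vector_stores.files.update(
      "file-abc123",                # hypothetical file ID
      vector_store_id: "vs_abc123", # hypothetical vector store ID
      attributes: {status: "archived", priority: 1.0, reviewed: false}
    )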
# # @return [Hash{Symbol=>String, Float, Boolean}, nil] required :attributes, - -> { OpenAI::HashOf[union: OpenAI::Models::VectorStores::FileUpdateParams::Attribute] }, + -> { + OpenAI::Internal::Type::HashOf[union: OpenAI::VectorStores::FileUpdateParams::Attribute] + }, nil?: true - # @!parse - # # @param vector_store_id [String] - # # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] - # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # # - # def initialize(vector_store_id:, attributes:, request_options: {}, **) = super + # @!method initialize(vector_store_id:, attributes:, request_options: {}) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStores::FileUpdateParams} for more details. + # + # @param vector_store_id [String] + # + # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # def initialize: (Hash | OpenAI::BaseModel) -> void + module Attribute + extend OpenAI::Internal::Type::Union - # @abstract - # - class Attribute < OpenAI::Union variant String variant Float - variant OpenAI::BooleanModel + variant OpenAI::Internal::Type::Boolean + + # @!method self.variants + # @return [Array(String, Float, Boolean)] end end end diff --git a/lib/openai/models/vector_stores/vector_store_file.rb b/lib/openai/models/vector_stores/vector_store_file.rb index 27d1234a..c6e737e3 100644 --- a/lib/openai/models/vector_stores/vector_store_file.rb +++ b/lib/openai/models/vector_stores/vector_store_file.rb @@ -3,7 +3,8 @@ module OpenAI module Models module VectorStores - class VectorStoreFile < OpenAI::BaseModel + # @see OpenAI::Resources::VectorStores::Files#create + class VectorStoreFile < OpenAI::Internal::Type::BaseModel # @!attribute id # The identifier, which can be referenced in API endpoints. # @@ -18,10 +19,10 @@ class VectorStoreFile < OpenAI::BaseModel # @!attribute last_error # The last error associated with this vector store file. Will be `null` if there - # are no errors. + # are no errors. # # @return [OpenAI::Models::VectorStores::VectorStoreFile::LastError, nil] - required :last_error, -> { OpenAI::Models::VectorStores::VectorStoreFile::LastError }, nil?: true + required :last_error, -> { OpenAI::VectorStores::VectorStoreFile::LastError }, nil?: true # @!attribute object # The object type, which is always `vector_store.file`. @@ -31,86 +32,79 @@ class VectorStoreFile < OpenAI::BaseModel # @!attribute status # The status of the vector store file, which can be either `in_progress`, - # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the - # vector store file is ready for use. + # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the + # vector store file is ready for use. # # @return [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::Status] - required :status, enum: -> { OpenAI::Models::VectorStores::VectorStoreFile::Status } + required :status, enum: -> { OpenAI::VectorStores::VectorStoreFile::Status } # @!attribute usage_bytes # The total vector store usage in bytes. Note that this may be different from the - # original file size. + # original file size. 
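Reading these fields back from a retrieved file, sketched under the same assumptions (configured client, hypothetical IDs); status is a plain Symbol and last_error is nil unless ingestion failed:

    file = client.vector_stores.files.retrieve(
      "file-abc123",
      vector_store_id: "vs_abc123"
    )

    if file.status == :failed && (err = file.last_error)
      warn "ingestion failed: #{err.code} - #{err.message}"
    else
      puts "#{file.id}: #{file.usage_bytes} bytes indexed"
    end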
# # @return [Integer] required :usage_bytes, Integer # @!attribute vector_store_id # The ID of the - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # that the [File](https://platform.openai.com/docs/api-reference/files) is - # attached to. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # that the [File](https://platform.openai.com/docs/api-reference/files) is + # attached to. # # @return [String] required :vector_store_id, String # @!attribute attributes # Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. # # @return [Hash{Symbol=>String, Float, Boolean}, nil] optional :attributes, - -> { OpenAI::HashOf[union: OpenAI::Models::VectorStores::VectorStoreFile::Attribute] }, + -> { + OpenAI::Internal::Type::HashOf[union: OpenAI::VectorStores::VectorStoreFile::Attribute] + }, nil?: true - # @!attribute [r] chunking_strategy + # @!attribute chunking_strategy # The strategy used to chunk the file. # # @return [OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject, nil] - optional :chunking_strategy, union: -> { OpenAI::Models::FileChunkingStrategy } - - # @!parse - # # @return [OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject] - # attr_writer :chunking_strategy - - # @!parse - # # A list of files attached to a vector store. - # # - # # @param id [String] - # # @param created_at [Integer] - # # @param last_error [OpenAI::Models::VectorStores::VectorStoreFile::LastError, nil] - # # @param status [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::Status] - # # @param usage_bytes [Integer] - # # @param vector_store_id [String] - # # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] - # # @param chunking_strategy [OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject] - # # @param object [Symbol, :"vector_store.file"] - # # - # def initialize( - # id:, - # created_at:, - # last_error:, - # status:, - # usage_bytes:, - # vector_store_id:, - # attributes: nil, - # chunking_strategy: nil, - # object: :"vector_store.file", - # ** - # ) - # super - # end - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class LastError < OpenAI::BaseModel + optional :chunking_strategy, union: -> { OpenAI::FileChunkingStrategy } + + # @!method initialize(id:, created_at:, last_error:, status:, usage_bytes:, vector_store_id:, attributes: nil, chunking_strategy: nil, object: :"vector_store.file") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStores::VectorStoreFile} for more details. + # + # A list of files attached to a vector store. + # + # @param id [String] The identifier, which can be referenced in API endpoints. + # + # @param created_at [Integer] The Unix timestamp (in seconds) for when the vector store file was created. 
+ # + # @param last_error [OpenAI::Models::VectorStores::VectorStoreFile::LastError, nil] The last error associated with this vector store file. Will be `null` if there a + # + # @param status [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::Status] The status of the vector store file, which can be either `in_progress`, `complet + # + # @param usage_bytes [Integer] The total vector store usage in bytes. Note that this may be different from the + # + # @param vector_store_id [String] The ID of the [vector store](https://platform.openai.com/docs/api-reference/vect + # + # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param chunking_strategy [OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject] The strategy used to chunk the file. + # + # @param object [Symbol, :"vector_store.file"] The object type, which is always `vector_store.file`. + + # @see OpenAI::Models::VectorStores::VectorStoreFile#last_error + class LastError < OpenAI::Internal::Type::BaseModel # @!attribute code # One of `server_error` or `rate_limit_exceeded`. # # @return [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::LastError::Code] - required :code, enum: -> { OpenAI::Models::VectorStores::VectorStoreFile::LastError::Code } + required :code, enum: -> { OpenAI::VectorStores::VectorStoreFile::LastError::Code } # @!attribute message # A human-readable description of the error. @@ -118,51 +112,57 @@ class LastError < OpenAI::BaseModel # @return [String] required :message, String - # @!parse - # # The last error associated with this vector store file. Will be `null` if there - # # are no errors. - # # - # # @param code [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::LastError::Code] - # # @param message [String] - # # - # def initialize(code:, message:, **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - # @abstract + # @!method initialize(code:, message:) + # The last error associated with this vector store file. Will be `null` if there + # are no errors. # + # @param code [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::LastError::Code] One of `server_error` or `rate_limit_exceeded`. + # + # @param message [String] A human-readable description of the error. + # One of `server_error` or `rate_limit_exceeded`. - class Code < OpenAI::Enum + # + # @see OpenAI::Models::VectorStores::VectorStoreFile::LastError#code + module Code + extend OpenAI::Internal::Type::Enum + SERVER_ERROR = :server_error UNSUPPORTED_FILE = :unsupported_file INVALID_FILE = :invalid_file - finalize! + # @!method self.values + # @return [Array] end end - # @abstract - # # The status of the vector store file, which can be either `in_progress`, - # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the - # vector store file is ready for use. - class Status < OpenAI::Enum + # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the + # vector store file is ready for use. + # + # @see OpenAI::Models::VectorStores::VectorStoreFile#status + module Status + extend OpenAI::Internal::Type::Enum + IN_PROGRESS = :in_progress COMPLETED = :completed CANCELLED = :cancelled FAILED = :failed - finalize! 
+ # @!method self.values + # @return [Array] end - # @abstract - # - class Attribute < OpenAI::Union + module Attribute + extend OpenAI::Internal::Type::Union + variant String variant Float - variant OpenAI::BooleanModel + variant OpenAI::Internal::Type::Boolean + + # @!method self.variants + # @return [Array(String, Float, Boolean)] end end end diff --git a/lib/openai/models/vector_stores/vector_store_file_batch.rb b/lib/openai/models/vector_stores/vector_store_file_batch.rb index da82a1f6..f4b251ee 100644 --- a/lib/openai/models/vector_stores/vector_store_file_batch.rb +++ b/lib/openai/models/vector_stores/vector_store_file_batch.rb @@ -3,7 +3,8 @@ module OpenAI module Models module VectorStores - class VectorStoreFileBatch < OpenAI::BaseModel + # @see OpenAI::Resources::VectorStores::FileBatches#create + class VectorStoreFileBatch < OpenAI::Internal::Type::BaseModel # @!attribute id # The identifier, which can be referenced in API endpoints. # @@ -12,7 +13,7 @@ class VectorStoreFileBatch < OpenAI::BaseModel # @!attribute created_at # The Unix timestamp (in seconds) for when the vector store files batch was - # created. + # created. # # @return [Integer] required :created_at, Integer @@ -20,7 +21,7 @@ class VectorStoreFileBatch < OpenAI::BaseModel # @!attribute file_counts # # @return [OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts] - required :file_counts, -> { OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts } + required :file_counts, -> { OpenAI::VectorStores::VectorStoreFileBatch::FileCounts } # @!attribute object # The object type, which is always `vector_store.file_batch`. @@ -30,35 +31,40 @@ class VectorStoreFileBatch < OpenAI::BaseModel # @!attribute status # The status of the vector store files batch, which can be either `in_progress`, - # `completed`, `cancelled` or `failed`. + # `completed`, `cancelled` or `failed`. # # @return [Symbol, OpenAI::Models::VectorStores::VectorStoreFileBatch::Status] - required :status, enum: -> { OpenAI::Models::VectorStores::VectorStoreFileBatch::Status } + required :status, enum: -> { OpenAI::VectorStores::VectorStoreFileBatch::Status } # @!attribute vector_store_id # The ID of the - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) - # that the [File](https://platform.openai.com/docs/api-reference/files) is - # attached to. + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # that the [File](https://platform.openai.com/docs/api-reference/files) is + # attached to. # # @return [String] required :vector_store_id, String - # @!parse - # # A batch of files attached to a vector store. - # # - # # @param id [String] - # # @param created_at [Integer] - # # @param file_counts [OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts] - # # @param status [Symbol, OpenAI::Models::VectorStores::VectorStoreFileBatch::Status] - # # @param vector_store_id [String] - # # @param object [Symbol, :"vector_store.files_batch"] - # # - # def initialize(id:, created_at:, file_counts:, status:, vector_store_id:, object: :"vector_store.files_batch", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void - - class FileCounts < OpenAI::BaseModel + # @!method initialize(id:, created_at:, file_counts:, status:, vector_store_id:, object: :"vector_store.files_batch") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStores::VectorStoreFileBatch} for more details. + # + # A batch of files attached to a vector store. 
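A file batch is processed asynchronously, so callers typically poll until it reaches a terminal status. A minimal sketch, assuming the FileBatches resource named in the @see tags above and an arbitrary poll interval; the terminal values come from the Status enum defined below:

    batch = client.vector_stores.file_batches.create(
      "vs_abc123",                # hypothetical vector store ID
      file_ids: %w[file-a file-b] # hypothetical File IDs
    )

    until %i[completed failed cancelled].include?(batch.status)
      sleep(1)
      batch = client.vector_stores.file_batches.retrieve(
        batch.id,
        vector_store_id: "vs_abc123"
      )
    end

    counts = batch.file_counts
    puts "#{counts.completed}/#{counts.total} processed, #{counts.failed} failed"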
+        #
+        #   @param id [String] The identifier, which can be referenced in API endpoints.
+        #
+        #   @param created_at [Integer] The Unix timestamp (in seconds) for when the vector store files batch was create
+        #
+        #   @param file_counts [OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts]
+        #
+        #   @param status [Symbol, OpenAI::Models::VectorStores::VectorStoreFileBatch::Status] The status of the vector store files batch, which can be either `in_progress`, `
+        #
+        #   @param vector_store_id [String] The ID of the [vector store](https://platform.openai.com/docs/api-reference/vect
+        #
+        #   @param object [Symbol, :"vector_store.files_batch"] The object type, which is always `vector_store.file_batch`.
+
+        # @see OpenAI::Models::VectorStores::VectorStoreFileBatch#file_counts
+        class FileCounts < OpenAI::Internal::Type::BaseModel
           # @!attribute cancelled
           # The number of files that where cancelled.
           #
@@ -89,29 +95,32 @@ class FileCounts < OpenAI::BaseModel
           # @return [Integer]
           required :total, Integer

-          # @!parse
-          # # @param cancelled [Integer]
-          # # @param completed [Integer]
-          # # @param failed [Integer]
-          # # @param in_progress [Integer]
-          # # @param total [Integer]
-          # #
-          # def initialize(cancelled:, completed:, failed:, in_progress:, total:, **) = super
-
-          # def initialize: (Hash | OpenAI::BaseModel) -> void
+          # @!method initialize(cancelled:, completed:, failed:, in_progress:, total:)
+          #   @param cancelled [Integer] The number of files that were cancelled.
+          #
+          #   @param completed [Integer] The number of files that have been processed.
+          #
+          #   @param failed [Integer] The number of files that have failed to process.
+          #
+          #   @param in_progress [Integer] The number of files that are currently being processed.
+          #
+          #   @param total [Integer] The total number of files.
         end

-        # @abstract
-        #
         # The status of the vector store files batch, which can be either `in_progress`,
-        #   `completed`, `cancelled` or `failed`.
-        class Status < OpenAI::Enum
+        #   `completed`, `cancelled` or `failed`.
+        #
+        # @see OpenAI::Models::VectorStores::VectorStoreFileBatch#status
+        module Status
+          extend OpenAI::Internal::Type::Enum
+
           IN_PROGRESS = :in_progress
           COMPLETED = :completed
           CANCELLED = :cancelled
           FAILED = :failed

-          finalize!
+ # @!method self.values + # @return [Array] end end end diff --git a/lib/openai/models/vector_stores/vector_store_file_deleted.rb b/lib/openai/models/vector_stores/vector_store_file_deleted.rb index 9a226ad7..971629db 100644 --- a/lib/openai/models/vector_stores/vector_store_file_deleted.rb +++ b/lib/openai/models/vector_stores/vector_store_file_deleted.rb @@ -3,7 +3,8 @@ module OpenAI module Models module VectorStores - class VectorStoreFileDeleted < OpenAI::BaseModel + # @see OpenAI::Resources::VectorStores::Files#delete + class VectorStoreFileDeleted < OpenAI::Internal::Type::BaseModel # @!attribute id # # @return [String] @@ -12,21 +13,17 @@ class VectorStoreFileDeleted < OpenAI::BaseModel # @!attribute deleted # # @return [Boolean] - required :deleted, OpenAI::BooleanModel + required :deleted, OpenAI::Internal::Type::Boolean # @!attribute object # # @return [Symbol, :"vector_store.file.deleted"] required :object, const: :"vector_store.file.deleted" - # @!parse - # # @param id [String] - # # @param deleted [Boolean] - # # @param object [Symbol, :"vector_store.file.deleted"] - # # - # def initialize(id:, deleted:, object: :"vector_store.file.deleted", **) = super - - # def initialize: (Hash | OpenAI::BaseModel) -> void + # @!method initialize(id:, deleted:, object: :"vector_store.file.deleted") + # @param id [String] + # @param deleted [Boolean] + # @param object [Symbol, :"vector_store.file.deleted"] end end diff --git a/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb b/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb new file mode 100644 index 00000000..1cef64b5 --- /dev/null +++ b/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb @@ -0,0 +1,84 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + class BatchCancelledWebhookEvent < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the event. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) of when the batch API request was cancelled. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data + # Event data payload. + # + # @return [OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::Data] + required :data, -> { OpenAI::Webhooks::BatchCancelledWebhookEvent::Data } + + # @!attribute type + # The type of the event. Always `batch.cancelled`. + # + # @return [Symbol, :"batch.cancelled"] + required :type, const: :"batch.cancelled" + + # @!attribute object + # The object of the event. Always `event`. + # + # @return [Symbol, OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::Object, nil] + optional :object, enum: -> { OpenAI::Webhooks::BatchCancelledWebhookEvent::Object } + + # @!method initialize(id:, created_at:, data:, object: nil, type: :"batch.cancelled") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::BatchCancelledWebhookEvent} for more details. + # + # Sent when a batch API request has been cancelled. + # + # @param id [String] The unique ID of the event. + # + # @param created_at [Integer] The Unix timestamp (in seconds) of when the batch API request was cancelled. + # + # @param data [OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::Data] Event data payload. + # + # @param object [Symbol, OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::Object] The object of the event. Always `event`. + # + # @param type [Symbol, :"batch.cancelled"] The type of the event. Always `batch.cancelled`. 
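All of the webhook event models that follow share one shape: an event id, a created_at timestamp, a data payload carrying the affected resource's ID, a fixed type constant, and an optional object enum. A handler can therefore branch on type alone; a sketch over already-parsed events (how the payload is parsed is not part of this diff):

    def handle_event(event)
      case event.type
      in :"batch.completed"
        puts "batch #{event.data.id} done at #{Time.at(event.created_at)}"
      in :"batch.cancelled" | :"batch.expired" | :"batch.failed"
        warn "batch #{event.data.id} did not finish (#{event.type})"
      else
        # eval.run.* and fine_tuning.job.* events are defined later in this diff
      end
    end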
+ + # @see OpenAI::Models::Webhooks::BatchCancelledWebhookEvent#data + class Data < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the batch API request. + # + # @return [String] + required :id, String + + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::Data} for more details. + # + # Event data payload. + # + # @param id [String] The unique ID of the batch API request. + end + + # The object of the event. Always `event`. + # + # @see OpenAI::Models::Webhooks::BatchCancelledWebhookEvent#object + module Object + extend OpenAI::Internal::Type::Enum + + EVENT = :event + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/webhooks/batch_completed_webhook_event.rb b/lib/openai/models/webhooks/batch_completed_webhook_event.rb new file mode 100644 index 00000000..fb130f53 --- /dev/null +++ b/lib/openai/models/webhooks/batch_completed_webhook_event.rb @@ -0,0 +1,84 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + class BatchCompletedWebhookEvent < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the event. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) of when the batch API request was completed. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data + # Event data payload. + # + # @return [OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::Data] + required :data, -> { OpenAI::Webhooks::BatchCompletedWebhookEvent::Data } + + # @!attribute type + # The type of the event. Always `batch.completed`. + # + # @return [Symbol, :"batch.completed"] + required :type, const: :"batch.completed" + + # @!attribute object + # The object of the event. Always `event`. + # + # @return [Symbol, OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::Object, nil] + optional :object, enum: -> { OpenAI::Webhooks::BatchCompletedWebhookEvent::Object } + + # @!method initialize(id:, created_at:, data:, object: nil, type: :"batch.completed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::BatchCompletedWebhookEvent} for more details. + # + # Sent when a batch API request has been completed. + # + # @param id [String] The unique ID of the event. + # + # @param created_at [Integer] The Unix timestamp (in seconds) of when the batch API request was completed. + # + # @param data [OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::Data] Event data payload. + # + # @param object [Symbol, OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::Object] The object of the event. Always `event`. + # + # @param type [Symbol, :"batch.completed"] The type of the event. Always `batch.completed`. + + # @see OpenAI::Models::Webhooks::BatchCompletedWebhookEvent#data + class Data < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the batch API request. + # + # @return [String] + required :id, String + + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::Data} for more details. + # + # Event data payload. + # + # @param id [String] The unique ID of the batch API request. + end + + # The object of the event. Always `event`. 
+ # + # @see OpenAI::Models::Webhooks::BatchCompletedWebhookEvent#object + module Object + extend OpenAI::Internal::Type::Enum + + EVENT = :event + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/webhooks/batch_expired_webhook_event.rb b/lib/openai/models/webhooks/batch_expired_webhook_event.rb new file mode 100644 index 00000000..cf0bb285 --- /dev/null +++ b/lib/openai/models/webhooks/batch_expired_webhook_event.rb @@ -0,0 +1,84 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + class BatchExpiredWebhookEvent < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the event. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) of when the batch API request expired. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data + # Event data payload. + # + # @return [OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::Data] + required :data, -> { OpenAI::Webhooks::BatchExpiredWebhookEvent::Data } + + # @!attribute type + # The type of the event. Always `batch.expired`. + # + # @return [Symbol, :"batch.expired"] + required :type, const: :"batch.expired" + + # @!attribute object + # The object of the event. Always `event`. + # + # @return [Symbol, OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::Object, nil] + optional :object, enum: -> { OpenAI::Webhooks::BatchExpiredWebhookEvent::Object } + + # @!method initialize(id:, created_at:, data:, object: nil, type: :"batch.expired") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::BatchExpiredWebhookEvent} for more details. + # + # Sent when a batch API request has expired. + # + # @param id [String] The unique ID of the event. + # + # @param created_at [Integer] The Unix timestamp (in seconds) of when the batch API request expired. + # + # @param data [OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::Data] Event data payload. + # + # @param object [Symbol, OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::Object] The object of the event. Always `event`. + # + # @param type [Symbol, :"batch.expired"] The type of the event. Always `batch.expired`. + + # @see OpenAI::Models::Webhooks::BatchExpiredWebhookEvent#data + class Data < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the batch API request. + # + # @return [String] + required :id, String + + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::Data} for more details. + # + # Event data payload. + # + # @param id [String] The unique ID of the batch API request. + end + + # The object of the event. Always `event`. + # + # @see OpenAI::Models::Webhooks::BatchExpiredWebhookEvent#object + module Object + extend OpenAI::Internal::Type::Enum + + EVENT = :event + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/webhooks/batch_failed_webhook_event.rb b/lib/openai/models/webhooks/batch_failed_webhook_event.rb new file mode 100644 index 00000000..c84be5ee --- /dev/null +++ b/lib/openai/models/webhooks/batch_failed_webhook_event.rb @@ -0,0 +1,84 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + class BatchFailedWebhookEvent < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the event. 
+ # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) of when the batch API request failed. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data + # Event data payload. + # + # @return [OpenAI::Models::Webhooks::BatchFailedWebhookEvent::Data] + required :data, -> { OpenAI::Webhooks::BatchFailedWebhookEvent::Data } + + # @!attribute type + # The type of the event. Always `batch.failed`. + # + # @return [Symbol, :"batch.failed"] + required :type, const: :"batch.failed" + + # @!attribute object + # The object of the event. Always `event`. + # + # @return [Symbol, OpenAI::Models::Webhooks::BatchFailedWebhookEvent::Object, nil] + optional :object, enum: -> { OpenAI::Webhooks::BatchFailedWebhookEvent::Object } + + # @!method initialize(id:, created_at:, data:, object: nil, type: :"batch.failed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::BatchFailedWebhookEvent} for more details. + # + # Sent when a batch API request has failed. + # + # @param id [String] The unique ID of the event. + # + # @param created_at [Integer] The Unix timestamp (in seconds) of when the batch API request failed. + # + # @param data [OpenAI::Models::Webhooks::BatchFailedWebhookEvent::Data] Event data payload. + # + # @param object [Symbol, OpenAI::Models::Webhooks::BatchFailedWebhookEvent::Object] The object of the event. Always `event`. + # + # @param type [Symbol, :"batch.failed"] The type of the event. Always `batch.failed`. + + # @see OpenAI::Models::Webhooks::BatchFailedWebhookEvent#data + class Data < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the batch API request. + # + # @return [String] + required :id, String + + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::BatchFailedWebhookEvent::Data} for more details. + # + # Event data payload. + # + # @param id [String] The unique ID of the batch API request. + end + + # The object of the event. Always `event`. + # + # @see OpenAI::Models::Webhooks::BatchFailedWebhookEvent#object + module Object + extend OpenAI::Internal::Type::Enum + + EVENT = :event + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb b/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb new file mode 100644 index 00000000..684cd7f0 --- /dev/null +++ b/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb @@ -0,0 +1,84 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + class EvalRunCanceledWebhookEvent < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the event. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) of when the eval run was canceled. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data + # Event data payload. + # + # @return [OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::Data] + required :data, -> { OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Data } + + # @!attribute type + # The type of the event. Always `eval.run.canceled`. + # + # @return [Symbol, :"eval.run.canceled"] + required :type, const: :"eval.run.canceled" + + # @!attribute object + # The object of the event. Always `event`. 
+ # + # @return [Symbol, OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::Object, nil] + optional :object, enum: -> { OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Object } + + # @!method initialize(id:, created_at:, data:, object: nil, type: :"eval.run.canceled") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent} for more details. + # + # Sent when an eval run has been canceled. + # + # @param id [String] The unique ID of the event. + # + # @param created_at [Integer] The Unix timestamp (in seconds) of when the eval run was canceled. + # + # @param data [OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::Data] Event data payload. + # + # @param object [Symbol, OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::Object] The object of the event. Always `event`. + # + # @param type [Symbol, :"eval.run.canceled"] The type of the event. Always `eval.run.canceled`. + + # @see OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent#data + class Data < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the eval run. + # + # @return [String] + required :id, String + + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::Data} for more details. + # + # Event data payload. + # + # @param id [String] The unique ID of the eval run. + end + + # The object of the event. Always `event`. + # + # @see OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent#object + module Object + extend OpenAI::Internal::Type::Enum + + EVENT = :event + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb b/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb new file mode 100644 index 00000000..c0db2b68 --- /dev/null +++ b/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb @@ -0,0 +1,84 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + class EvalRunFailedWebhookEvent < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the event. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) of when the eval run failed. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data + # Event data payload. + # + # @return [OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::Data] + required :data, -> { OpenAI::Webhooks::EvalRunFailedWebhookEvent::Data } + + # @!attribute type + # The type of the event. Always `eval.run.failed`. + # + # @return [Symbol, :"eval.run.failed"] + required :type, const: :"eval.run.failed" + + # @!attribute object + # The object of the event. Always `event`. + # + # @return [Symbol, OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::Object, nil] + optional :object, enum: -> { OpenAI::Webhooks::EvalRunFailedWebhookEvent::Object } + + # @!method initialize(id:, created_at:, data:, object: nil, type: :"eval.run.failed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent} for more details. + # + # Sent when an eval run has failed. + # + # @param id [String] The unique ID of the event. + # + # @param created_at [Integer] The Unix timestamp (in seconds) of when the eval run failed. + # + # @param data [OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::Data] Event data payload. 
+ # + # @param object [Symbol, OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::Object] The object of the event. Always `event`. + # + # @param type [Symbol, :"eval.run.failed"] The type of the event. Always `eval.run.failed`. + + # @see OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent#data + class Data < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the eval run. + # + # @return [String] + required :id, String + + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::Data} for more details. + # + # Event data payload. + # + # @param id [String] The unique ID of the eval run. + end + + # The object of the event. Always `event`. + # + # @see OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent#object + module Object + extend OpenAI::Internal::Type::Enum + + EVENT = :event + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb b/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb new file mode 100644 index 00000000..55321f79 --- /dev/null +++ b/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb @@ -0,0 +1,84 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + class EvalRunSucceededWebhookEvent < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the event. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) of when the eval run succeeded. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data + # Event data payload. + # + # @return [OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::Data] + required :data, -> { OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Data } + + # @!attribute type + # The type of the event. Always `eval.run.succeeded`. + # + # @return [Symbol, :"eval.run.succeeded"] + required :type, const: :"eval.run.succeeded" + + # @!attribute object + # The object of the event. Always `event`. + # + # @return [Symbol, OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::Object, nil] + optional :object, enum: -> { OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Object } + + # @!method initialize(id:, created_at:, data:, object: nil, type: :"eval.run.succeeded") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent} for more details. + # + # Sent when an eval run has succeeded. + # + # @param id [String] The unique ID of the event. + # + # @param created_at [Integer] The Unix timestamp (in seconds) of when the eval run succeeded. + # + # @param data [OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::Data] Event data payload. + # + # @param object [Symbol, OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::Object] The object of the event. Always `event`. + # + # @param type [Symbol, :"eval.run.succeeded"] The type of the event. Always `eval.run.succeeded`. + + # @see OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent#data + class Data < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the eval run. + # + # @return [String] + required :id, String + + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::Data} for more details. + # + # Event data payload. 
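Wiring these payloads into an HTTP endpoint is left to the application; this diff only defines the event shapes. A sketch using Sinatra-style routing (an assumption, as are the two helper methods), with signature verification elided:

    require "json"

    post "/openai/webhooks" do
      payload = JSON.parse(request.body.read, symbolize_names: true)
      case payload[:type]
      when "eval.run.succeeded"
        mark_succeeded(payload[:data][:id]) # hypothetical helper
      when "eval.run.failed", "eval.run.canceled"
        mark_stopped(payload[:data][:id])   # hypothetical helper
      end
      200
    end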
+ # + # @param id [String] The unique ID of the eval run. + end + + # The object of the event. Always `event`. + # + # @see OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent#object + module Object + extend OpenAI::Internal::Type::Enum + + EVENT = :event + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb b/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb new file mode 100644 index 00000000..66d6c0f3 --- /dev/null +++ b/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + class FineTuningJobCancelledWebhookEvent < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the event. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) of when the fine-tuning job was cancelled. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data + # Event data payload. + # + # @return [OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::Data] + required :data, -> { OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Data } + + # @!attribute type + # The type of the event. Always `fine_tuning.job.cancelled`. + # + # @return [Symbol, :"fine_tuning.job.cancelled"] + required :type, const: :"fine_tuning.job.cancelled" + + # @!attribute object + # The object of the event. Always `event`. + # + # @return [Symbol, OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::Object, nil] + optional :object, enum: -> { OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Object } + + # @!method initialize(id:, created_at:, data:, object: nil, type: :"fine_tuning.job.cancelled") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent} for more details. + # + # Sent when a fine-tuning job has been cancelled. + # + # @param id [String] The unique ID of the event. + # + # @param created_at [Integer] The Unix timestamp (in seconds) of when the fine-tuning job was cancelled. + # + # @param data [OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::Data] Event data payload. + # + # @param object [Symbol, OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::Object] The object of the event. Always `event`. + # + # @param type [Symbol, :"fine_tuning.job.cancelled"] The type of the event. Always `fine_tuning.job.cancelled`. + + # @see OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent#data + class Data < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the fine-tuning job. + # + # @return [String] + required :id, String + + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::Data} for more + # details. + # + # Event data payload. + # + # @param id [String] The unique ID of the fine-tuning job. + end + + # The object of the event. Always `event`. 
+ # + # @see OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent#object + module Object + extend OpenAI::Internal::Type::Enum + + EVENT = :event + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb b/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb new file mode 100644 index 00000000..160a2a3c --- /dev/null +++ b/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + class FineTuningJobFailedWebhookEvent < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the event. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) of when the fine-tuning job failed. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data + # Event data payload. + # + # @return [OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::Data] + required :data, -> { OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Data } + + # @!attribute type + # The type of the event. Always `fine_tuning.job.failed`. + # + # @return [Symbol, :"fine_tuning.job.failed"] + required :type, const: :"fine_tuning.job.failed" + + # @!attribute object + # The object of the event. Always `event`. + # + # @return [Symbol, OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::Object, nil] + optional :object, enum: -> { OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Object } + + # @!method initialize(id:, created_at:, data:, object: nil, type: :"fine_tuning.job.failed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent} for more details. + # + # Sent when a fine-tuning job has failed. + # + # @param id [String] The unique ID of the event. + # + # @param created_at [Integer] The Unix timestamp (in seconds) of when the fine-tuning job failed. + # + # @param data [OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::Data] Event data payload. + # + # @param object [Symbol, OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::Object] The object of the event. Always `event`. + # + # @param type [Symbol, :"fine_tuning.job.failed"] The type of the event. Always `fine_tuning.job.failed`. + + # @see OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent#data + class Data < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the fine-tuning job. + # + # @return [String] + required :id, String + + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::Data} for more + # details. + # + # Event data payload. + # + # @param id [String] The unique ID of the fine-tuning job. + end + + # The object of the event. Always `event`. 
+ # + # @see OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent#object + module Object + extend OpenAI::Internal::Type::Enum + + EVENT = :event + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb b/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb new file mode 100644 index 00000000..20c1ee68 --- /dev/null +++ b/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + class FineTuningJobSucceededWebhookEvent < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the event. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) of when the fine-tuning job succeeded. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data + # Event data payload. + # + # @return [OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::Data] + required :data, -> { OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Data } + + # @!attribute type + # The type of the event. Always `fine_tuning.job.succeeded`. + # + # @return [Symbol, :"fine_tuning.job.succeeded"] + required :type, const: :"fine_tuning.job.succeeded" + + # @!attribute object + # The object of the event. Always `event`. + # + # @return [Symbol, OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::Object, nil] + optional :object, enum: -> { OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Object } + + # @!method initialize(id:, created_at:, data:, object: nil, type: :"fine_tuning.job.succeeded") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent} for more details. + # + # Sent when a fine-tuning job has succeeded. + # + # @param id [String] The unique ID of the event. + # + # @param created_at [Integer] The Unix timestamp (in seconds) of when the fine-tuning job succeeded. + # + # @param data [OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::Data] Event data payload. + # + # @param object [Symbol, OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::Object] The object of the event. Always `event`. + # + # @param type [Symbol, :"fine_tuning.job.succeeded"] The type of the event. Always `fine_tuning.job.succeeded`. + + # @see OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent#data + class Data < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the fine-tuning job. + # + # @return [String] + required :id, String + + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::Data} for more + # details. + # + # Event data payload. + # + # @param id [String] The unique ID of the fine-tuning job. + end + + # The object of the event. Always `event`. 
+ # + # @see OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent#object + module Object + extend OpenAI::Internal::Type::Enum + + EVENT = :event + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/webhooks/response_cancelled_webhook_event.rb b/lib/openai/models/webhooks/response_cancelled_webhook_event.rb new file mode 100644 index 00000000..8e3d0632 --- /dev/null +++ b/lib/openai/models/webhooks/response_cancelled_webhook_event.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + class ResponseCancelledWebhookEvent < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the event. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) of when the model response was cancelled. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data + # Event data payload. + # + # @return [OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::Data] + required :data, -> { OpenAI::Webhooks::ResponseCancelledWebhookEvent::Data } + + # @!attribute type + # The type of the event. Always `response.cancelled`. + # + # @return [Symbol, :"response.cancelled"] + required :type, const: :"response.cancelled" + + # @!attribute object + # The object of the event. Always `event`. + # + # @return [Symbol, OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::Object, nil] + optional :object, enum: -> { OpenAI::Webhooks::ResponseCancelledWebhookEvent::Object } + + # @!method initialize(id:, created_at:, data:, object: nil, type: :"response.cancelled") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent} for more details. + # + # Sent when a background response has been cancelled. + # + # @param id [String] The unique ID of the event. + # + # @param created_at [Integer] The Unix timestamp (in seconds) of when the model response was cancelled. + # + # @param data [OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::Data] Event data payload. + # + # @param object [Symbol, OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::Object] The object of the event. Always `event`. + # + # @param type [Symbol, :"response.cancelled"] The type of the event. Always `response.cancelled`. + + # @see OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent#data + class Data < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the model response. + # + # @return [String] + required :id, String + + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::Data} for more + # details. + # + # Event data payload. + # + # @param id [String] The unique ID of the model response. + end + + # The object of the event. Always `event`. 
+ # + # @see OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent#object + module Object + extend OpenAI::Internal::Type::Enum + + EVENT = :event + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/webhooks/response_completed_webhook_event.rb b/lib/openai/models/webhooks/response_completed_webhook_event.rb new file mode 100644 index 00000000..9228af45 --- /dev/null +++ b/lib/openai/models/webhooks/response_completed_webhook_event.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + class ResponseCompletedWebhookEvent < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the event. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) of when the model response was completed. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data + # Event data payload. + # + # @return [OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::Data] + required :data, -> { OpenAI::Webhooks::ResponseCompletedWebhookEvent::Data } + + # @!attribute type + # The type of the event. Always `response.completed`. + # + # @return [Symbol, :"response.completed"] + required :type, const: :"response.completed" + + # @!attribute object + # The object of the event. Always `event`. + # + # @return [Symbol, OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::Object, nil] + optional :object, enum: -> { OpenAI::Webhooks::ResponseCompletedWebhookEvent::Object } + + # @!method initialize(id:, created_at:, data:, object: nil, type: :"response.completed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent} for more details. + # + # Sent when a background response has been completed. + # + # @param id [String] The unique ID of the event. + # + # @param created_at [Integer] The Unix timestamp (in seconds) of when the model response was completed. + # + # @param data [OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::Data] Event data payload. + # + # @param object [Symbol, OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::Object] The object of the event. Always `event`. + # + # @param type [Symbol, :"response.completed"] The type of the event. Always `response.completed`. + + # @see OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent#data + class Data < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the model response. + # + # @return [String] + required :id, String + + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::Data} for more + # details. + # + # Event data payload. + # + # @param id [String] The unique ID of the model response. + end + + # The object of the event. Always `event`. 
+ # + # @see OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent#object + module Object + extend OpenAI::Internal::Type::Enum + + EVENT = :event + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/webhooks/response_failed_webhook_event.rb b/lib/openai/models/webhooks/response_failed_webhook_event.rb new file mode 100644 index 00000000..fe54ea51 --- /dev/null +++ b/lib/openai/models/webhooks/response_failed_webhook_event.rb @@ -0,0 +1,84 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + class ResponseFailedWebhookEvent < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the event. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) of when the model response failed. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data + # Event data payload. + # + # @return [OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::Data] + required :data, -> { OpenAI::Webhooks::ResponseFailedWebhookEvent::Data } + + # @!attribute type + # The type of the event. Always `response.failed`. + # + # @return [Symbol, :"response.failed"] + required :type, const: :"response.failed" + + # @!attribute object + # The object of the event. Always `event`. + # + # @return [Symbol, OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::Object, nil] + optional :object, enum: -> { OpenAI::Webhooks::ResponseFailedWebhookEvent::Object } + + # @!method initialize(id:, created_at:, data:, object: nil, type: :"response.failed") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::ResponseFailedWebhookEvent} for more details. + # + # Sent when a background response has failed. + # + # @param id [String] The unique ID of the event. + # + # @param created_at [Integer] The Unix timestamp (in seconds) of when the model response failed. + # + # @param data [OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::Data] Event data payload. + # + # @param object [Symbol, OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::Object] The object of the event. Always `event`. + # + # @param type [Symbol, :"response.failed"] The type of the event. Always `response.failed`. + + # @see OpenAI::Models::Webhooks::ResponseFailedWebhookEvent#data + class Data < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the model response. + # + # @return [String] + required :id, String + + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::Data} for more details. + # + # Event data payload. + # + # @param id [String] The unique ID of the model response. + end + + # The object of the event. Always `event`. 
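+        #
+        # Since the attribute is optional, a parsed event may omit it; a
+        # hypothetical check, where `event` is a ResponseFailedWebhookEvent
+        # instance:
+        #
+        # @example
+        #   ```ruby
+        #   event.object # => :event, or nil when the field was omitted
+        #   ```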
+ # + # @see OpenAI::Models::Webhooks::ResponseFailedWebhookEvent#object + module Object + extend OpenAI::Internal::Type::Enum + + EVENT = :event + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/webhooks/response_incomplete_webhook_event.rb b/lib/openai/models/webhooks/response_incomplete_webhook_event.rb new file mode 100644 index 00000000..9dcecc9e --- /dev/null +++ b/lib/openai/models/webhooks/response_incomplete_webhook_event.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + class ResponseIncompleteWebhookEvent < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the event. + # + # @return [String] + required :id, String + + # @!attribute created_at + # The Unix timestamp (in seconds) of when the model response was interrupted. + # + # @return [Integer] + required :created_at, Integer + + # @!attribute data + # Event data payload. + # + # @return [OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::Data] + required :data, -> { OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data } + + # @!attribute type + # The type of the event. Always `response.incomplete`. + # + # @return [Symbol, :"response.incomplete"] + required :type, const: :"response.incomplete" + + # @!attribute object + # The object of the event. Always `event`. + # + # @return [Symbol, OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::Object, nil] + optional :object, enum: -> { OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object } + + # @!method initialize(id:, created_at:, data:, object: nil, type: :"response.incomplete") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent} for more details. + # + # Sent when a background response has been interrupted. + # + # @param id [String] The unique ID of the event. + # + # @param created_at [Integer] The Unix timestamp (in seconds) of when the model response was interrupted. + # + # @param data [OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::Data] Event data payload. + # + # @param object [Symbol, OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::Object] The object of the event. Always `event`. + # + # @param type [Symbol, :"response.incomplete"] The type of the event. Always `response.incomplete`. + + # @see OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent#data + class Data < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The unique ID of the model response. + # + # @return [String] + required :id, String + + # @!method initialize(id:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::Data} for more + # details. + # + # Event data payload. + # + # @param id [String] The unique ID of the model response. + end + + # The object of the event. Always `event`. + # + # @see OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent#object + module Object + extend OpenAI::Internal::Type::Enum + + EVENT = :event + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/webhooks/unwrap_webhook_event.rb b/lib/openai/models/webhooks/unwrap_webhook_event.rb new file mode 100644 index 00000000..821aa816 --- /dev/null +++ b/lib/openai/models/webhooks/unwrap_webhook_event.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + # Sent when a batch API request has been cancelled. 
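+      #
+      # A minimal dispatch sketch over the union's variants. It assumes `event`
+      # was produced by {OpenAI::Resources::Webhooks#unwrap}; the handler bodies
+      # are hypothetical:
+      #
+      # @example
+      #   ```ruby
+      #   case event
+      #   when OpenAI::Models::Webhooks::BatchCompletedWebhookEvent
+      #     puts("batch #{event.data.id} completed")
+      #   when OpenAI::Models::Webhooks::ResponseFailedWebhookEvent
+      #     warn("response #{event.data.id} failed")
+      #   else
+      #     puts("unhandled event: #{event.type}")
+      #   end
+      #   ```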
+ module UnwrapWebhookEvent + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Sent when a batch API request has been cancelled. + variant :"batch.cancelled", -> { OpenAI::Webhooks::BatchCancelledWebhookEvent } + + # Sent when a batch API request has been completed. + variant :"batch.completed", -> { OpenAI::Webhooks::BatchCompletedWebhookEvent } + + # Sent when a batch API request has expired. + variant :"batch.expired", -> { OpenAI::Webhooks::BatchExpiredWebhookEvent } + + # Sent when a batch API request has failed. + variant :"batch.failed", -> { OpenAI::Webhooks::BatchFailedWebhookEvent } + + # Sent when an eval run has been canceled. + variant :"eval.run.canceled", -> { OpenAI::Webhooks::EvalRunCanceledWebhookEvent } + + # Sent when an eval run has failed. + variant :"eval.run.failed", -> { OpenAI::Webhooks::EvalRunFailedWebhookEvent } + + # Sent when an eval run has succeeded. + variant :"eval.run.succeeded", -> { OpenAI::Webhooks::EvalRunSucceededWebhookEvent } + + # Sent when a fine-tuning job has been cancelled. + variant :"fine_tuning.job.cancelled", -> { OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent } + + # Sent when a fine-tuning job has failed. + variant :"fine_tuning.job.failed", -> { OpenAI::Webhooks::FineTuningJobFailedWebhookEvent } + + # Sent when a fine-tuning job has succeeded. + variant :"fine_tuning.job.succeeded", -> { OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent } + + # Sent when a background response has been cancelled. + variant :"response.cancelled", -> { OpenAI::Webhooks::ResponseCancelledWebhookEvent } + + # Sent when a background response has been completed. + variant :"response.completed", -> { OpenAI::Webhooks::ResponseCompletedWebhookEvent } + + # Sent when a background response has failed. + variant :"response.failed", -> { OpenAI::Webhooks::ResponseFailedWebhookEvent } + + # Sent when a background response has been interrupted. 
+ variant :"response.incomplete", -> { OpenAI::Webhooks::ResponseIncompleteWebhookEvent } + + # @!method self.variants + # @return [Array(OpenAI::Models::Webhooks::BatchCancelledWebhookEvent, OpenAI::Models::Webhooks::BatchCompletedWebhookEvent, OpenAI::Models::Webhooks::BatchExpiredWebhookEvent, OpenAI::Models::Webhooks::BatchFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent, OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent, OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent, OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent, OpenAI::Models::Webhooks::ResponseFailedWebhookEvent, OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent)] + end + end + end +end diff --git a/lib/openai/models/webhooks/webhook_unwrap_params.rb b/lib/openai/models/webhooks/webhook_unwrap_params.rb new file mode 100644 index 00000000..2413665c --- /dev/null +++ b/lib/openai/models/webhooks/webhook_unwrap_params.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Webhooks + # @see OpenAI::Resources::Webhooks#unwrap + class WebhookUnwrapParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + # @!method initialize(request_options: {}) + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] + end + end + end +end diff --git a/lib/openai/page.rb b/lib/openai/page.rb deleted file mode 100644 index 54fc4d39..00000000 --- a/lib/openai/page.rb +++ /dev/null @@ -1,93 +0,0 @@ -# frozen_string_literal: true - -module OpenAI - # @example - # ```ruby - # if page.has_next? - # page = page.next_page - # end - # ``` - # - # @example - # ```ruby - # page.auto_paging_each do |model| - # puts(model) - # end - # ``` - # - # @example - # ```ruby - # models = page.to_enum.take(2) - # - # models => Array - # ``` - class Page - include OpenAI::BasePage - - # @return [Array] - attr_accessor :data - - # @return [String] - attr_accessor :object - - # rubocop:disable Lint/UnusedMethodArgument - # @private - # - # @param client [OpenAI::BaseClient] - # @param req [Hash{Symbol=>Object}] - # @param headers [Hash{String=>String}, Net::HTTPHeader] - # @param page_data [Array] - # - def initialize(client:, req:, headers:, page_data:) - @client = client - @req = req - model = req.fetch(:model) - - case page_data - in {data: Array | nil => data} - @data = data&.map { model.coerce(_1) } - else - end - - case page_data - in {object: String | nil => object} - @object = object - else - end - end - # rubocop:enable Lint/UnusedMethodArgument - - # @return [Boolean] - # - def next_page? - false - end - - # @raise [OpenAI::HTTP::Error] - # @return [OpenAI::Page] - # - def next_page - RuntimeError.new("No more pages available.") - end - - # @param blk [Proc] - # - def auto_paging_each(&blk) - unless block_given? - raise ArgumentError.new("A block must be given to ##{__method__}") - end - page = self - loop do - page.data&.each { blk.call(_1) } - break unless page.next_page? 
- page = page.next_page - end - end - - # @return [String] - # - def inspect - "#<#{self.class}:0x#{object_id.to_s(16)} data=#{data.inspect} object=#{object.inspect}>" - end - end -end diff --git a/lib/openai/pooled_net_requester.rb b/lib/openai/pooled_net_requester.rb deleted file mode 100644 index 57bd2e81..00000000 --- a/lib/openai/pooled_net_requester.rb +++ /dev/null @@ -1,176 +0,0 @@ -# frozen_string_literal: true - -module OpenAI - # @private - # - class PooledNetRequester - class << self - # @private - # - # @param url [URI::Generic] - # - # @return [Net::HTTP] - # - def connect(url) - port = - case [url.port, url.scheme] - in [Integer, _] - url.port - in [nil, "http" | "ws"] - Net::HTTP.http_default_port - in [nil, "https" | "wss"] - Net::HTTP.https_default_port - end - - Net::HTTP.new(url.host, port).tap do - _1.use_ssl = %w[https wss].include?(url.scheme) - _1.max_retries = 0 - end - end - - # @private - # - # @param conn [Net::HTTP] - # @param deadline [Float] - # - def calibrate_socket_timeout(conn, deadline) - timeout = deadline - OpenAI::Util.monotonic_secs - conn.open_timeout = conn.read_timeout = conn.write_timeout = conn.continue_timeout = timeout - end - - # @private - # - # @param request [Hash{Symbol=>Object}] . - # - # @option request [Symbol] :method - # - # @option request [URI::Generic] :url - # - # @option request [Hash{String=>String}] :headers - # - # @return [Net::HTTPGenericRequest] - # - def build_request(request) - method, url, headers, body = request.fetch_values(:method, :url, :headers, :body) - req = Net::HTTPGenericRequest.new( - method.to_s.upcase, - !body.nil?, - method != :head, - url.to_s - ) - - headers.each { req[_1] = _2 } - - case body - in nil - in String - req.body = body - in StringIO - req.body = body.string - in IO - body.rewind - req.body_stream = body - end - - req - end - end - - # @private - # - # @param url [URI::Generic] - # @param blk [Proc] - # - private def with_pool(url, &blk) - origin = OpenAI::Util.uri_origin(url) - th = Thread.current - key = :"#{object_id}-#{self.class.name}-connection_in_use_for_#{origin}" - - if th[key] - tap do - conn = self.class.connect(url) - return blk.call(conn) - ensure - conn.finish if conn&.started? - end - end - - pool = - @mutex.synchronize do - @pools[origin] ||= ConnectionPool.new(size: Etc.nprocessors) do - self.class.connect(url) - end - end - - pool.with do |conn| - th[key] = true - blk.call(conn) - ensure - th[key] = nil - end - end - - # @private - # - # @param request [Hash{Symbol=>Object}] . - # - # @option request [Symbol] :method - # - # @option request [URI::Generic] :url - # - # @option request [Hash{String=>String}] :headers - # - # @option request [Object] :body - # - # @option request [Float] :deadline - # - # @return [Array(Net::HTTPResponse, Enumerable)] - # - def execute(request) - url, deadline = request.fetch_values(:url, :deadline) - req = self.class.build_request(request) - - eof = false - finished = false - enum = Enumerator.new do |y| - with_pool(url) do |conn| - next if finished - - self.class.calibrate_socket_timeout(conn, deadline) - conn.start unless conn.started? 
- - self.class.calibrate_socket_timeout(conn, deadline) - conn.request(req) do |rsp| - y << [conn, rsp] - break if finished - - rsp.read_body do |bytes| - y << bytes - break if finished - - self.class.calibrate_socket_timeout(conn, deadline) - end - eof = true - end - end - end - - conn, response = enum.next - body = OpenAI::Util.fused_enum(enum, external: true) do - finished = true - tap do - enum.next - rescue StopIteration - nil - end - conn.finish if !eof && conn&.started? - end - [response, (response.body = body)] - end - - def initialize - @mutex = Mutex.new - @pools = {} - end - end -end diff --git a/lib/openai/request_options.rb b/lib/openai/request_options.rb index befdf96f..ed62d70f 100644 --- a/lib/openai/request_options.rb +++ b/lib/openai/request_options.rb @@ -1,57 +1,17 @@ # frozen_string_literal: true module OpenAI - # @private - # - # @abstract - # - module RequestParameters - # @!parse - # # Options to specify HTTP behaviour for this request. - # # @return [OpenAI::RequestOptions, Hash{Symbol=>Object}] - # attr_accessor :request_options - - # @param mod [Module] - # - def self.included(mod) - return unless mod <= OpenAI::BaseModel - - mod.extend(OpenAI::RequestParameters::Converter) - mod.optional(:request_options, OpenAI::RequestOptions) - end - - # @private - # - module Converter - # @private - # - # @param params [Object] - # - # @return [Array(Object, Hash{Symbol=>Object})] - # - def dump_request(params) - case (dumped = dump(params)) - in Hash - [dumped.except(:request_options), dumped[:request_options]] - else - [dumped, nil] - end - end - end - end - # Specify HTTP behaviour to use for a specific request. These options supplement - # or override those provided at the client level. + # or override those provided at the client level. # - # When making a request, you can pass an actual {RequestOptions} instance, or - # simply pass a Hash with symbol keys matching the attributes on this class. - class RequestOptions < OpenAI::BaseModel - # @private + # When making a request, you can pass an actual {RequestOptions} instance, or + # simply pass a Hash with symbol keys matching the attributes on this class. + class RequestOptions < OpenAI::Internal::Type::BaseModel + # @api private # # @param opts [OpenAI::RequestOptions, Hash{Symbol=>Object}] # # @raise [ArgumentError] - # def self.validate!(opts) case opts in OpenAI::RequestOptions | Hash @@ -67,31 +27,31 @@ def self.validate!(opts) # @!attribute idempotency_key # Idempotency key to send with request and all associated retries. Will only be - # sent for write requests. + # sent for write requests. # # @return [String, nil] optional :idempotency_key, String # @!attribute extra_query # Extra query params to send with the request. These are `.merge`’d into any - # `query` given at the client level. + # `query` given at the client level. # # @return [Hash{String=>Array, String, nil}, nil] - optional :extra_query, OpenAI::HashOf[OpenAI::ArrayOf[String]] + optional :extra_query, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::ArrayOf[String]] # @!attribute extra_headers # Extra headers to send with the request. These are `.merged`’d into any - # `extra_headers` given at the client level. + # `extra_headers` given at the client level. # # @return [Hash{String=>String, nil}, nil] - optional :extra_headers, OpenAI::HashOf[String, nil?: true] + optional :extra_headers, OpenAI::Internal::Type::HashOf[String, nil?: true] # @!attribute extra_body # Extra data to send with the request. 
These are deep merged into any data - # generated as part of the normal request. + # generated as part of the normal request. # - # @return [Hash{Symbol=>Object}, nil] - optional :extra_body, OpenAI::HashOf[OpenAI::Unknown] + # @return [Object, nil] + optional :extra_body, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown] # @!attribute max_retries # Maximum number of retries to attempt after a failed initial request. @@ -105,11 +65,13 @@ def self.validate!(opts) # @return [Float, nil] optional :timeout, Float - # @!parse - # # Returns a new instance of RequestOptions. - # # - # # @param values [Hash{Symbol=>Object}] - # # - # def initialize(values = {}) = super + # @!method initialize(values = {}) + # Returns a new instance of RequestOptions. + # + # @param values [Hash{Symbol=>Object}] + + define_sorbet_constant!(:OrHash) do + T.type_alias { T.any(OpenAI::RequestOptions, OpenAI::Internal::AnyHash) } + end end end diff --git a/lib/openai/resources/audio.rb b/lib/openai/resources/audio.rb index db698a4b..2ddf1970 100644 --- a/lib/openai/resources/audio.rb +++ b/lib/openai/resources/audio.rb @@ -12,8 +12,9 @@ class Audio # @return [OpenAI::Resources::Audio::Speech] attr_reader :speech - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client @transcriptions = OpenAI::Resources::Audio::Transcriptions.new(client: client) diff --git a/lib/openai/resources/audio/speech.rb b/lib/openai/resources/audio/speech.rb index dce52041..7a7eb0fe 100644 --- a/lib/openai/resources/audio/speech.rb +++ b/lib/openai/resources/audio/speech.rb @@ -4,44 +4,47 @@ module OpenAI module Resources class Audio class Speech + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Audio::SpeechCreateParams} for more details. + # # Generates audio from the input text. # - # @param params [OpenAI::Models::Audio::SpeechCreateParams, Hash{Symbol=>Object}] . + # @overload create(input:, model:, voice:, instructions: nil, response_format: nil, speed: nil, stream_format: nil, request_options: {}) + # + # @param input [String] The text to generate audio for. The maximum length is 4096 characters. + # + # @param model [String, Symbol, OpenAI::Models::Audio::SpeechModel] One of the available [TTS models](https://platform.openai.com/docs/models#tts): # - # @option params [String] :input The text to generate audio for. The maximum length is 4096 characters. + # @param voice [String, Symbol, OpenAI::Models::Audio::SpeechCreateParams::Voice] The voice to use when generating the audio. Supported voices are `alloy`, `ash`, # - # @option params [String, Symbol, OpenAI::Models::Audio::SpeechModel] :model One of the available [TTS models](https://platform.openai.com/docs/models#tts): - # `tts-1` or `tts-1-hd` + # @param instructions [String] Control the voice of your generated audio with additional instructions. Does not # - # @option params [Symbol, OpenAI::Models::Audio::SpeechCreateParams::Voice] :voice The voice to use when generating the audio. Supported voices are `alloy`, `ash`, - # `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the - # voices are available in the - # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). + # @param response_format [Symbol, OpenAI::Models::Audio::SpeechCreateParams::ResponseFormat] The format to audio in. 
Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav # - # @option params [Symbol, OpenAI::Models::Audio::SpeechCreateParams::ResponseFormat] :response_format The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, - # `wav`, and `pcm`. + # @param speed [Float] The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is # - # @option params [Float] :speed The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is - # the default. + # @param stream_format [Symbol, OpenAI::Models::Audio::SpeechCreateParams::StreamFormat] The format to stream the audio in. Supported formats are `sse` and `audio`. `sse # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [Object] + # @return [StringIO] # + # @see OpenAI::Models::Audio::SpeechCreateParams def create(params) - parsed, options = OpenAI::Models::Audio::SpeechCreateParams.dump_request(params) + parsed, options = OpenAI::Audio::SpeechCreateParams.dump_request(params) @client.request( method: :post, path: "audio/speech", headers: {"accept" => "application/octet-stream"}, body: parsed, - model: OpenAI::Unknown, + model: StringIO, options: options ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/audio/transcriptions.rb b/lib/openai/resources/audio/transcriptions.rb index 4c158734..45570d65 100644 --- a/lib/openai/resources/audio/transcriptions.rb +++ b/lib/openai/resources/audio/transcriptions.rb @@ -4,46 +4,45 @@ module OpenAI module Resources class Audio class Transcriptions + # See {OpenAI::Resources::Audio::Transcriptions#create_streaming} for streaming + # counterpart. + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Audio::TranscriptionCreateParams} for more details. + # # Transcribes audio into the input language. # - # @param params [OpenAI::Models::Audio::TranscriptionCreateParams, Hash{Symbol=>Object}] . + # @overload create(file:, model:, chunking_strategy: nil, include: nil, language: nil, prompt: nil, response_format: nil, temperature: nil, timestamp_granularities: nil, request_options: {}) + # + # @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The audio file object (not file name) to transcribe, in one of these formats: fl + # + # @param model [String, Symbol, OpenAI::Models::AudioModel] ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transc # - # @option params [IO, StringIO] :file The audio file object (not file name) to transcribe, in one of these formats: - # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + # @param chunking_strategy [Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil] Controls how the audio is cut into chunks. When set to `"auto"`, the server firs # - # @option params [String, Symbol, OpenAI::Models::AudioModel] :model ID of the model to use. Only `whisper-1` (which is powered by our open source - # Whisper V2 model) is currently available. + # @param include [Array] Additional information to include in the transcription response. # - # @option params [String] :language The language of the input audio. Supplying the input language in - # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) - # format will improve accuracy and latency. 
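+        #
+        # A minimal usage sketch; the client construction and file path are
+        # hypothetical, and `gpt-4o-transcribe` is one of the model options named
+        # above:
+        #
+        # @example
+        #   ```ruby
+        #   require "pathname"
+        #
+        #   client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
+        #   transcription = client.audio.transcriptions.create(
+        #     file: Pathname("speech.mp3"),
+        #     model: "gpt-4o-transcribe"
+        #   )
+        #   puts(transcription.text)
+        #   ```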
+ # @param language [String] The language of the input audio. Supplying the input language in [ISO-639-1](htt # - # @option params [String] :prompt An optional text to guide the model's style or continue a previous audio - # segment. The - # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - # should match the audio language. + # @param prompt [String] An optional text to guide the model's style or continue a previous audio segment # - # @option params [Symbol, OpenAI::Models::AudioResponseFormat] :response_format The format of the output, in one of these options: `json`, `text`, `srt`, - # `verbose_json`, or `vtt`. + # @param response_format [Symbol, OpenAI::Models::AudioResponseFormat] The format of the output, in one of these options: `json`, `text`, `srt`, `verbo # - # @option params [Float] :temperature The sampling temperature, between 0 and 1. Higher values like 0.8 will make the - # output more random, while lower values like 0.2 will make it more focused and - # deterministic. If set to 0, the model will use - # [log probability](https://en.wikipedia.org/wiki/Log_probability) to - # automatically increase the temperature until certain thresholds are hit. + # @param temperature [Float] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the # - # @option params [Array] :timestamp_granularities The timestamp granularities to populate for this transcription. - # `response_format` must be set `verbose_json` to use timestamp granularities. - # Either or both of these options are supported: `word`, or `segment`. Note: There - # is no additional latency for segment timestamps, but generating word timestamps - # incurs additional latency. + # @param timestamp_granularities [Array] The timestamp granularities to populate for this transcription. `response_format # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Audio::Transcription, OpenAI::Models::Audio::TranscriptionVerbose] # + # @see OpenAI::Models::Audio::TranscriptionCreateParams def create(params) - parsed, options = OpenAI::Models::Audio::TranscriptionCreateParams.dump_request(params) + parsed, options = OpenAI::Audio::TranscriptionCreateParams.dump_request(params) + if parsed[:stream] + message = "Please use `#create_streaming` for the streaming use case." + raise ArgumentError.new(message) + end @client.request( method: :post, path: "audio/transcriptions", @@ -54,8 +53,60 @@ def create(params) ) end - # @param client [OpenAI::Client] + # See {OpenAI::Resources::Audio::Transcriptions#create} for non-streaming + # counterpart. + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Audio::TranscriptionCreateParams} for more details. + # + # Transcribes audio into the input language. + # + # @overload create_streaming(file:, model:, chunking_strategy: nil, include: nil, language: nil, prompt: nil, response_format: nil, temperature: nil, timestamp_granularities: nil, request_options: {}) + # + # @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The audio file object (not file name) to transcribe, in one of these formats: fl + # + # @param model [String, Symbol, OpenAI::Models::AudioModel] ID of the model to use. 
The options are `gpt-4o-transcribe`, `gpt-4o-mini-transc + # + # @param chunking_strategy [Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil] Controls how the audio is cut into chunks. When set to `"auto"`, the server firs + # + # @param include [Array] Additional information to include in the transcription response. + # + # @param language [String] The language of the input audio. Supplying the input language in [ISO-639-1](htt + # + # @param prompt [String] An optional text to guide the model's style or continue a previous audio segment # + # @param response_format [Symbol, OpenAI::Models::AudioResponseFormat] The format of the output, in one of these options: `json`, `text`, `srt`, `verbo + # + # @param temperature [Float] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + # + # @param timestamp_granularities [Array] The timestamp granularities to populate for this transcription. `response_format + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Internal::Stream] + # + # @see OpenAI::Models::Audio::TranscriptionCreateParams + def create_streaming(params) + parsed, options = OpenAI::Audio::TranscriptionCreateParams.dump_request(params) + unless parsed.fetch(:stream, true) + message = "Please use `#create` for the non-streaming use case." + raise ArgumentError.new(message) + end + parsed.store(:stream, true) + @client.request( + method: :post, + path: "audio/transcriptions", + headers: {"content-type" => "multipart/form-data", "accept" => "text/event-stream"}, + body: parsed, + stream: OpenAI::Internal::Stream, + model: OpenAI::Audio::TranscriptionStreamEvent, + options: options + ) + end + + # @api private + # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/audio/translations.rb b/lib/openai/resources/audio/translations.rb index 4e1431c1..35ce0d09 100644 --- a/lib/openai/resources/audio/translations.rb +++ b/lib/openai/resources/audio/translations.rb @@ -4,36 +4,30 @@ module OpenAI module Resources class Audio class Translations + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Audio::TranslationCreateParams} for more details. + # # Translates audio into English. # - # @param params [OpenAI::Models::Audio::TranslationCreateParams, Hash{Symbol=>Object}] . + # @overload create(file:, model:, prompt: nil, response_format: nil, temperature: nil, request_options: {}) # - # @option params [IO, StringIO] :file The audio file object (not file name) translate, in one of these formats: flac, - # mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + # @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The audio file object (not file name) translate, in one of these formats: flac, # - # @option params [String, Symbol, OpenAI::Models::AudioModel] :model ID of the model to use. Only `whisper-1` (which is powered by our open source - # Whisper V2 model) is currently available. + # @param model [String, Symbol, OpenAI::Models::AudioModel] ID of the model to use. Only `whisper-1` (which is powered by our open source Wh # - # @option params [String] :prompt An optional text to guide the model's style or continue a previous audio - # segment. The - # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - # should be in English. 
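+        #
+        # A minimal usage sketch; the file path is hypothetical, and `client` is
+        # an OpenAI::Client as elsewhere in this SDK:
+        #
+        # @example
+        #   ```ruby
+        #   require "pathname"
+        #
+        #   translation = client.audio.translations.create(
+        #     file: Pathname("interview_fr.mp3"),
+        #     model: "whisper-1"
+        #   )
+        #   puts(translation.text)
+        #   ```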
+ # @param prompt [String] An optional text to guide the model's style or continue a previous audio segment # - # @option params [Symbol, OpenAI::Models::AudioResponseFormat] :response_format The format of the output, in one of these options: `json`, `text`, `srt`, - # `verbose_json`, or `vtt`. + # @param response_format [Symbol, OpenAI::Models::Audio::TranslationCreateParams::ResponseFormat] The format of the output, in one of these options: `json`, `text`, `srt`, `verbo # - # @option params [Float] :temperature The sampling temperature, between 0 and 1. Higher values like 0.8 will make the - # output more random, while lower values like 0.2 will make it more focused and - # deterministic. If set to 0, the model will use - # [log probability](https://en.wikipedia.org/wiki/Log_probability) to - # automatically increase the temperature until certain thresholds are hit. + # @param temperature [Float] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Audio::Translation, OpenAI::Models::Audio::TranslationVerbose] # + # @see OpenAI::Models::Audio::TranslationCreateParams def create(params) - parsed, options = OpenAI::Models::Audio::TranslationCreateParams.dump_request(params) + parsed, options = OpenAI::Audio::TranslationCreateParams.dump_request(params) @client.request( method: :post, path: "audio/translations", @@ -44,8 +38,9 @@ def create(params) ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/batches.rb b/lib/openai/resources/batches.rb index 751d6548..bd8f7530 100644 --- a/lib/openai/resources/batches.rb +++ b/lib/openai/resources/batches.rb @@ -3,120 +3,106 @@ module OpenAI module Resources class Batches - # Creates and executes a batch from an uploaded file of requests - # - # @param params [OpenAI::Models::BatchCreateParams, Hash{Symbol=>Object}] . + # Some parameter documentations has been truncated, see + # {OpenAI::Models::BatchCreateParams} for more details. # - # @option params [Symbol, OpenAI::Models::BatchCreateParams::CompletionWindow] :completion_window The time frame within which the batch should be processed. Currently only `24h` - # is supported. + # Creates and executes a batch from an uploaded file of requests # - # @option params [Symbol, OpenAI::Models::BatchCreateParams::Endpoint] :endpoint The endpoint to be used for all requests in the batch. Currently - # `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. - # Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 - # embedding inputs across all requests in the batch. + # @overload create(completion_window:, endpoint:, input_file_id:, metadata: nil, output_expires_after: nil, request_options: {}) # - # @option params [String] :input_file_id The ID of an uploaded file that contains requests for the new batch. + # @param completion_window [Symbol, OpenAI::Models::BatchCreateParams::CompletionWindow] The time frame within which the batch should be processed. Currently only `24h` # - # See [upload file](https://platform.openai.com/docs/api-reference/files/create) - # for how to upload a file. + # @param endpoint [Symbol, OpenAI::Models::BatchCreateParams::Endpoint] The endpoint to be used for all requests in the batch. 
Currently `/v1/responses` # - # Your input file must be formatted as a - # [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), - # and must be uploaded with the purpose `batch`. The file can contain up to 50,000 - # requests, and can be up to 200 MB in size. + # @param input_file_id [String] The ID of an uploaded file that contains requests for the new batch. # - # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # @param output_expires_after [OpenAI::Models::BatchCreateParams::OutputExpiresAfter] The expiration policy for the output and/or error file that are generated for a # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Batch] # + # @see OpenAI::Models::BatchCreateParams def create(params) - parsed, options = OpenAI::Models::BatchCreateParams.dump_request(params) - @client.request( - method: :post, - path: "batches", - body: parsed, - model: OpenAI::Models::Batch, - options: options - ) + parsed, options = OpenAI::BatchCreateParams.dump_request(params) + @client.request(method: :post, path: "batches", body: parsed, model: OpenAI::Batch, options: options) end # Retrieves a batch. # - # @param batch_id [String] The ID of the batch to retrieve. + # @overload retrieve(batch_id, request_options: {}) # - # @param params [OpenAI::Models::BatchRetrieveParams, Hash{Symbol=>Object}] . + # @param batch_id [String] The ID of the batch to retrieve. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Batch] # + # @see OpenAI::Models::BatchRetrieveParams def retrieve(batch_id, params = {}) @client.request( method: :get, - path: ["batches/%0s", batch_id], - model: OpenAI::Models::Batch, + path: ["batches/%1$s", batch_id], + model: OpenAI::Batch, options: params[:request_options] ) end + # Some parameter documentations has been truncated, see + # {OpenAI::Models::BatchListParams} for more details. + # # List your organization's batches. # - # @param params [OpenAI::Models::BatchListParams, Hash{Symbol=>Object}] . + # @overload list(after: nil, limit: nil, request_options: {}) # - # @option params [String] :after A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place # - # @option params [Integer] :limit A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # @param limit [Integer] A limit on the number of objects to be returned. 
Limit can range between 1 and 1 # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::CursorPage] + # @return [OpenAI::Internal::CursorPage] # + # @see OpenAI::Models::BatchListParams def list(params = {}) - parsed, options = OpenAI::Models::BatchListParams.dump_request(params) + parsed, options = OpenAI::BatchListParams.dump_request(params) @client.request( method: :get, path: "batches", query: parsed, - page: OpenAI::CursorPage, - model: OpenAI::Models::Batch, + page: OpenAI::Internal::CursorPage, + model: OpenAI::Batch, options: options ) end # Cancels an in-progress batch. The batch will be in status `cancelling` for up to - # 10 minutes, before changing to `cancelled`, where it will have partial results - # (if any) available in the output file. + # 10 minutes, before changing to `cancelled`, where it will have partial results + # (if any) available in the output file. # - # @param batch_id [String] The ID of the batch to cancel. + # @overload cancel(batch_id, request_options: {}) # - # @param params [OpenAI::Models::BatchCancelParams, Hash{Symbol=>Object}] . + # @param batch_id [String] The ID of the batch to cancel. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Batch] # + # @see OpenAI::Models::BatchCancelParams def cancel(batch_id, params = {}) @client.request( method: :post, - path: ["batches/%0s/cancel", batch_id], - model: OpenAI::Models::Batch, + path: ["batches/%1$s/cancel", batch_id], + model: OpenAI::Batch, options: params[:request_options] ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/beta.rb b/lib/openai/resources/beta.rb index 4eee364b..62d4e049 100644 --- a/lib/openai/resources/beta.rb +++ b/lib/openai/resources/beta.rb @@ -9,8 +9,9 @@ class Beta # @return [OpenAI::Resources::Beta::Threads] attr_reader :threads - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client @assistants = OpenAI::Resources::Beta::Assistants.new(client: client) diff --git a/lib/openai/resources/beta/assistants.rb b/lib/openai/resources/beta/assistants.rb index 45d0010d..77b58e66 100644 --- a/lib/openai/resources/beta/assistants.rb +++ b/lib/openai/resources/beta/assistants.rb @@ -4,256 +4,173 @@ module OpenAI module Resources class Beta class Assistants - # Create an assistant with a model and instructions. - # - # @param params [OpenAI::Models::Beta::AssistantCreateParams, Hash{Symbol=>Object}] . - # - # @option params [String, Symbol, OpenAI::Models::ChatModel] :model ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. - # - # @option params [String, nil] :description The description of the assistant. The maximum length is 512 characters. - # - # @option params [String, nil] :instructions The system instructions that the assistant uses. The maximum length is 256,000 - # characters. - # - # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. 
This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::AssistantCreateParams} for more details. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Create an assistant with a model and instructions. # - # @option params [String, nil] :name The name of the assistant. The maximum length is 256 characters. + # @overload create(model:, description: nil, instructions: nil, metadata: nil, name: nil, reasoning_effort: nil, response_format: nil, temperature: nil, tool_resources: nil, tools: nil, top_p: nil, request_options: {}) # - # @option params [Symbol, OpenAI::Models::ReasoningEffort, nil] :reasoning_effort **o-series models only** + # @param model [String, Symbol, OpenAI::Models::ChatModel] ID of the model to use. You can use the [List models](https://platform.openai.co # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # @param description [String, nil] The description of the assistant. The maximum length is 512 characters. # - # @option params [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] :response_format Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # @param instructions [String, nil] The system instructions that the assistant uses. The maximum length is 256,000 c # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # @param name [String, nil] The name of the assistant. The maximum length is 256 characters. # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # - # @option params [Float, nil] :temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. 
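+        #
+        # A minimal usage sketch; the model choice, assistant name, and
+        # instructions are hypothetical:
+        #
+        # @example
+        #   ```ruby
+        #   assistant = client.beta.assistants.create(
+        #     model: "gpt-4o",
+        #     name: "Math Tutor",
+        #     instructions: "You answer math questions concisely."
+        #   )
+        #   puts(assistant.id)
+        #   ```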
+ # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https: # - # @option params [OpenAI::Models::Beta::AssistantCreateParams::ToolResources, nil] :tool_resources A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # - # @option params [Array] :tools A list of tool enabled on the assistant. There can be a maximum of 128 tools per - # assistant. Tools can be of types `code_interpreter`, `file_search`, or - # `function`. + # @param tool_resources [OpenAI::Models::Beta::AssistantCreateParams::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe # - # @option params [Float, nil] :top_p An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # @param tools [Array] A list of tool enabled on the assistant. There can be a maximum of 128 tools per # - # We generally recommend altering this or temperature but not both. + # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::Assistant] # + # @see OpenAI::Models::Beta::AssistantCreateParams def create(params) - parsed, options = OpenAI::Models::Beta::AssistantCreateParams.dump_request(params) + parsed, options = OpenAI::Beta::AssistantCreateParams.dump_request(params) @client.request( method: :post, path: "assistants", body: parsed, - model: OpenAI::Models::Beta::Assistant, - options: options + model: OpenAI::Beta::Assistant, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end # Retrieves an assistant. # - # @param assistant_id [String] The ID of the assistant to retrieve. + # @overload retrieve(assistant_id, request_options: {}) # - # @param params [OpenAI::Models::Beta::AssistantRetrieveParams, Hash{Symbol=>Object}] . + # @param assistant_id [String] The ID of the assistant to retrieve. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::Assistant] # + # @see OpenAI::Models::Beta::AssistantRetrieveParams def retrieve(assistant_id, params = {}) @client.request( method: :get, - path: ["assistants/%0s", assistant_id], - model: OpenAI::Models::Beta::Assistant, - options: params[:request_options] + path: ["assistants/%1$s", assistant_id], + model: OpenAI::Beta::Assistant, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **params[:request_options].to_h} ) end - # Modifies an assistant. - # - # @param assistant_id [String] The ID of the assistant to modify. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::AssistantUpdateParams} for more details. 
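A minimal usage sketch of the reshaped `create`/`retrieve` surface above, assuming a configured client; the model name and instructions are illustrative:

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

assistant = client.beta.assistants.create(
  model: "gpt-4o", # a String, or an OpenAI::Models::ChatModel symbol
  name: "Math Tutor",
  instructions: "You answer math questions concisely."
)

# Path params are now formatted with positional specifiers, and every call in
# this file injects the "OpenAI-Beta" => "assistants=v2" header shown above.
client.beta.assistants.retrieve(assistant.id)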
# - # @param params [OpenAI::Models::Beta::AssistantUpdateParams, Hash{Symbol=>Object}] . - # - # @option params [String, nil] :description The description of the assistant. The maximum length is 512 characters. - # - # @option params [String, nil] :instructions The system instructions that the assistant uses. The maximum length is 256,000 - # characters. - # - # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. - # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # Modifies an assistant. # - # @option params [String, Symbol, OpenAI::Models::Beta::AssistantUpdateParams::Model::AssistantSupportedModels] :model ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # @overload update(assistant_id, description: nil, instructions: nil, metadata: nil, model: nil, name: nil, reasoning_effort: nil, response_format: nil, temperature: nil, tool_resources: nil, tools: nil, top_p: nil, request_options: {}) # - # @option params [String, nil] :name The name of the assistant. The maximum length is 256 characters. + # @param assistant_id [String] The ID of the assistant to modify. # - # @option params [Symbol, OpenAI::Models::ReasoningEffort, nil] :reasoning_effort **o-series models only** + # @param description [String, nil] The description of the assistant. The maximum length is 512 characters. # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # @param instructions [String, nil] The system instructions that the assistant uses. The maximum length is 256,000 c # - # @option params [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] :response_format Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # @param model [String, Symbol, OpenAI::Models::Beta::AssistantUpdateParams::Model] ID of the model to use. You can use the [List models](https://platform.openai.co # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # @param name [String, nil] The name of the assistant. The maximum length is 256 characters. 
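An aside on the recurring path-template change in these hunks: `"%0s"`-style placeholders become explicit 1-indexed positional specifiers, which is plain `Kernel#format` syntax:

format("assistants/%1$s", "asst_abc123")
# => "assistants/asst_abc123"
format("threads/%1$s/messages/%2$s", "thread_a", "msg_b")
# => "threads/thread_a/messages/msg_b"

Positional references keep the template unambiguous if arguments are ever reordered or reused; presumably the client still URL-escapes each argument before interpolation.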
# - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # - # @option params [Float, nil] :temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https: # - # @option params [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources, nil] :tool_resources A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # - # @option params [Array] :tools A list of tool enabled on the assistant. There can be a maximum of 128 tools per - # assistant. Tools can be of types `code_interpreter`, `file_search`, or - # `function`. + # @param tool_resources [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe # - # @option params [Float, nil] :top_p An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # @param tools [Array] A list of tool enabled on the assistant. There can be a maximum of 128 tools per # - # We generally recommend altering this or temperature but not both. + # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::Assistant] # + # @see OpenAI::Models::Beta::AssistantUpdateParams def update(assistant_id, params = {}) - parsed, options = OpenAI::Models::Beta::AssistantUpdateParams.dump_request(params) + parsed, options = OpenAI::Beta::AssistantUpdateParams.dump_request(params) @client.request( method: :post, - path: ["assistants/%0s", assistant_id], + path: ["assistants/%1$s", assistant_id], body: parsed, - model: OpenAI::Models::Beta::Assistant, - options: options + model: OpenAI::Beta::Assistant, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::AssistantListParams} for more details. + # # Returns a list of assistants. 
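For the `update` overload above, a sketch; the field values are illustrative, and `metadata` values must be strings:

client.beta.assistants.update(
  assistant.id,
  name: "Physics Tutor",
  metadata: {team: "docs"}
)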
# - # @param params [OpenAI::Models::Beta::AssistantListParams, Hash{Symbol=>Object}] . + # @overload list(after: nil, before: nil, limit: nil, order: nil, request_options: {}) # - # @option params [String] :after A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place # - # @option params [String] :before A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place # - # @option params [Integer] :limit A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1 # - # @option params [Symbol, OpenAI::Models::Beta::AssistantListParams::Order] :order Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # @param order [Symbol, OpenAI::Models::Beta::AssistantListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::CursorPage] + # @return [OpenAI::Internal::CursorPage] # + # @see OpenAI::Models::Beta::AssistantListParams def list(params = {}) - parsed, options = OpenAI::Models::Beta::AssistantListParams.dump_request(params) + parsed, options = OpenAI::Beta::AssistantListParams.dump_request(params) @client.request( method: :get, path: "assistants", query: parsed, - page: OpenAI::CursorPage, - model: OpenAI::Models::Beta::Assistant, - options: options + page: OpenAI::Internal::CursorPage, + model: OpenAI::Beta::Assistant, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end # Delete an assistant. # - # @param assistant_id [String] The ID of the assistant to delete. + # @overload delete(assistant_id, request_options: {}) # - # @param params [OpenAI::Models::Beta::AssistantDeleteParams, Hash{Symbol=>Object}] . + # @param assistant_id [String] The ID of the assistant to delete. 
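`list` now returns an `OpenAI::Internal::CursorPage`; a sketch of consuming it, assuming the SDK's usual `auto_paging_each` helper:

page = client.beta.assistants.list(limit: 20, order: :desc)
page.auto_paging_each do |assistant|
  puts assistant.id # follows the `after` cursor and fetches further pages lazily
end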
# - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::AssistantDeleted] # + # @see OpenAI::Models::Beta::AssistantDeleteParams def delete(assistant_id, params = {}) @client.request( method: :delete, - path: ["assistants/%0s", assistant_id], - model: OpenAI::Models::Beta::AssistantDeleted, - options: params[:request_options] + path: ["assistants/%1$s", assistant_id], + model: OpenAI::Beta::AssistantDeleted, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **params[:request_options].to_h} ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/beta/threads.rb b/lib/openai/resources/beta/threads.rb index b7e58a7c..7af8b256 100644 --- a/lib/openai/resources/beta/threads.rb +++ b/lib/openai/resources/beta/threads.rb @@ -3,6 +3,7 @@ module OpenAI module Resources class Beta + # @deprecated The Assistants API is deprecated in favor of the Responses API class Threads # @return [OpenAI::Resources::Beta::Threads::Runs] attr_reader :runs @@ -10,331 +11,241 @@ class Threads # @return [OpenAI::Resources::Beta::Threads::Messages] attr_reader :messages - # Create a thread. + # @deprecated The Assistants API is deprecated in favor of the Responses API + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::ThreadCreateParams} for more details. # - # @param params [OpenAI::Models::Beta::ThreadCreateParams, Hash{Symbol=>Object}] . + # Create a thread. # - # @option params [Array] :messages A list of [messages](https://platform.openai.com/docs/api-reference/messages) to - # start the thread with. + # @overload create(messages: nil, metadata: nil, tool_resources: nil, request_options: {}) # - # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # @param messages [Array] A list of [messages](https://platform.openai.com/docs/api-reference/messages) to # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # - # @option params [OpenAI::Models::Beta::ThreadCreateParams::ToolResources, nil] :tool_resources A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. 
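One subtlety in the header injection visible throughout this file: because `**options` (or `**params[:request_options].to_h`) is spread after the injected hash, a caller-supplied `:extra_headers` in `request_options` overrides the whole `{"OpenAI-Beta" => "assistants=v2"}` entry at this call site rather than merging with it, so such a caller would need to re-send the beta header itself. A sketch of `delete`:

deleted = client.beta.assistants.delete(assistant.id)
deleted.deleted # => true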
+ # @param tool_resources [OpenAI::Models::Beta::ThreadCreateParams::ToolResources, nil] A set of resources that are made available to the assistant's tools in this thre # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::Thread] # + # @see OpenAI::Models::Beta::ThreadCreateParams def create(params = {}) - parsed, options = OpenAI::Models::Beta::ThreadCreateParams.dump_request(params) + parsed, options = OpenAI::Beta::ThreadCreateParams.dump_request(params) @client.request( method: :post, path: "threads", body: parsed, - model: OpenAI::Models::Beta::Thread, - options: options + model: OpenAI::Beta::Thread, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end + # @deprecated The Assistants API is deprecated in favor of the Responses API + # # Retrieves a thread. # - # @param thread_id [String] The ID of the thread to retrieve. + # @overload retrieve(thread_id, request_options: {}) # - # @param params [OpenAI::Models::Beta::ThreadRetrieveParams, Hash{Symbol=>Object}] . + # @param thread_id [String] The ID of the thread to retrieve. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::Thread] # + # @see OpenAI::Models::Beta::ThreadRetrieveParams def retrieve(thread_id, params = {}) @client.request( method: :get, - path: ["threads/%0s", thread_id], - model: OpenAI::Models::Beta::Thread, - options: params[:request_options] + path: ["threads/%1$s", thread_id], + model: OpenAI::Beta::Thread, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **params[:request_options].to_h} ) end - # Modifies a thread. + # @deprecated The Assistants API is deprecated in favor of the Responses API # - # @param thread_id [String] The ID of the thread to modify. Only the `metadata` can be modified. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::ThreadUpdateParams} for more details. + # + # Modifies a thread. # - # @param params [OpenAI::Models::Beta::ThreadUpdateParams, Hash{Symbol=>Object}] . + # @overload update(thread_id, metadata: nil, tool_resources: nil, request_options: {}) # - # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # @param thread_id [String] The ID of the thread to modify. Only the `metadata` can be modified. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # - # @option params [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources, nil] :tool_resources A set of resources that are made available to the assistant's tools in this - # thread. The resources are specific to the type of tool. For example, the - # `code_interpreter` tool requires a list of file IDs, while the `file_search` - # tool requires a list of vector store IDs. 
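A sketch of the (now deprecated) thread lifecycle covered above; the message content is illustrative:

thread = client.beta.threads.create(
  messages: [{role: :user, content: "Hi! Can you help me plan a trip?"}]
)
client.beta.threads.retrieve(thread.id)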
+ # @param tool_resources [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources, nil] A set of resources that are made available to the assistant's tools in this thre # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::Thread] # + # @see OpenAI::Models::Beta::ThreadUpdateParams def update(thread_id, params = {}) - parsed, options = OpenAI::Models::Beta::ThreadUpdateParams.dump_request(params) + parsed, options = OpenAI::Beta::ThreadUpdateParams.dump_request(params) @client.request( method: :post, - path: ["threads/%0s", thread_id], + path: ["threads/%1$s", thread_id], body: parsed, - model: OpenAI::Models::Beta::Thread, - options: options + model: OpenAI::Beta::Thread, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end + # @deprecated The Assistants API is deprecated in favor of the Responses API + # # Delete a thread. # - # @param thread_id [String] The ID of the thread to delete. + # @overload delete(thread_id, request_options: {}) # - # @param params [OpenAI::Models::Beta::ThreadDeleteParams, Hash{Symbol=>Object}] . + # @param thread_id [String] The ID of the thread to delete. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::ThreadDeleted] # + # @see OpenAI::Models::Beta::ThreadDeleteParams def delete(thread_id, params = {}) @client.request( method: :delete, - path: ["threads/%0s", thread_id], - model: OpenAI::Models::Beta::ThreadDeleted, - options: params[:request_options] + path: ["threads/%1$s", thread_id], + model: OpenAI::Beta::ThreadDeleted, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **params[:request_options].to_h} ) end - # Create a thread and run it in one request. + # @deprecated The Assistants API is deprecated in favor of the Responses API # - # @param params [OpenAI::Models::Beta::ThreadCreateAndRunParams, Hash{Symbol=>Object}] . + # See {OpenAI::Resources::Beta::Threads#stream_raw} for streaming counterpart. # - # @option params [String] :assistant_id The ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to - # execute this run. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::ThreadCreateAndRunParams} for more details. # - # @option params [String, nil] :instructions Override the default system message of the assistant. This is useful for - # modifying the behavior on a per-run basis. - # - # @option params [Integer, nil] :max_completion_tokens The maximum number of completion tokens that may be used over the course of the - # run. The run will make a best effort to use only the number of completion tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # completion tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. - # - # @option params [Integer, nil] :max_prompt_tokens The maximum number of prompt tokens that may be used over the course of the run. - # The run will make a best effort to use only the number of prompt tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # prompt tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. 
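Continuing the sketch for `update` and `delete` (only `metadata` can be modified on an existing thread):

client.beta.threads.update(thread.id, metadata: {topic: "travel"})
client.beta.threads.delete(thread.id)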
+ # Create a thread and run it in one request. # - # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # @overload create_and_run(assistant_id:, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, response_format: nil, temperature: nil, thread: nil, tool_choice: nil, tool_resources: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {}) # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # @param assistant_id [String] The ID of the [assistant](https://platform.openai.com/docs/api-reference/assista # - # @option params [String, Symbol, OpenAI::Models::ChatModel, nil] :model The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - # be used to execute this run. If a value is provided here, it will override the - # model associated with the assistant. If not, the model associated with the - # assistant will be used. + # @param instructions [String, nil] Override the default system message of the assistant. This is useful for modifyi # - # @option params [Boolean] :parallel_tool_calls Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # @param max_completion_tokens [Integer, nil] The maximum number of completion tokens that may be used over the course of the # - # @option params [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] :response_format Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # @param max_prompt_tokens [Integer, nil] The maximum number of prompt tokens that may be used over the course of the run. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # @param model [String, Symbol, OpenAI::Models::ChatModel, nil] The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. 
+ # @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g # - # @option params [Float, nil] :temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https: # - # @option params [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread] :thread Options to create a new thread. If no thread is provided when running a request, - # an empty thread will be created. + # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # - # @option params [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] :tool_choice Controls which (if any) tool is called by the model. `none` means the model will - # not call any tools and instead generates a message. `auto` is the default value - # and means the model can pick between generating a message or calling one or more - # tools. `required` means the model must call one or more tools before responding - # to the user. Specifying a particular tool like `{"type": "file_search"}` or - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # @param thread [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread] Options to create a new thread. If no thread is provided when running a # - # @option params [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources, nil] :tool_resources A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # @param tool_choice [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] Controls which (if any) tool is called by the model. # - # @option params [Array, nil] :tools Override the tools the assistant can use for this run. This is useful for - # modifying the behavior on a per-run basis. + # @param tool_resources [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe # - # @option params [Float, nil] :top_p An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # @param tools [Array, nil] Override the tools the assistant can use for this run. This is useful for modify # - # We generally recommend altering this or temperature but not both. + # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the # - # @option params [OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil] :truncation_strategy Controls for how a thread will be truncated prior to the run. Use this to - # control the intial context window of the run. 
+ # @param truncation_strategy [OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil] Controls for how a thread will be truncated prior to the run. Use this to contro # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::Threads::Run] # + # @see OpenAI::Models::Beta::ThreadCreateAndRunParams def create_and_run(params) - parsed, options = OpenAI::Models::Beta::ThreadCreateAndRunParams.dump_request(params) - parsed.delete(:stream) + parsed, options = OpenAI::Beta::ThreadCreateAndRunParams.dump_request(params) + if parsed[:stream] + message = "Please use `#stream_raw` for the streaming use case." + raise ArgumentError.new(message) + end @client.request( method: :post, path: "threads/runs", body: parsed, - model: OpenAI::Models::Beta::Threads::Run, - options: options + model: OpenAI::Beta::Threads::Run, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end - # Create a thread and run it in one request. - # - # @param params [OpenAI::Models::Beta::ThreadCreateAndRunParams, Hash{Symbol=>Object}] . - # - # @option params [String] :assistant_id The ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to - # execute this run. + # @deprecated The Assistants API is deprecated in favor of the Responses API # - # @option params [String, nil] :instructions Override the default system message of the assistant. This is useful for - # modifying the behavior on a per-run basis. + # See {OpenAI::Resources::Beta::Threads#create_and_run} for non-streaming + # counterpart. # - # @option params [Integer, nil] :max_completion_tokens The maximum number of completion tokens that may be used over the course of the - # run. The run will make a best effort to use only the number of completion tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # completion tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::ThreadCreateAndRunParams} for more details. # - # @option params [Integer, nil] :max_prompt_tokens The maximum number of prompt tokens that may be used over the course of the run. - # The run will make a best effort to use only the number of prompt tokens - # specified, across multiple turns of the run. If the run exceeds the number of - # prompt tokens specified, the run will end with status `incomplete`. See - # `incomplete_details` for more info. + # Create a thread and run it in one request. # - # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # @overload stream_raw(assistant_id:, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, response_format: nil, temperature: nil, thread: nil, tool_choice: nil, tool_resources: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {}) # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. 
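A sketch of the split between `create_and_run` and its renamed streaming counterpart; note that a leftover `stream:` key is now rejected loudly instead of being silently deleted:

run = client.beta.threads.create_and_run(
  assistant_id: assistant.id,
  thread: {messages: [{role: :user, content: "Summarize our conversation."}]}
)

# client.beta.threads.create_and_run(assistant_id: assistant.id, stream: true)
#   raises ArgumentError: Please use `#stream_raw` for the streaming use case.

# Streaming yields OpenAI::Beta::AssistantStreamEvent values; assuming the
# returned OpenAI::Internal::Stream is enumerable, as elsewhere in the SDK:
client.beta.threads.stream_raw(assistant_id: assistant.id).each do |event|
  puts event.class
end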
+ # @param assistant_id [String] The ID of the [assistant](https://platform.openai.com/docs/api-reference/assista # - # @option params [String, Symbol, OpenAI::Models::ChatModel, nil] :model The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - # be used to execute this run. If a value is provided here, it will override the - # model associated with the assistant. If not, the model associated with the - # assistant will be used. + # @param instructions [String, nil] Override the default system message of the assistant. This is useful for modifyi # - # @option params [Boolean] :parallel_tool_calls Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # @param max_completion_tokens [Integer, nil] The maximum number of completion tokens that may be used over the course of the # - # @option params [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] :response_format Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # @param max_prompt_tokens [Integer, nil] The maximum number of prompt tokens that may be used over the course of the run. # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # @param model [String, Symbol, OpenAI::Models::ChatModel, nil] The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g # - # @option params [Float, nil] :temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. + # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https: # - # @option params [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread] :thread Options to create a new thread. If no thread is provided when running a request, - # an empty thread will be created. 
+ # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # - # @option params [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] :tool_choice Controls which (if any) tool is called by the model. `none` means the model will - # not call any tools and instead generates a message. `auto` is the default value - # and means the model can pick between generating a message or calling one or more - # tools. `required` means the model must call one or more tools before responding - # to the user. Specifying a particular tool like `{"type": "file_search"}` or - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # @param thread [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread] Options to create a new thread. If no thread is provided when running a # - # @option params [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources, nil] :tool_resources A set of resources that are used by the assistant's tools. The resources are - # specific to the type of tool. For example, the `code_interpreter` tool requires - # a list of file IDs, while the `file_search` tool requires a list of vector store - # IDs. + # @param tool_choice [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] Controls which (if any) tool is called by the model. # - # @option params [Array, nil] :tools Override the tools the assistant can use for this run. This is useful for - # modifying the behavior on a per-run basis. + # @param tool_resources [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe # - # @option params [Float, nil] :top_p An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # @param tools [Array, nil] Override the tools the assistant can use for this run. This is useful for modify # - # We generally recommend altering this or temperature but not both. + # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the # - # @option params [OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil] :truncation_strategy Controls for how a thread will be truncated prior to the run. Use this to - # control the intial context window of the run. + # @param truncation_strategy [OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil] Controls for how a thread will be truncated prior to the run. Use this to contro # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::Stream] + # @return [OpenAI::Internal::Stream] # - def create_and_run_streaming(params) - parsed, options = OpenAI::Models::Beta::ThreadCreateAndRunParams.dump_request(params) + # @see OpenAI::Models::Beta::ThreadCreateAndRunParams + def stream_raw(params) + parsed, options = OpenAI::Beta::ThreadCreateAndRunParams.dump_request(params) + unless parsed.fetch(:stream, true) + message = "Please use `#create_and_run` for the non-streaming use case." 
+ raise ArgumentError.new(message) + end parsed.store(:stream, true) @client.request( method: :post, path: "threads/runs", headers: {"accept" => "text/event-stream"}, body: parsed, - stream: OpenAI::Stream, - model: OpenAI::Models::Beta::AssistantStreamEvent, - options: options + stream: OpenAI::Internal::Stream, + model: OpenAI::Beta::AssistantStreamEvent, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client @runs = OpenAI::Resources::Beta::Threads::Runs.new(client: client) diff --git a/lib/openai/resources/beta/threads/messages.rb b/lib/openai/resources/beta/threads/messages.rb index 3ea2f318..50bafc68 100644 --- a/lib/openai/resources/beta/threads/messages.rb +++ b/lib/openai/resources/beta/threads/messages.rb @@ -4,174 +4,180 @@ module OpenAI module Resources class Beta class Threads + # @deprecated The Assistants API is deprecated in favor of the Responses API class Messages - # Create a message. + # @deprecated The Assistants API is deprecated in favor of the Responses API # - # @param thread_id [String] The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) - # to create a message for. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::MessageCreateParams} for more details. # - # @param params [OpenAI::Models::Beta::Threads::MessageCreateParams, Hash{Symbol=>Object}] . + # Create a message. # - # @option params [String, Array] :content The text contents of the message. + # @overload create(thread_id, content:, role:, attachments: nil, metadata: nil, request_options: {}) # - # @option params [Symbol, OpenAI::Models::Beta::Threads::MessageCreateParams::Role] :role The role of the entity that is creating the message. Allowed values include: + # @param thread_id [String] The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) t # - # - `user`: Indicates the message is sent by an actual user and should be used in - # most cases to represent user-generated messages. - # - `assistant`: Indicates the message is generated by the assistant. Use this - # value to insert messages from the assistant into the conversation. + # @param content [String, Array] The text contents of the message. # - # @option params [Array, nil] :attachments A list of files attached to the message, and the tools they should be added to. + # @param role [Symbol, OpenAI::Models::Beta::Threads::MessageCreateParams::Role] The role of the entity that is creating the message. Allowed values include: # - # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # @param attachments [Array, nil] A list of files attached to the message, and the tools they should be added to. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. 
This can be # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::Threads::Message] # + # @see OpenAI::Models::Beta::Threads::MessageCreateParams def create(thread_id, params) - parsed, options = OpenAI::Models::Beta::Threads::MessageCreateParams.dump_request(params) + parsed, options = OpenAI::Beta::Threads::MessageCreateParams.dump_request(params) @client.request( method: :post, - path: ["threads/%0s/messages", thread_id], + path: ["threads/%1$s/messages", thread_id], body: parsed, - model: OpenAI::Models::Beta::Threads::Message, - options: options + model: OpenAI::Beta::Threads::Message, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end + # @deprecated The Assistants API is deprecated in favor of the Responses API + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::MessageRetrieveParams} for more details. + # # Retrieve a message. # - # @param message_id [String] The ID of the message to retrieve. + # @overload retrieve(message_id, thread_id:, request_options: {}) # - # @param params [OpenAI::Models::Beta::Threads::MessageRetrieveParams, Hash{Symbol=>Object}] . + # @param message_id [String] The ID of the message to retrieve. # - # @option params [String] :thread_id The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) - # to which this message belongs. + # @param thread_id [String] The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) t # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::Threads::Message] # + # @see OpenAI::Models::Beta::Threads::MessageRetrieveParams def retrieve(message_id, params) - parsed, options = OpenAI::Models::Beta::Threads::MessageRetrieveParams.dump_request(params) - thread_id = parsed.delete(:thread_id) do - raise ArgumentError.new("missing required path argument #{_1}") - end + parsed, options = OpenAI::Beta::Threads::MessageRetrieveParams.dump_request(params) + thread_id = + parsed.delete(:thread_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end @client.request( method: :get, - path: ["threads/%0s/messages/%1s", thread_id, message_id], - model: OpenAI::Models::Beta::Threads::Message, - options: options + path: ["threads/%1$s/messages/%2$s", thread_id, message_id], + model: OpenAI::Beta::Threads::Message, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end - # Modifies a message. + # @deprecated The Assistants API is deprecated in favor of the Responses API # - # @param message_id [String] Path param: The ID of the message to modify. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::MessageUpdateParams} for more details. + # + # Modifies a message. # - # @param params [OpenAI::Models::Beta::Threads::MessageUpdateParams, Hash{Symbol=>Object}] . + # @overload update(message_id, thread_id:, metadata: nil, request_options: {}) # - # @option params [String] :thread_id Path param: The ID of the thread to which this message belongs. + # @param message_id [String] Path param: The ID of the message to modify. # - # @option params [Hash{Symbol=>String}, nil] :metadata Body param: Set of 16 key-value pairs that can be attached to an object. 
This - # can be useful for storing additional information about the object in a - # structured format, and querying for objects via API or the dashboard. + # @param thread_id [String] Path param: The ID of the thread to which this message belongs. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # @param metadata [Hash{Symbol=>String}, nil] Body param: Set of 16 key-value pairs that can be attached to an object. This ca # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::Threads::Message] # + # @see OpenAI::Models::Beta::Threads::MessageUpdateParams def update(message_id, params) - parsed, options = OpenAI::Models::Beta::Threads::MessageUpdateParams.dump_request(params) - thread_id = parsed.delete(:thread_id) do - raise ArgumentError.new("missing required path argument #{_1}") - end + parsed, options = OpenAI::Beta::Threads::MessageUpdateParams.dump_request(params) + thread_id = + parsed.delete(:thread_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end @client.request( method: :post, - path: ["threads/%0s/messages/%1s", thread_id, message_id], + path: ["threads/%1$s/messages/%2$s", thread_id, message_id], body: parsed, - model: OpenAI::Models::Beta::Threads::Message, - options: options + model: OpenAI::Beta::Threads::Message, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end + # @deprecated The Assistants API is deprecated in favor of the Responses API + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::MessageListParams} for more details. + # # Returns a list of messages for a given thread. # - # @param thread_id [String] The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) - # the messages belong to. + # @overload list(thread_id, after: nil, before: nil, limit: nil, order: nil, run_id: nil, request_options: {}) # - # @param params [OpenAI::Models::Beta::Threads::MessageListParams, Hash{Symbol=>Object}] . + # @param thread_id [String] The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) t # - # @option params [String] :after A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place # - # @option params [String] :before A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place # - # @option params [Integer] :limit A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # @param limit [Integer] A limit on the number of objects to be returned. 
Limit can range between 1 and 1 # - # @option params [Symbol, OpenAI::Models::Beta::Threads::MessageListParams::Order] :order Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # @param order [Symbol, OpenAI::Models::Beta::Threads::MessageListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord # - # @option params [String] :run_id Filter messages by the run ID that generated them. + # @param run_id [String] Filter messages by the run ID that generated them. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::CursorPage] + # @return [OpenAI::Internal::CursorPage] # + # @see OpenAI::Models::Beta::Threads::MessageListParams def list(thread_id, params = {}) - parsed, options = OpenAI::Models::Beta::Threads::MessageListParams.dump_request(params) + parsed, options = OpenAI::Beta::Threads::MessageListParams.dump_request(params) @client.request( method: :get, - path: ["threads/%0s/messages", thread_id], + path: ["threads/%1$s/messages", thread_id], query: parsed, - page: OpenAI::CursorPage, - model: OpenAI::Models::Beta::Threads::Message, - options: options + page: OpenAI::Internal::CursorPage, + model: OpenAI::Beta::Threads::Message, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end + # @deprecated The Assistants API is deprecated in favor of the Responses API + # # Deletes a message. # - # @param message_id [String] The ID of the message to delete. + # @overload delete(message_id, thread_id:, request_options: {}) # - # @param params [OpenAI::Models::Beta::Threads::MessageDeleteParams, Hash{Symbol=>Object}] . + # @param message_id [String] The ID of the message to delete. # - # @option params [String] :thread_id The ID of the thread to which this message belongs. + # @param thread_id [String] The ID of the thread to which this message belongs. 
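A sketch for the messages resource above; `retrieve`, `update`, and `delete` take the thread as a required `thread_id:` keyword, and the `parsed.delete(:thread_id) { raise ... }` block turns an omitted keyword into an `ArgumentError` before any request is made:

message = client.beta.threads.messages.create(
  thread.id,
  content: "What is 2 + 2?",
  role: :user
)
client.beta.threads.messages.retrieve(message.id, thread_id: thread.id)
client.beta.threads.messages.list(thread.id, limit: 10)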
# - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::Threads::MessageDeleted] # + # @see OpenAI::Models::Beta::Threads::MessageDeleteParams def delete(message_id, params) - parsed, options = OpenAI::Models::Beta::Threads::MessageDeleteParams.dump_request(params) - thread_id = parsed.delete(:thread_id) do - raise ArgumentError.new("missing required path argument #{_1}") - end + parsed, options = OpenAI::Beta::Threads::MessageDeleteParams.dump_request(params) + thread_id = + parsed.delete(:thread_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end @client.request( method: :delete, - path: ["threads/%0s/messages/%1s", thread_id, message_id], - model: OpenAI::Models::Beta::Threads::MessageDeleted, - options: options + path: ["threads/%1$s/messages/%2$s", thread_id, message_id], + model: OpenAI::Beta::Threads::MessageDeleted, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/beta/threads/runs.rb b/lib/openai/resources/beta/threads/runs.rb index 34f408d7..37648e04 100644 --- a/lib/openai/resources/beta/threads/runs.rb +++ b/lib/openai/resources/beta/threads/runs.rb @@ -4,467 +4,382 @@ module OpenAI module Resources class Beta class Threads + # @deprecated The Assistants API is deprecated in favor of the Responses API class Runs # @return [OpenAI::Resources::Beta::Threads::Runs::Steps] attr_reader :steps - # Create a run. + # @deprecated The Assistants API is deprecated in favor of the Responses API # - # @param thread_id [String] Path param: The ID of the thread to run. + # See {OpenAI::Resources::Beta::Threads::Runs#create_stream_raw} for streaming + # counterpart. + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::RunCreateParams} for more details. # - # @param params [OpenAI::Models::Beta::Threads::RunCreateParams, Hash{Symbol=>Object}] . - # - # @option params [String] :assistant_id Body param: The ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to - # execute this run. - # - # @option params [Array] :include Query param: A list of additional fields to include in the response. Currently - # the only supported value is - # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file - # search result content. + # Create a run. # - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # @overload create(thread_id, assistant_id:, include: nil, additional_instructions: nil, additional_messages: nil, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, reasoning_effort: nil, response_format: nil, temperature: nil, tool_choice: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {}) # - # @option params [String, nil] :additional_instructions Body param: Appends additional instructions at the end of the instructions for - # the run. This is useful for modifying the behavior on a per-run basis without - # overriding other instructions. + # @param thread_id [String] Path param: The ID of the thread to run. 
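Based on the `create` overload above, a sketch for starting a run; the `include:` values route to the query string via the `query_params = [:include]` split later in this hunk, while everything else is sent in the body:

run = client.beta.threads.runs.create(
  thread.id,
  assistant_id: assistant.id,
  additional_instructions: "Keep the answer under two sentences."
)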
# - # @option params [Array, nil] :additional_messages Body param: Adds additional messages to the thread before creating the run. + # @param assistant_id [String] Body param: The ID of the [assistant](https://platform.openai.com/docs/api-refer # - # @option params [String, nil] :instructions Body param: Overrides the - # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) - # of the assistant. This is useful for modifying the behavior on a per-run basis. + # @param include [Array] Query param: A list of additional fields to include in the response. Currently t # - # @option params [Integer, nil] :max_completion_tokens Body param: The maximum number of completion tokens that may be used over the - # course of the run. The run will make a best effort to use only the number of - # completion tokens specified, across multiple turns of the run. If the run - # exceeds the number of completion tokens specified, the run will end with status - # `incomplete`. See `incomplete_details` for more info. + # @param additional_instructions [String, nil] Body param: Appends additional instructions at the end of the instructions for t # - # @option params [Integer, nil] :max_prompt_tokens Body param: The maximum number of prompt tokens that may be used over the course - # of the run. The run will make a best effort to use only the number of prompt - # tokens specified, across multiple turns of the run. If the run exceeds the - # number of prompt tokens specified, the run will end with status `incomplete`. - # See `incomplete_details` for more info. + # @param additional_messages [Array, nil] Body param: Adds additional messages to the thread before creating the run. # - # @option params [Hash{Symbol=>String}, nil] :metadata Body param: Set of 16 key-value pairs that can be attached to an object. This - # can be useful for storing additional information about the object in a - # structured format, and querying for objects via API or the dashboard. + # @param instructions [String, nil] Body param: Overrides the [instructions](https://platform.openai.com/docs/api-re # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # @param max_completion_tokens [Integer, nil] Body param: The maximum number of completion tokens that may be used over the co # - # @option params [String, Symbol, OpenAI::Models::ChatModel, nil] :model Body param: The ID of the - # [Model](https://platform.openai.com/docs/api-reference/models) to be used to - # execute this run. If a value is provided here, it will override the model - # associated with the assistant. If not, the model associated with the assistant - # will be used. + # @param max_prompt_tokens [Integer, nil] Body param: The maximum number of prompt tokens that may be used over the course # - # @option params [Boolean] :parallel_tool_calls Body param: Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # @param metadata [Hash{Symbol=>String}, nil] Body param: Set of 16 key-value pairs that can be attached to an object. 
This ca # - # @option params [Symbol, OpenAI::Models::ReasoningEffort, nil] :reasoning_effort Body param: **o-series models only** + # @param model [String, Symbol, OpenAI::Models::ChatModel, nil] Body param: The ID of the [Model](https://platform.openai.com/docs/api-reference # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # @param parallel_tool_calls [Boolean] Body param: Whether to enable [parallel function calling](https://platform.opena # - # @option params [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] :response_format Body param: Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Body param: Constrains effort on reasoning for # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Body param: Specifies the format that the model must output. Compatible with [GP # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # @param temperature [Float, nil] Body param: What sampling temperature to use, between 0 and 2. Higher values lik # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # @param tool_choice [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] Body param: Controls which (if any) tool is called by the model. # - # @option params [Float, nil] :temperature Body param: What sampling temperature to use, between 0 and 2. Higher values - # like 0.8 will make the output more random, while lower values like 0.2 will make - # it more focused and deterministic. + # @param tools [Array, nil] Body param: Override the tools the assistant can use for this run. This is usefu # - # @option params [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] :tool_choice Body param: Controls which (if any) tool is called by the model. `none` means - # the model will not call any tools and instead generates a message. `auto` is the - # default value and means the model can pick between generating a message or - # calling one or more tools. 
`required` means the model must call one or more - # tools before responding to the user. Specifying a particular tool like - # `{"type": "file_search"}` or - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # @param top_p [Float, nil] Body param: An alternative to sampling with temperature, called nucleus sampling # - # @option params [Array, nil] :tools Body param: Override the tools the assistant can use for this run. This is - # useful for modifying the behavior on a per-run basis. + # @param truncation_strategy [OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy, nil] Body param: Controls for how a thread will be truncated prior to the run. Use th # - # @option params [Float, nil] :top_p Body param: An alternative to sampling with temperature, called nucleus - # sampling, where the model considers the results of the tokens with top_p - # probability mass. So 0.1 means only the tokens comprising the top 10% - # probability mass are considered. - # - # We generally recommend altering this or temperature but not both. - # - # @option params [OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy, nil] :truncation_strategy Body param: Controls for how a thread will be truncated prior to the run. Use - # this to control the intial context window of the run. - # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::Threads::Run] # + # @see OpenAI::Models::Beta::Threads::RunCreateParams def create(thread_id, params) - parsed, options = OpenAI::Models::Beta::Threads::RunCreateParams.dump_request(params) - parsed.delete(:stream) + parsed, options = OpenAI::Beta::Threads::RunCreateParams.dump_request(params) + if parsed[:stream] + message = "Please use `#create_stream_raw` for the streaming use case." + raise ArgumentError.new(message) + end query_params = [:include] @client.request( method: :post, - path: ["threads/%0s/runs", thread_id], + path: ["threads/%1$s/runs", thread_id], query: parsed.slice(*query_params), body: parsed.except(*query_params), - model: OpenAI::Models::Beta::Threads::Run, - options: options + model: OpenAI::Beta::Threads::Run, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end + # @deprecated The Assistants API is deprecated in favor of the Responses API + # + # See {OpenAI::Resources::Beta::Threads::Runs#create} for non-streaming + # counterpart. + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::RunCreateParams} for more details. + # # Create a run. # + # @overload create_stream_raw(thread_id, assistant_id:, include: nil, additional_instructions: nil, additional_messages: nil, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, reasoning_effort: nil, response_format: nil, temperature: nil, tool_choice: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {}) + # # @param thread_id [String] Path param: The ID of the thread to run. # - # @param params [OpenAI::Models::Beta::Threads::RunCreateParams, Hash{Symbol=>Object}] . - # - # @option params [String] :assistant_id Body param: The ID of the - # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to - # execute this run. 
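A minimal usage sketch for the rewritten `create` above; `thread_abc` and `asst_abc` are placeholder IDs, and `client` is assumed to be a configured `OpenAI::Client`:

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
    run = client.beta.threads.runs.create("thread_abc", assistant_id: "asst_abc")
    puts run.status

Note the behavioral change: passing `stream: true` to `create` now raises an `ArgumentError` pointing at `#create_stream_raw`, where the old code silently deleted the flag.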
- # - # @option params [Array] :include Query param: A list of additional fields to include in the response. Currently - # the only supported value is - # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file - # search result content. - # - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # @param assistant_id [String] Body param: The ID of the [assistant](https://platform.openai.com/docs/api-refer # - # @option params [String, nil] :additional_instructions Body param: Appends additional instructions at the end of the instructions for - # the run. This is useful for modifying the behavior on a per-run basis without - # overriding other instructions. + # @param include [Array] Query param: A list of additional fields to include in the response. Currently t # - # @option params [Array, nil] :additional_messages Body param: Adds additional messages to the thread before creating the run. + # @param additional_instructions [String, nil] Body param: Appends additional instructions at the end of the instructions for t # - # @option params [String, nil] :instructions Body param: Overrides the - # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) - # of the assistant. This is useful for modifying the behavior on a per-run basis. + # @param additional_messages [Array, nil] Body param: Adds additional messages to the thread before creating the run. # - # @option params [Integer, nil] :max_completion_tokens Body param: The maximum number of completion tokens that may be used over the - # course of the run. The run will make a best effort to use only the number of - # completion tokens specified, across multiple turns of the run. If the run - # exceeds the number of completion tokens specified, the run will end with status - # `incomplete`. See `incomplete_details` for more info. + # @param instructions [String, nil] Body param: Overrides the [instructions](https://platform.openai.com/docs/api-re # - # @option params [Integer, nil] :max_prompt_tokens Body param: The maximum number of prompt tokens that may be used over the course - # of the run. The run will make a best effort to use only the number of prompt - # tokens specified, across multiple turns of the run. If the run exceeds the - # number of prompt tokens specified, the run will end with status `incomplete`. - # See `incomplete_details` for more info. + # @param max_completion_tokens [Integer, nil] Body param: The maximum number of completion tokens that may be used over the co # - # @option params [Hash{Symbol=>String}, nil] :metadata Body param: Set of 16 key-value pairs that can be attached to an object. This - # can be useful for storing additional information about the object in a - # structured format, and querying for objects via API or the dashboard. + # @param max_prompt_tokens [Integer, nil] Body param: The maximum number of prompt tokens that may be used over the course # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # @param metadata [Hash{Symbol=>String}, nil] Body param: Set of 16 key-value pairs that can be attached to an object. This ca # - # @option params [String, Symbol, OpenAI::Models::ChatModel, nil] :model Body param: The ID of the - # [Model](https://platform.openai.com/docs/api-reference/models) to be used to - # execute this run. 
If a value is provided here, it will override the model - # associated with the assistant. If not, the model associated with the assistant - # will be used. + # @param model [String, Symbol, OpenAI::Models::ChatModel, nil] Body param: The ID of the [Model](https://platform.openai.com/docs/api-reference # - # @option params [Boolean] :parallel_tool_calls Body param: Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # @param parallel_tool_calls [Boolean] Body param: Whether to enable [parallel function calling](https://platform.opena # - # @option params [Symbol, OpenAI::Models::ReasoningEffort, nil] :reasoning_effort Body param: **o-series models only** + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Body param: Constrains effort on reasoning for # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Body param: Specifies the format that the model must output. Compatible with [GP # - # @option params [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] :response_format Body param: Specifies the format that the model must output. Compatible with - # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), - # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # @param temperature [Float, nil] Body param: What sampling temperature to use, between 0 and 2. Higher values lik # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # @param tool_choice [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] Body param: Controls which (if any) tool is called by the model. # - # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - # message the model generates is valid JSON. + # @param tools [Array, nil] Body param: Override the tools the assistant can use for this run. This is usefu # - # **Important:** when using JSON mode, you **must** also instruct the model to - # produce JSON yourself via a system or user message. Without this, the model may - # generate an unending stream of whitespace until the generation reaches the token - # limit, resulting in a long-running and seemingly "stuck" request. Also note that - # the message content may be partially cut off if `finish_reason="length"`, which - # indicates the generation exceeded `max_tokens` or the conversation exceeded the - # max context length. + # @param top_p [Float, nil] Body param: An alternative to sampling with temperature, called nucleus sampling # - # @option params [Float, nil] :temperature Body param: What sampling temperature to use, between 0 and 2. 
Higher values - # like 0.8 will make the output more random, while lower values like 0.2 will make - # it more focused and deterministic. + # @param truncation_strategy [OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy, nil] Body param: Controls for how a thread will be truncated prior to the run. Use th # - # @option params [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] :tool_choice Body param: Controls which (if any) tool is called by the model. `none` means - # the model will not call any tools and instead generates a message. `auto` is the - # default value and means the model can pick between generating a message or - # calling one or more tools. `required` means the model must call one or more - # tools before responding to the user. Specifying a particular tool like - # `{"type": "file_search"}` or - # `{"type": "function", "function": {"name": "my_function"}}` forces the model to - # call that tool. + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @option params [Array, nil] :tools Body param: Override the tools the assistant can use for this run. This is - # useful for modifying the behavior on a per-run basis. + # @return [OpenAI::Internal::Stream] # - # @option params [Float, nil] :top_p Body param: An alternative to sampling with temperature, called nucleus - # sampling, where the model considers the results of the tokens with top_p - # probability mass. So 0.1 means only the tokens comprising the top 10% - # probability mass are considered. - # - # We generally recommend altering this or temperature but not both. - # - # @option params [OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy, nil] :truncation_strategy Body param: Controls for how a thread will be truncated prior to the run. Use - # this to control the intial context window of the run. - # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options - # - # @return [OpenAI::Stream] - # - def create_streaming(thread_id, params) - parsed, options = OpenAI::Models::Beta::Threads::RunCreateParams.dump_request(params) + # @see OpenAI::Models::Beta::Threads::RunCreateParams + def create_stream_raw(thread_id, params) + parsed, options = OpenAI::Beta::Threads::RunCreateParams.dump_request(params) + unless parsed.fetch(:stream, true) + message = "Please use `#create` for the non-streaming use case." + raise ArgumentError.new(message) + end parsed.store(:stream, true) query_params = [:include] @client.request( method: :post, - path: ["threads/%0s/runs", thread_id], + path: ["threads/%1$s/runs", thread_id], query: parsed.slice(*query_params), headers: {"accept" => "text/event-stream"}, body: parsed.except(*query_params), - stream: OpenAI::Stream, - model: OpenAI::Models::Beta::AssistantStreamEvent, - options: options + stream: OpenAI::Internal::Stream, + model: OpenAI::Beta::AssistantStreamEvent, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end + # @deprecated The Assistants API is deprecated in favor of the Responses API + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::RunRetrieveParams} for more details. + # # Retrieves a run. # - # @param run_id [String] The ID of the run to retrieve. + # @overload retrieve(run_id, thread_id:, request_options: {}) # - # @param params [OpenAI::Models::Beta::Threads::RunRetrieveParams, Hash{Symbol=>Object}] . 
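The renamed `create_stream_raw` returns an `OpenAI::Internal::Stream` of assistant stream events. A consumption sketch, assuming the stream is enumerable as the SDK's other streams are:

    stream = client.beta.threads.runs.create_stream_raw(
      "thread_abc",
      assistant_id: "asst_abc"
    )
    stream.each do |event|
      puts event.class # one of the OpenAI::Beta::AssistantStreamEvent variants
    end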
+          # @param run_id [String] The ID of the run to retrieve.
           #
-          # @option params [String] :thread_id The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
-          #   that was run.
+          # @param thread_id [String] The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) t
           #
-          # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options
+          # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
           #
           # @return [OpenAI::Models::Beta::Threads::Run]
           #
+          # @see OpenAI::Models::Beta::Threads::RunRetrieveParams
           def retrieve(run_id, params)
-            parsed, options = OpenAI::Models::Beta::Threads::RunRetrieveParams.dump_request(params)
-            thread_id = parsed.delete(:thread_id) do
-              raise ArgumentError.new("missing required path argument #{_1}")
-            end
+            parsed, options = OpenAI::Beta::Threads::RunRetrieveParams.dump_request(params)
+            thread_id =
+              parsed.delete(:thread_id) do
+                raise ArgumentError.new("missing required path argument #{_1}")
+              end
             @client.request(
               method: :get,
-              path: ["threads/%0s/runs/%1s", thread_id, run_id],
-              model: OpenAI::Models::Beta::Threads::Run,
-              options: options
+              path: ["threads/%1$s/runs/%2$s", thread_id, run_id],
+              model: OpenAI::Beta::Threads::Run,
+              options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
             )
           end

-          # Modifies a run.
+          # @deprecated The Assistants API is deprecated in favor of the Responses API
           #
-          # @param run_id [String] Path param: The ID of the run to modify.
+          # Some parameter documentation has been truncated, see
+          # {OpenAI::Models::Beta::Threads::RunUpdateParams} for more details.
           #
-          # @param params [OpenAI::Models::Beta::Threads::RunUpdateParams, Hash{Symbol=>Object}] .
+          # Modifies a run.
           #
-          # @option params [String] :thread_id Path param: The ID of the
-          #   [thread](https://platform.openai.com/docs/api-reference/threads) that was run.
+          # @overload update(run_id, thread_id:, metadata: nil, request_options: {})
           #
-          # @option params [Hash{Symbol=>String}, nil] :metadata Body param: Set of 16 key-value pairs that can be attached to an object. This
-          #   can be useful for storing additional information about the object in a
-          #   structured format, and querying for objects via API or the dashboard.
+          # @param run_id [String] Path param: The ID of the run to modify.
           #
-          #   Keys are strings with a maximum length of 64 characters. Values are strings with
-          #   a maximum length of 512 characters.
+          # @param thread_id [String] Path param: The ID of the [thread](https://platform.openai.com/docs/api-referenc
           #
-          # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options
+          # @param metadata [Hash{Symbol=>String}, nil] Body param: Set of 16 key-value pairs that can be attached to an object. This ca
+          #
+          # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
           #
           # @return [OpenAI::Models::Beta::Threads::Run]
           #
+          # @see OpenAI::Models::Beta::Threads::RunUpdateParams
           def update(run_id, params)
-            parsed, options = OpenAI::Models::Beta::Threads::RunUpdateParams.dump_request(params)
-            thread_id = parsed.delete(:thread_id) do
-              raise ArgumentError.new("missing required path argument #{_1}")
-            end
+            parsed, options = OpenAI::Beta::Threads::RunUpdateParams.dump_request(params)
+            thread_id =
+              parsed.delete(:thread_id) do
+                raise ArgumentError.new("missing required path argument #{_1}")
+              end
             @client.request(
               method: :post,
-              path: ["threads/%0s/runs/%1s", thread_id, run_id],
+              path: ["threads/%1$s/runs/%2$s", thread_id, run_id],
               body: parsed,
-              model: OpenAI::Models::Beta::Threads::Run,
-              options: options
+              model: OpenAI::Beta::Threads::Run,
+              options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
             )
           end

+          # @deprecated The Assistants API is deprecated in favor of the Responses API
+          #
+          # Some parameter documentation has been truncated, see
+          # {OpenAI::Models::Beta::Threads::RunListParams} for more details.
+          #
           # Returns a list of runs belonging to a thread.
           #
-          # @param thread_id [String] The ID of the thread the run belongs to.
+          # @overload list(thread_id, after: nil, before: nil, limit: nil, order: nil, request_options: {})
           #
-          # @param params [OpenAI::Models::Beta::Threads::RunListParams, Hash{Symbol=>Object}] .
+          # @param thread_id [String] The ID of the thread the run belongs to.
           #
-          # @option params [String] :after A cursor for use in pagination. `after` is an object ID that defines your place
-          #   in the list. For instance, if you make a list request and receive 100 objects,
-          #   ending with obj_foo, your subsequent call can include after=obj_foo in order to
-          #   fetch the next page of the list.
+          # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
           #
-          # @option params [String] :before A cursor for use in pagination. `before` is an object ID that defines your place
-          #   in the list. For instance, if you make a list request and receive 100 objects,
-          #   starting with obj_foo, your subsequent call can include before=obj_foo in order
-          #   to fetch the previous page of the list.
+          # @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place
           #
-          # @option params [Integer] :limit A limit on the number of objects to be returned. Limit can range between 1 and
-          #   100, and the default is 20.
+          # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
           #
-          # @option params [Symbol, OpenAI::Models::Beta::Threads::RunListParams::Order] :order Sort order by the `created_at` timestamp of the objects. `asc` for ascending
-          #   order and `desc` for descending order.
+          # @param order [Symbol, OpenAI::Models::Beta::Threads::RunListParams::Order] Sort order by the `created_at` timestamp of the objects.
`asc` for ascending ord # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::CursorPage] + # @return [OpenAI::Internal::CursorPage] # + # @see OpenAI::Models::Beta::Threads::RunListParams def list(thread_id, params = {}) - parsed, options = OpenAI::Models::Beta::Threads::RunListParams.dump_request(params) + parsed, options = OpenAI::Beta::Threads::RunListParams.dump_request(params) @client.request( method: :get, - path: ["threads/%0s/runs", thread_id], + path: ["threads/%1$s/runs", thread_id], query: parsed, - page: OpenAI::CursorPage, - model: OpenAI::Models::Beta::Threads::Run, - options: options + page: OpenAI::Internal::CursorPage, + model: OpenAI::Beta::Threads::Run, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end + # @deprecated The Assistants API is deprecated in favor of the Responses API + # # Cancels a run that is `in_progress`. # - # @param run_id [String] The ID of the run to cancel. + # @overload cancel(run_id, thread_id:, request_options: {}) # - # @param params [OpenAI::Models::Beta::Threads::RunCancelParams, Hash{Symbol=>Object}] . + # @param run_id [String] The ID of the run to cancel. # - # @option params [String] :thread_id The ID of the thread to which this run belongs. + # @param thread_id [String] The ID of the thread to which this run belongs. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::Threads::Run] # + # @see OpenAI::Models::Beta::Threads::RunCancelParams def cancel(run_id, params) - parsed, options = OpenAI::Models::Beta::Threads::RunCancelParams.dump_request(params) - thread_id = parsed.delete(:thread_id) do - raise ArgumentError.new("missing required path argument #{_1}") - end + parsed, options = OpenAI::Beta::Threads::RunCancelParams.dump_request(params) + thread_id = + parsed.delete(:thread_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end @client.request( method: :post, - path: ["threads/%0s/runs/%1s/cancel", thread_id, run_id], - model: OpenAI::Models::Beta::Threads::Run, - options: options + path: ["threads/%1$s/runs/%2$s/cancel", thread_id, run_id], + model: OpenAI::Beta::Threads::Run, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end + # @deprecated The Assistants API is deprecated in favor of the Responses API + # + # See {OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs_stream_raw} for + # streaming counterpart. + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams} for more details. + # # When a run has the `status: "requires_action"` and `required_action.type` is - # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the - # tool calls once they're all completed. All outputs must be submitted in a single - # request. + # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + # tool calls once they're all completed. All outputs must be submitted in a single + # request. # - # @param run_id [String] Path param: The ID of the run that requires the tool output submission. 
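`list` now pages through an `OpenAI::Internal::CursorPage`. A sketch of exhaustive iteration, assuming the page type exposes the `auto_paging_each` helper typical of Stainless-generated Ruby SDKs:

    page = client.beta.threads.runs.list("thread_abc", limit: 20)
    page.auto_paging_each do |run|
      puts run.id
    end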
+ # @overload submit_tool_outputs(run_id, thread_id:, tool_outputs:, request_options: {}) # - # @param params [OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams, Hash{Symbol=>Object}] . + # @param run_id [String] Path param: The ID of the run that requires the tool output submission. # - # @option params [String] :thread_id Path param: The ID of the - # [thread](https://platform.openai.com/docs/api-reference/threads) to which this - # run belongs. + # @param thread_id [String] Path param: The ID of the [thread](https://platform.openai.com/docs/api-referenc # - # @option params [Array] :tool_outputs Body param: A list of tools for which the outputs are being submitted. + # @param tool_outputs [Array] Body param: A list of tools for which the outputs are being submitted. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::Threads::Run] # + # @see OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams def submit_tool_outputs(run_id, params) - parsed, options = OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams.dump_request(params) - parsed.delete(:stream) - thread_id = parsed.delete(:thread_id) do - raise ArgumentError.new("missing required path argument #{_1}") + parsed, options = OpenAI::Beta::Threads::RunSubmitToolOutputsParams.dump_request(params) + if parsed[:stream] + message = "Please use `#submit_tool_outputs_stream_raw` for the streaming use case." + raise ArgumentError.new(message) end + thread_id = + parsed.delete(:thread_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end @client.request( method: :post, - path: ["threads/%0s/runs/%1s/submit_tool_outputs", thread_id, run_id], + path: ["threads/%1$s/runs/%2$s/submit_tool_outputs", thread_id, run_id], body: parsed, - model: OpenAI::Models::Beta::Threads::Run, - options: options + model: OpenAI::Beta::Threads::Run, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end + # @deprecated The Assistants API is deprecated in favor of the Responses API + # + # See {OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs} for + # non-streaming counterpart. + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams} for more details. + # # When a run has the `status: "requires_action"` and `required_action.type` is - # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the - # tool calls once they're all completed. All outputs must be submitted in a single - # request. + # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + # tool calls once they're all completed. All outputs must be submitted in a single + # request. # - # @param run_id [String] Path param: The ID of the run that requires the tool output submission. + # @overload submit_tool_outputs_stream_raw(run_id, thread_id:, tool_outputs:, request_options: {}) # - # @param params [OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams, Hash{Symbol=>Object}] . + # @param run_id [String] Path param: The ID of the run that requires the tool output submission. # - # @option params [String] :thread_id Path param: The ID of the - # [thread](https://platform.openai.com/docs/api-reference/threads) to which this - # run belongs. 
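A hedged sketch of the new `submit_tool_outputs` call shape; the tool-output hash follows the `ToolOutput` model, and all IDs are placeholders:

    run = client.beta.threads.runs.submit_tool_outputs(
      "run_abc",
      thread_id: "thread_abc",
      tool_outputs: [{tool_call_id: "call_abc", output: "70 degrees and sunny"}]
    )

As with `#create`, a `stream: true` argument now raises instead of being stripped; the streaming variant is `#submit_tool_outputs_stream_raw`.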
+ # @param thread_id [String] Path param: The ID of the [thread](https://platform.openai.com/docs/api-referenc # - # @option params [Array] :tool_outputs Body param: A list of tools for which the outputs are being submitted. + # @param tool_outputs [Array] Body param: A list of tools for which the outputs are being submitted. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::Stream] + # @return [OpenAI::Internal::Stream] # - def submit_tool_outputs_streaming(run_id, params) - parsed, options = OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams.dump_request(params) - parsed.store(:stream, true) - thread_id = parsed.delete(:thread_id) do - raise ArgumentError.new("missing required path argument #{_1}") + # @see OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams + def submit_tool_outputs_stream_raw(run_id, params) + parsed, options = OpenAI::Beta::Threads::RunSubmitToolOutputsParams.dump_request(params) + unless parsed.fetch(:stream, true) + message = "Please use `#submit_tool_outputs` for the non-streaming use case." + raise ArgumentError.new(message) end + parsed.store(:stream, true) + thread_id = + parsed.delete(:thread_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end @client.request( method: :post, - path: ["threads/%0s/runs/%1s/submit_tool_outputs", thread_id, run_id], + path: ["threads/%1$s/runs/%2$s/submit_tool_outputs", thread_id, run_id], headers: {"accept" => "text/event-stream"}, body: parsed, - stream: OpenAI::Stream, - model: OpenAI::Models::Beta::AssistantStreamEvent, - options: options + stream: OpenAI::Internal::Stream, + model: OpenAI::Beta::AssistantStreamEvent, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client @steps = OpenAI::Resources::Beta::Threads::Runs::Steps.new(client: client) diff --git a/lib/openai/resources/beta/threads/runs/steps.rb b/lib/openai/resources/beta/threads/runs/steps.rb index e44fb9a5..669ce368 100644 --- a/lib/openai/resources/beta/threads/runs/steps.rb +++ b/lib/openai/resources/beta/threads/runs/steps.rb @@ -5,101 +5,96 @@ module Resources class Beta class Threads class Runs + # @deprecated The Assistants API is deprecated in favor of the Responses API class Steps - # Retrieves a run step. + # @deprecated The Assistants API is deprecated in favor of the Responses API # - # @param step_id [String] Path param: The ID of the run step to retrieve. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::StepRetrieveParams} for more details. + # + # Retrieves a run step. # - # @param params [OpenAI::Models::Beta::Threads::Runs::StepRetrieveParams, Hash{Symbol=>Object}] . + # @overload retrieve(step_id, thread_id:, run_id:, include: nil, request_options: {}) # - # @option params [String] :thread_id Path param: The ID of the thread to which the run and run step belongs. + # @param step_id [String] Path param: The ID of the run step to retrieve. # - # @option params [String] :run_id Path param: The ID of the run to which the run step belongs. + # @param thread_id [String] Path param: The ID of the thread to which the run and run step belongs. # - # @option params [Array] :include Query param: A list of additional fields to include in the response. 
Currently - # the only supported value is - # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file - # search result content. + # @param run_id [String] Path param: The ID of the run to which the run step belongs. # - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # @param include [Array] Query param: A list of additional fields to include in the response. Currently t # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Beta::Threads::Runs::RunStep] # + # @see OpenAI::Models::Beta::Threads::Runs::StepRetrieveParams def retrieve(step_id, params) - parsed, options = OpenAI::Models::Beta::Threads::Runs::StepRetrieveParams.dump_request(params) - thread_id = parsed.delete(:thread_id) do - raise ArgumentError.new("missing required path argument #{_1}") - end - run_id = parsed.delete(:run_id) do - raise ArgumentError.new("missing required path argument #{_1}") - end + parsed, options = OpenAI::Beta::Threads::Runs::StepRetrieveParams.dump_request(params) + thread_id = + parsed.delete(:thread_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end + run_id = + parsed.delete(:run_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end @client.request( method: :get, - path: ["threads/%0s/runs/%1s/steps/%2s", thread_id, run_id, step_id], + path: ["threads/%1$s/runs/%2$s/steps/%3$s", thread_id, run_id, step_id], query: parsed, - model: OpenAI::Models::Beta::Threads::Runs::RunStep, - options: options + model: OpenAI::Beta::Threads::Runs::RunStep, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end - # Returns a list of run steps belonging to a run. + # @deprecated The Assistants API is deprecated in favor of the Responses API # - # @param run_id [String] Path param: The ID of the run the run steps belong to. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Beta::Threads::Runs::StepListParams} for more details. + # + # Returns a list of run steps belonging to a run. # - # @param params [OpenAI::Models::Beta::Threads::Runs::StepListParams, Hash{Symbol=>Object}] . + # @overload list(run_id, thread_id:, after: nil, before: nil, include: nil, limit: nil, order: nil, request_options: {}) # - # @option params [String] :thread_id Path param: The ID of the thread the run and run steps belong to. + # @param run_id [String] Path param: The ID of the run the run steps belong to. # - # @option params [String] :after Query param: A cursor for use in pagination. `after` is an object ID that - # defines your place in the list. For instance, if you make a list request and - # receive 100 objects, ending with obj_foo, your subsequent call can include - # after=obj_foo in order to fetch the next page of the list. + # @param thread_id [String] Path param: The ID of the thread the run and run steps belong to. # - # @option params [String] :before Query param: A cursor for use in pagination. `before` is an object ID that - # defines your place in the list. For instance, if you make a list request and - # receive 100 objects, starting with obj_foo, your subsequent call can include - # before=obj_foo in order to fetch the previous page of the list. + # @param after [String] Query param: A cursor for use in pagination. 
`after` is an object ID that define # - # @option params [Array] :include Query param: A list of additional fields to include in the response. Currently - # the only supported value is - # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file - # search result content. + # @param before [String] Query param: A cursor for use in pagination. `before` is an object ID that defin # - # See the - # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) - # for more information. + # @param include [Array] Query param: A list of additional fields to include in the response. Currently t # - # @option params [Integer] :limit Query param: A limit on the number of objects to be returned. Limit can range - # between 1 and 100, and the default is 20. + # @param limit [Integer] Query param: A limit on the number of objects to be returned. Limit can range be # - # @option params [Symbol, OpenAI::Models::Beta::Threads::Runs::StepListParams::Order] :order Query param: Sort order by the `created_at` timestamp of the objects. `asc` for - # ascending order and `desc` for descending order. + # @param order [Symbol, OpenAI::Models::Beta::Threads::Runs::StepListParams::Order] Query param: Sort order by the `created_at` timestamp of the objects. `asc` for # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::CursorPage] + # @return [OpenAI::Internal::CursorPage] # + # @see OpenAI::Models::Beta::Threads::Runs::StepListParams def list(run_id, params) - parsed, options = OpenAI::Models::Beta::Threads::Runs::StepListParams.dump_request(params) - thread_id = parsed.delete(:thread_id) do - raise ArgumentError.new("missing required path argument #{_1}") - end + parsed, options = OpenAI::Beta::Threads::Runs::StepListParams.dump_request(params) + thread_id = + parsed.delete(:thread_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end @client.request( method: :get, - path: ["threads/%0s/runs/%1s/steps", thread_id, run_id], + path: ["threads/%1$s/runs/%2$s/steps", thread_id, run_id], query: parsed, - page: OpenAI::CursorPage, - model: OpenAI::Models::Beta::Threads::Runs::RunStep, - options: options + page: OpenAI::Internal::CursorPage, + model: OpenAI::Beta::Threads::Runs::RunStep, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/chat.rb b/lib/openai/resources/chat.rb index 8945e202..9bebeb10 100644 --- a/lib/openai/resources/chat.rb +++ b/lib/openai/resources/chat.rb @@ -6,8 +6,9 @@ class Chat # @return [OpenAI::Resources::Chat::Completions] attr_reader :completions - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client @completions = OpenAI::Resources::Chat::Completions.new(client: client) diff --git a/lib/openai/resources/chat/completions.rb b/lib/openai/resources/chat/completions.rb index 7be5f0ae..d6be9e1e 100644 --- a/lib/openai/resources/chat/completions.rb +++ b/lib/openai/resources/chat/completions.rb @@ -7,552 +7,340 @@ class Completions # @return [OpenAI::Resources::Chat::Completions::Messages] attr_reader :messages - # **Starting a new project?** We recommend trying - # 
[Responses](https://platform.openai.com/docs/api-reference/responses) to take - # advantage of the latest OpenAI platform features. Compare - # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). - # - # --- - # - # Creates a model response for the given chat conversation. Learn more in the - # [text generation](https://platform.openai.com/docs/guides/text-generation), - # [vision](https://platform.openai.com/docs/guides/vision), and - # [audio](https://platform.openai.com/docs/guides/audio) guides. - # - # Parameter support can differ depending on the model used to generate the - # response, particularly for newer reasoning models. Parameters that are only - # supported for reasoning models are noted below. For the current state of - # unsupported parameters in reasoning models, - # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). - # - # @param params [OpenAI::Models::Chat::CompletionCreateParams, Hash{Symbol=>Object}] . - # - # @option params [Array] :messages A list of messages comprising the conversation so far. Depending on the - # [model](https://platform.openai.com/docs/models) you use, different message - # types (modalities) are supported, like - # [text](https://platform.openai.com/docs/guides/text-generation), - # [images](https://platform.openai.com/docs/guides/vision), and - # [audio](https://platform.openai.com/docs/guides/audio). - # - # @option params [String, Symbol, OpenAI::Models::ChatModel] :model Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. - # - # @option params [OpenAI::Models::Chat::ChatCompletionAudioParam, nil] :audio Parameters for audio output. Required when audio output is requested with - # `modalities: ["audio"]`. - # [Learn more](https://platform.openai.com/docs/guides/audio). - # - # @option params [Float, nil] :frequency_penalty Number between -2.0 and 2.0. Positive values penalize new tokens based on their - # existing frequency in the text so far, decreasing the model's likelihood to - # repeat the same line verbatim. - # - # @option params [Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption] :function_call Deprecated in favor of `tool_choice`. - # - # Controls which (if any) function is called by the model. + # See {OpenAI::Resources::Chat::Completions#stream_raw} for streaming counterpart. # - # `none` means the model will not call a function and instead generates a message. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::CompletionCreateParams} for more details. # - # `auto` means the model can pick between generating a message or calling a - # function. - # - # Specifying a particular function via `{"name": "my_function"}` forces the model - # to call that function. - # - # `none` is the default when no functions are present. `auto` is the default if - # functions are present. - # - # @option params [Array] :functions Deprecated in favor of `tools`. - # - # A list of functions the model may generate JSON inputs for. 
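A minimal non-streaming sketch for the consolidated `Chat::Completions#create` below; the model name and prompt are illustrative only:

    completion = client.chat.completions.create(
      model: "gpt-4o",
      messages: [{role: "user", content: "Say hello."}]
    )
    puts completion.choices.first.message.content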
+ # **Starting a new project?** We recommend trying + # [Responses](https://platform.openai.com/docs/api-reference/responses) to take + # advantage of the latest OpenAI platform features. Compare + # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). # - # @option params [Hash{Symbol=>Integer}, nil] :logit_bias Modify the likelihood of specified tokens appearing in the completion. + # --- # - # Accepts a JSON object that maps tokens (specified by their token ID in the - # tokenizer) to an associated bias value from -100 to 100. Mathematically, the - # bias is added to the logits generated by the model prior to sampling. The exact - # effect will vary per model, but values between -1 and 1 should decrease or - # increase likelihood of selection; values like -100 or 100 should result in a ban - # or exclusive selection of the relevant token. + # Creates a model response for the given chat conversation. Learn more in the + # [text generation](https://platform.openai.com/docs/guides/text-generation), + # [vision](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio) guides. # - # @option params [Boolean, nil] :logprobs Whether to return log probabilities of the output tokens or not. If true, - # returns the log probabilities of each output token returned in the `content` of - # `message`. + # Parameter support can differ depending on the model used to generate the + # response, particularly for newer reasoning models. Parameters that are only + # supported for reasoning models are noted below. For the current state of + # unsupported parameters in reasoning models, + # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). # - # @option params [Integer, nil] :max_completion_tokens An upper bound for the number of tokens that can be generated for a completion, - # including visible output tokens and - # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + # @overload create(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) # - # @option params [Integer, nil] :max_tokens The maximum number of [tokens](/tokenizer) that can be generated in the chat - # completion. This value can be used to control - # [costs](https://openai.com/api/pricing/) for text generated via API. + # @param messages [Array] A list of messages comprising the conversation so far. Depending on the # - # This value is now deprecated in favor of `max_completion_tokens`, and is not - # compatible with - # [o1 series models](https://platform.openai.com/docs/guides/reasoning). + # @param model [String, Symbol, OpenAI::Models::ChatModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI # - # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. 
This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # @param audio [OpenAI::Models::Chat::ChatCompletionAudioParam, nil] Parameters for audio output. Required when audio output is requested with # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # @param frequency_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on # - # @option params [Array, nil] :modalities Output types that you would like the model to generate. Most models are capable - # of generating text, which is the default: + # @param function_call [Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption] Deprecated in favor of `tool_choice`. # - # `["text"]` + # @param functions [Array] Deprecated in favor of `tools`. # - # The `gpt-4o-audio-preview` model can also be used to - # [generate audio](https://platform.openai.com/docs/guides/audio). To request that - # this model generate both text and audio responses, you can use: + # @param logit_bias [Hash{Symbol=>Integer}, nil] Modify the likelihood of specified tokens appearing in the completion. # - # `["text", "audio"]` + # @param logprobs [Boolean, nil] Whether to return log probabilities of the output tokens or not. If true, # - # @option params [Integer, nil] :n How many chat completion choices to generate for each input message. Note that - # you will be charged based on the number of generated tokens across all of the - # choices. Keep `n` as `1` to minimize costs. + # @param max_completion_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a completion, # - # @option params [Boolean] :parallel_tool_calls Whether to enable - # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) - # during tool use. + # @param max_tokens [Integer, nil] The maximum number of [tokens](/tokenizer) that can be generated in the # - # @option params [OpenAI::Models::Chat::ChatCompletionPredictionContent, nil] :prediction Static predicted output content, such as the content of a text file that is - # being regenerated. + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # - # @option params [Float, nil] :presence_penalty Number between -2.0 and 2.0. Positive values penalize new tokens based on - # whether they appear in the text so far, increasing the model's likelihood to - # talk about new topics. + # @param modalities [Array, nil] Output types that you would like the model to generate. # - # @option params [Symbol, OpenAI::Models::ReasoningEffort, nil] :reasoning_effort **o-series models only** + # @param n [Integer, nil] How many chat completion choices to generate for each input message. Note that y # - # Constrains effort on reasoning for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. 
+ # @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g # - # @option params [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] :response_format An object specifying the format that the model must output. + # @param prediction [OpenAI::Models::Chat::ChatCompletionPredictionContent, nil] Static predicted output content, such as the content of a text file that is # - # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - # Outputs which ensures the model will match your supplied JSON schema. Learn more - # in the - # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on # - # Setting to `{ "type": "json_object" }` enables the older JSON mode, which - # ensures the message the model generates is valid JSON. Using `json_schema` is - # preferred for models that support it. + # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi # - # @option params [Integer, nil] :seed This feature is in Beta. If specified, our system will make a best effort to - # sample deterministically, such that repeated requests with the same `seed` and - # parameters should return the same result. Determinism is not guaranteed, and you - # should refer to the `system_fingerprint` response parameter to monitor changes - # in the backend. + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # - # @option params [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] :service_tier Specifies the latency tier to use for processing the request. This parameter is - # relevant for customers subscribed to the scale tier service: + # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. # - # - If set to 'auto', and the Project is Scale tier enabled, the system will - # utilize scale tier credits until they are exhausted. - # - If set to 'auto', and the Project is not Scale tier enabled, the request will - # be processed using the default service tier with a lower uptime SLA and no - # latency guarentee. - # - If set to 'default', the request will be processed using the default service - # tier with a lower uptime SLA and no latency guarentee. - # - When not set, the default behavior is 'auto'. + # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi # - # When this parameter is set, the response body will include the `service_tier` - # utilized. + # @param seed [Integer, nil] This feature is in Beta. # - # @option params [String, Array, nil] :stop Up to 4 sequences where the API will stop generating further tokens. The - # returned text will not contain the stop sequence. + # @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request. 
#
- # @option params [Boolean, nil] :store Whether or not to store the output of this chat completion request for use in
- # our [model distillation](https://platform.openai.com/docs/guides/distillation)
- # or [evals](https://platform.openai.com/docs/guides/evals) products.
+ # @param stop [String, Array, nil] Not supported with latest reasoning models `o3` and `o4-mini`.
#
- # @option params [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] :stream_options Options for streaming response. Only set this when you set `stream: true`.
+ # @param store [Boolean, nil] Whether or not to store the output of this chat completion request for
#
- # @option params [Float, nil] :temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- # make the output more random, while lower values like 0.2 will make it more
- # focused and deterministic. We generally recommend altering this or `top_p` but
- # not both.
+ # @param stream_options [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] Options for streaming response. Only set this when you set `stream: true`.
#
- # @option params [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice] :tool_choice Controls which (if any) tool is called by the model. `none` means the model will
- # not call any tool and instead generates a message. `auto` means the model can
- # pick between generating a message or calling one or more tools. `required` means
- # the model must call one or more tools. Specifying a particular tool via
- # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- # call that tool.
+ # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
#
- # `none` is the default when no tools are present. `auto` is the default if tools
- # are present.
+ # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom] Controls which (if any) tool is called by the model.
#
- # @option params [Array] :tools A list of tools the model may call. Currently, only functions are supported as a
- # tool. Use this to provide a list of functions the model may generate JSON inputs
- # for. A max of 128 functions are supported.
+ # @param tools [Array] A list of tools the model may call. You can provide either
#
- # @option params [Integer, nil] :top_logprobs An integer between 0 and 20 specifying the number of most likely tokens to
- # return at each token position, each with an associated log probability.
- # `logprobs` must be set to `true` if this parameter is used.
+ # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
#
- # @option params [Float, nil] :top_p An alternative to sampling with temperature, called nucleus sampling, where the
- # model considers the results of the tokens with top_p probability mass. So 0.1
- # means only the tokens comprising the top 10% probability mass are considered.
+ # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
#
- # We generally recommend altering this or `temperature` but not both.
+ # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
#
- # @option params [String] :user A unique identifier representing your end-user, which can help OpenAI to monitor
- # and detect abuse.
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ # @param verbosity [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in
#
- # @option params [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] :web_search_options This tool searches the web for relevant results to use in a response. Learn more
- # about the
- # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+ # @param web_search_options [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] This tool searches the web for relevant results to use in a response.
#
- # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
#
# @return [OpenAI::Models::Chat::ChatCompletion]
#
+ # @see OpenAI::Models::Chat::CompletionCreateParams
def create(params)
- parsed, options = OpenAI::Models::Chat::CompletionCreateParams.dump_request(params)
- parsed.delete(:stream)
+ parsed, options = OpenAI::Chat::CompletionCreateParams.dump_request(params)
+ if parsed[:stream]
+ message = "Please use `#stream_raw` for the streaming use case."
+ raise ArgumentError.new(message)
+ end
@client.request(
method: :post,
path: "chat/completions",
body: parsed,
- model: OpenAI::Models::Chat::ChatCompletion,
+ model: OpenAI::Chat::ChatCompletion,
options: options
)
end
- # **Starting a new project?** We recommend trying
- # [Responses](https://platform.openai.com/docs/api-reference/responses) to take
- # advantage of the latest OpenAI platform features. Compare
- # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
- #
- # ---
- #
- # Creates a model response for the given chat conversation. Learn more in the
- # [text generation](https://platform.openai.com/docs/guides/text-generation),
- # [vision](https://platform.openai.com/docs/guides/vision), and
- # [audio](https://platform.openai.com/docs/guides/audio) guides.
- #
- # Parameter support can differ depending on the model used to generate the
- # response, particularly for newer reasoning models. Parameters that are only
- # supported for reasoning models are noted below. For the current state of
- # unsupported parameters in reasoning models,
- # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
- #
- # @param params [OpenAI::Models::Chat::CompletionCreateParams, Hash{Symbol=>Object}] .
- #
- # @option params [Array] :messages A list of messages comprising the conversation so far. Depending on the
- # [model](https://platform.openai.com/docs/models) you use, different message
- # types (modalities) are supported, like
- # [text](https://platform.openai.com/docs/guides/text-generation),
- # [images](https://platform.openai.com/docs/guides/vision), and
- # [audio](https://platform.openai.com/docs/guides/audio).
+ # See {OpenAI::Resources::Chat::Completions#create} for non-streaming counterpart.
#
- # @option params [String, Symbol, OpenAI::Models::ChatModel] :model Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
- # wide range of models with different capabilities, performance characteristics,
- # and price points. Refer to the
- # [model guide](https://platform.openai.com/docs/models) to browse and compare
- # available models.
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Chat::CompletionCreateParams} for more details.
#
- # @option params [OpenAI::Models::Chat::ChatCompletionAudioParam, nil] :audio Parameters for audio output. Required when audio output is requested with
- # `modalities: ["audio"]`.
- # [Learn more](https://platform.openai.com/docs/guides/audio).
- #
- # @option params [Float, nil] :frequency_penalty Number between -2.0 and 2.0. Positive values penalize new tokens based on their
- # existing frequency in the text so far, decreasing the model's likelihood to
- # repeat the same line verbatim.
- #
- # @option params [Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption] :function_call Deprecated in favor of `tool_choice`.
- #
- # Controls which (if any) function is called by the model.
- #
- # `none` means the model will not call a function and instead generates a message.
- #
- # `auto` means the model can pick between generating a message or calling a
- # function.
- #
- # Specifying a particular function via `{"name": "my_function"}` forces the model
- # to call that function.
- #
- # `none` is the default when no functions are present. `auto` is the default if
- # functions are present.
- #
- # @option params [Array] :functions Deprecated in favor of `tools`.
- #
- # A list of functions the model may generate JSON inputs for.
+ # **Starting a new project?** We recommend trying
+ # [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+ # advantage of the latest OpenAI platform features. Compare
+ # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
#
- # @option params [Hash{Symbol=>Integer}, nil] :logit_bias Modify the likelihood of specified tokens appearing in the completion.
+ # ---
#
- # Accepts a JSON object that maps tokens (specified by their token ID in the
- # tokenizer) to an associated bias value from -100 to 100. Mathematically, the
- # bias is added to the logits generated by the model prior to sampling. The exact
- # effect will vary per model, but values between -1 and 1 should decrease or
- # increase likelihood of selection; values like -100 or 100 should result in a ban
- # or exclusive selection of the relevant token.
+ # Creates a model response for the given chat conversation. Learn more in the
+ # [text generation](https://platform.openai.com/docs/guides/text-generation),
+ # [vision](https://platform.openai.com/docs/guides/vision), and
+ # [audio](https://platform.openai.com/docs/guides/audio) guides.
#
- # @option params [Boolean, nil] :logprobs Whether to return log probabilities of the output tokens or not. If true,
- # returns the log probabilities of each output token returned in the `content` of
- # `message`.
+ # Parameter support can differ depending on the model used to generate the
+ # response, particularly for newer reasoning models. Parameters that are only
+ # supported for reasoning models are noted below. For the current state of
+ # unsupported parameters in reasoning models,
+ # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
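A minimal usage sketch for the non-streaming `#create` call above (the model name, prompt, and `OPENAI_API_KEY` environment variable are illustrative assumptions):

  require "openai"

  client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
  completion = client.chat.completions.create(
    messages: [{role: :user, content: "Say this is a test"}],
    model: :"gpt-4.1"
  )
  puts completion.choices.first.message.content

With the guard added in this patch, passing `stream: true` here raises an ArgumentError that points callers at `#stream_raw` instead of silently dropping the flag.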
#
- # @option params [Integer, nil] :max_completion_tokens An upper bound for the number of tokens that can be generated for a completion,
- # including visible output tokens and
- # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+ # @overload stream_raw(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {})
#
- # @option params [Integer, nil] :max_tokens The maximum number of [tokens](/tokenizer) that can be generated in the chat
- # completion. This value can be used to control
- # [costs](https://openai.com/api/pricing/) for text generated via API.
+ # @param messages [Array] A list of messages comprising the conversation so far. Depending on the
#
- # This value is now deprecated in favor of `max_completion_tokens`, and is not
- # compatible with
- # [o1 series models](https://platform.openai.com/docs/guides/reasoning).
+ # @param model [String, Symbol, OpenAI::Models::ChatModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
#
- # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. This can be useful
- # for storing additional information about the object in a structured format, and
- # querying for objects via API or the dashboard.
+ # @param audio [OpenAI::Models::Chat::ChatCompletionAudioParam, nil] Parameters for audio output. Required when audio output is requested with
#
- # Keys are strings with a maximum length of 64 characters. Values are strings with
- # a maximum length of 512 characters.
+ # @param frequency_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on
#
- # @option params [Array, nil] :modalities Output types that you would like the model to generate. Most models are capable
- # of generating text, which is the default:
+ # @param function_call [Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption] Deprecated in favor of `tool_choice`.
#
- # `["text"]`
+ # @param functions [Array] Deprecated in favor of `tools`.
#
- # The `gpt-4o-audio-preview` model can also be used to
- # [generate audio](https://platform.openai.com/docs/guides/audio). To request that
- # this model generate both text and audio responses, you can use:
+ # @param logit_bias [Hash{Symbol=>Integer}, nil] Modify the likelihood of specified tokens appearing in the completion.
#
- # `["text", "audio"]`
+ # @param logprobs [Boolean, nil] Whether to return log probabilities of the output tokens or not. If true,
#
- # @option params [Integer, nil] :n How many chat completion choices to generate for each input message. Note that
- # you will be charged based on the number of generated tokens across all of the
- # choices. Keep `n` as `1` to minimize costs.
+ # @param max_completion_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a completion,
#
- # @option params [Boolean] :parallel_tool_calls Whether to enable
- # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
- # during tool use.
+ # @param max_tokens [Integer, nil] The maximum number of [tokens](/tokenizer) that can be generated in the
#
- # @option params [OpenAI::Models::Chat::ChatCompletionPredictionContent, nil] :prediction Static predicted output content, such as the content of a text file that is
- # being regenerated.
+ # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
#
- # @option params [Float, nil] :presence_penalty Number between -2.0 and 2.0. Positive values penalize new tokens based on
- # whether they appear in the text so far, increasing the model's likelihood to
- # talk about new topics.
+ # @param modalities [Array, nil] Output types that you would like the model to generate.
#
- # @option params [Symbol, OpenAI::Models::ReasoningEffort, nil] :reasoning_effort **o-series models only**
+ # @param n [Integer, nil] How many chat completion choices to generate for each input message. Note that y
#
- # Constrains effort on reasoning for
- # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- # result in faster responses and fewer tokens used on reasoning in a response.
+ # @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g
#
- # @option params [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] :response_format An object specifying the format that the model must output.
+ # @param prediction [OpenAI::Models::Chat::ChatCompletionPredictionContent, nil] Static predicted output content, such as the content of a text file that is
#
- # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
- # Outputs which ensures the model will match your supplied JSON schema. Learn more
- # in the
- # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ # @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on
#
- # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
- # ensures the message the model generates is valid JSON. Using `json_schema` is
- # preferred for models that support it.
+ # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
#
- # @option params [Integer, nil] :seed This feature is in Beta. If specified, our system will make a best effort to
- # sample deterministically, such that repeated requests with the same `seed` and
- # parameters should return the same result. Determinism is not guaranteed, and you
- # should refer to the `system_fingerprint` response parameter to monitor changes
- # in the backend.
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
#
- # @option params [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] :service_tier Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
#
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarentee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarentee.
- # - When not set, the default behavior is 'auto'.
+ # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
#
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # @param seed [Integer, nil] This feature is in Beta.
#
- # @option params [String, Array, nil] :stop Up to 4 sequences where the API will stop generating further tokens. The
- # returned text will not contain the stop sequence.
+ # @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
#
- # @option params [Boolean, nil] :store Whether or not to store the output of this chat completion request for use in
- # our [model distillation](https://platform.openai.com/docs/guides/distillation)
- # or [evals](https://platform.openai.com/docs/guides/evals) products.
+ # @param stop [String, Array, nil] Not supported with latest reasoning models `o3` and `o4-mini`.
#
- # @option params [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] :stream_options Options for streaming response. Only set this when you set `stream: true`.
+ # @param store [Boolean, nil] Whether or not to store the output of this chat completion request for
#
- # @option params [Float, nil] :temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- # make the output more random, while lower values like 0.2 will make it more
- # focused and deterministic. We generally recommend altering this or `top_p` but
- # not both.
+ # @param stream_options [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] Options for streaming response. Only set this when you set `stream: true`.
#
- # @option params [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice] :tool_choice Controls which (if any) tool is called by the model. `none` means the model will
- # not call any tool and instead generates a message. `auto` means the model can
- # pick between generating a message or calling one or more tools. `required` means
- # the model must call one or more tools. Specifying a particular tool via
- # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
- # call that tool.
+ # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
#
- # `none` is the default when no tools
- # are present.
+ # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom] Controls which (if any) tool is called by the model.
#
- # @option params [Array] :tools A list of tools the model may call. Currently, only functions are supported as a
- # tool. Use this to provide a list of functions the model may generate JSON inputs
- # for. A max of 128 functions are supported.
+ # @param tools [Array] A list of tools the model may call. You can provide either
#
- # @option params [Integer, nil] :top_logprobs An integer between 0 and 20 specifying the number of most likely tokens to
- # return at each token position, each with an associated log probability.
- # `logprobs` must be set to `true` if this parameter is used.
+ # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
#
- # @option params [Float, nil] :top_p An alternative to sampling with temperature, called nucleus sampling, where the
- # model considers the results of the tokens with top_p probability mass. So 0.1
- # means only the tokens comprising the top 10% probability mass are considered.
+ # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
#
- # We generally recommend altering this or `temperature` but not both.
+ # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
#
- # @option params [String] :user A unique identifier representing your end-user, which can help OpenAI to monitor
- # and detect abuse.
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ # @param verbosity [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in
#
- # @option params [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] :web_search_options This tool searches the web for relevant results to use in a response. Learn more
- # about the
- # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+ # @param web_search_options [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] This tool searches the web for relevant results to use in a response.
#
- # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
#
- # @return [OpenAI::Stream]
+ # @return [OpenAI::Internal::Stream]
#
- def create_streaming(params)
- parsed, options = OpenAI::Models::Chat::CompletionCreateParams.dump_request(params)
+ # @see OpenAI::Models::Chat::CompletionCreateParams
+ def stream_raw(params)
+ parsed, options = OpenAI::Chat::CompletionCreateParams.dump_request(params)
+ unless parsed.fetch(:stream, true)
+ message = "Please use `#create` for the non-streaming use case."
+ raise ArgumentError.new(message)
+ end
parsed.store(:stream, true)
@client.request(
method: :post,
path: "chat/completions",
headers: {"accept" => "text/event-stream"},
body: parsed,
- stream: OpenAI::Stream,
- model: OpenAI::Models::Chat::ChatCompletionChunk,
+ stream: OpenAI::Internal::Stream,
+ model: OpenAI::Chat::ChatCompletionChunk,
options: options
)
end
# Get a stored chat completion. Only Chat Completions that have been created with
- # the `store` parameter set to `true` will be returned.
+ # the `store` parameter set to `true` will be returned.
#
- # @param completion_id [String] The ID of the chat completion to retrieve.
+ # @overload retrieve(completion_id, request_options: {})
#
- # @param params [OpenAI::Models::Chat::CompletionRetrieveParams, Hash{Symbol=>Object}] .
+ # @param completion_id [String] The ID of the chat completion to retrieve.
#
- # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
#
# @return [OpenAI::Models::Chat::ChatCompletion]
#
+ # @see OpenAI::Models::Chat::CompletionRetrieveParams
def retrieve(completion_id, params = {})
@client.request(
method: :get,
- path: ["chat/completions/%0s", completion_id],
- model: OpenAI::Models::Chat::ChatCompletion,
+ path: ["chat/completions/%1$s", completion_id],
+ model: OpenAI::Chat::ChatCompletion,
options: params[:request_options]
)
end
- # Modify a stored chat completion. Only Chat Completions that have been created
- # with the `store` parameter set to `true` can be modified. Currently, the only
- # supported modification is to update the `metadata` field.
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Chat::CompletionUpdateParams} for more details.
#
- # @param completion_id [String] The ID of the chat completion to update.
+ # Modify a stored chat completion. Only Chat Completions that have been created
+ # with the `store` parameter set to `true` can be modified. Currently, the only
+ # supported modification is to update the `metadata` field.
#
- # @param params [OpenAI::Models::Chat::CompletionUpdateParams, Hash{Symbol=>Object}] .
+ # @overload update(completion_id, metadata:, request_options: {})
#
- # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. This can be useful
- # for storing additional information about the object in a structured format, and
- # querying for objects via API or the dashboard.
+ # @param completion_id [String] The ID of the chat completion to update.
#
- # Keys are strings with a maximum length of 64 characters. Values are strings with
- # a maximum length of 512 characters.
+ # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
#
- # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
#
# @return [OpenAI::Models::Chat::ChatCompletion]
#
+ # @see OpenAI::Models::Chat::CompletionUpdateParams
def update(completion_id, params)
- parsed, options = OpenAI::Models::Chat::CompletionUpdateParams.dump_request(params)
+ parsed, options = OpenAI::Chat::CompletionUpdateParams.dump_request(params)
@client.request(
method: :post,
- path: ["chat/completions/%0s", completion_id],
+ path: ["chat/completions/%1$s", completion_id],
body: parsed,
- model: OpenAI::Models::Chat::ChatCompletion,
+ model: OpenAI::Chat::ChatCompletion,
options: options
)
end
- # List stored Chat Completions. Only Chat Completions that have been stored with
- # the `store` parameter set to `true` will be returned.
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Chat::CompletionListParams} for more details.
#
- # @param params [OpenAI::Models::Chat::CompletionListParams, Hash{Symbol=>Object}] .
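A sketch of the renamed streaming entry point (`create_streaming` is now `stream_raw`); the chunk-handling shape assumes the usual ChatCompletionChunk delta layout, and the model name is illustrative:

  require "openai"

  client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
  stream = client.chat.completions.stream_raw(
    messages: [{role: :user, content: "Tell me a short story"}],
    model: :"gpt-4.1"
  )
  stream.each do |chunk|
    print chunk.choices.first&.delta&.content
  end

Note also the corrected path templates: `"chat/completions/%1$s"` uses Ruby's explicit 1-based positional format specifier in place of the old `%0s`.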
+ # List stored Chat Completions. Only Chat Completions that have been stored with
+ # the `store` parameter set to `true` will be returned.
#
- # @option params [String] :after Identifier for the last chat completion from the previous pagination request.
+ # @overload list(after: nil, limit: nil, metadata: nil, model: nil, order: nil, request_options: {})
#
- # @option params [Integer] :limit Number of Chat Completions to retrieve.
+ # @param after [String] Identifier for the last chat completion from the previous pagination request.
#
- # @option params [Hash{Symbol=>String}, nil] :metadata A list of metadata keys to filter the Chat Completions by. Example:
+ # @param limit [Integer] Number of Chat Completions to retrieve.
#
- # `metadata[key1]=value1&metadata[key2]=value2`
+ # @param metadata [Hash{Symbol=>String}, nil] A list of metadata keys to filter the Chat Completions by. Example:
#
- # @option params [String] :model The model used to generate the Chat Completions.
+ # @param model [String] The model used to generate the Chat Completions.
#
- # @option params [Symbol, OpenAI::Models::Chat::CompletionListParams::Order] :order Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
- # `desc` for descending order. Defaults to `asc`.
+ # @param order [Symbol, OpenAI::Models::Chat::CompletionListParams::Order] Sort order for Chat Completions by timestamp. Use `asc` for ascending order or `
#
- # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
#
- # @return [OpenAI::CursorPage]
+ # @return [OpenAI::Internal::CursorPage]
#
+ # @see OpenAI::Models::Chat::CompletionListParams
def list(params = {})
- parsed, options = OpenAI::Models::Chat::CompletionListParams.dump_request(params)
+ parsed, options = OpenAI::Chat::CompletionListParams.dump_request(params)
@client.request(
method: :get,
path: "chat/completions",
query: parsed,
- page: OpenAI::CursorPage,
- model: OpenAI::Models::Chat::ChatCompletion,
+ page: OpenAI::Internal::CursorPage,
+ model: OpenAI::Chat::ChatCompletion,
options: options
)
end
# Delete a stored chat completion. Only Chat Completions that have been created
- # with the `store` parameter set to `true` can be deleted.
+ # with the `store` parameter set to `true` can be deleted.
#
- # @param completion_id [String] The ID of the chat completion to delete.
+ # @overload delete(completion_id, request_options: {})
#
- # @param params [OpenAI::Models::Chat::CompletionDeleteParams, Hash{Symbol=>Object}] .
+ # @param completion_id [String] The ID of the chat completion to delete.
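A hedged sketch of cursor pagination against the relocated page class (`OpenAI::Internal::CursorPage`); `auto_paging_each` is assumed to fetch subsequent pages on demand:

  require "openai"

  client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
  page = client.chat.completions.list(limit: 20)
  page.auto_paging_each do |completion|
    puts completion.id
  end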
#
- # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
#
# @return [OpenAI::Models::Chat::ChatCompletionDeleted]
#
+ # @see OpenAI::Models::Chat::CompletionDeleteParams
def delete(completion_id, params = {})
@client.request(
method: :delete,
- path: ["chat/completions/%0s", completion_id],
- model: OpenAI::Models::Chat::ChatCompletionDeleted,
+ path: ["chat/completions/%1$s", completion_id],
+ model: OpenAI::Chat::ChatCompletionDeleted,
options: params[:request_options]
)
end
- # @param client [OpenAI::Client]
+ # @api private
#
+ # @param client [OpenAI::Client]
def initialize(client:)
@client = client
@messages = OpenAI::Resources::Chat::Completions::Messages.new(client: client)
diff --git a/lib/openai/resources/chat/completions/messages.rb b/lib/openai/resources/chat/completions/messages.rb
index 3bc5880d..ea3e3382 100644
--- a/lib/openai/resources/chat/completions/messages.rb
+++ b/lib/openai/resources/chat/completions/messages.rb
@@ -5,38 +5,42 @@ module Resources
class Chat
class Completions
class Messages
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Chat::Completions::MessageListParams} for more details.
+ #
# Get the messages in a stored chat completion. Only Chat Completions that have
- # been created with the `store` parameter set to `true` will be returned.
+ # been created with the `store` parameter set to `true` will be returned.
#
- # @param completion_id [String] The ID of the chat completion to retrieve messages from.
+ # @overload list(completion_id, after: nil, limit: nil, order: nil, request_options: {})
#
- # @param params [OpenAI::Models::Chat::Completions::MessageListParams, Hash{Symbol=>Object}] .
+ # @param completion_id [String] The ID of the chat completion to retrieve messages from.
#
- # @option params [String] :after Identifier for the last message from the previous pagination request.
+ # @param after [String] Identifier for the last message from the previous pagination request.
#
- # @option params [Integer] :limit Number of messages to retrieve.
+ # @param limit [Integer] Number of messages to retrieve.
#
- # @option params [Symbol, OpenAI::Models::Chat::Completions::MessageListParams::Order] :order Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
- # for descending order. Defaults to `asc`.
+ # @param order [Symbol, OpenAI::Models::Chat::Completions::MessageListParams::Order] Sort order for messages by timestamp. Use `asc` for ascending order or `desc` fo
#
- # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
#
- # @return [OpenAI::CursorPage]
+ # @return [OpenAI::Internal::CursorPage]
#
+ # @see OpenAI::Models::Chat::Completions::MessageListParams
def list(completion_id, params = {})
- parsed, options = OpenAI::Models::Chat::Completions::MessageListParams.dump_request(params)
+ parsed, options = OpenAI::Chat::Completions::MessageListParams.dump_request(params)
@client.request(
method: :get,
- path: ["chat/completions/%0s/messages", completion_id],
+ path: ["chat/completions/%1$s/messages", completion_id],
query: parsed,
- page: OpenAI::CursorPage,
- model: OpenAI::Models::Chat::ChatCompletionStoreMessage,
+ page: OpenAI::Internal::CursorPage,
+ model: OpenAI::Chat::ChatCompletionStoreMessage,
options: options
)
end
- # @param client [OpenAI::Client]
+ # @api private
#
+ # @param client [OpenAI::Client]
def initialize(client:)
@client = client
end
diff --git a/lib/openai/resources/completions.rb b/lib/openai/resources/completions.rb
index 1a6dcb98..de1e8786 100644
--- a/lib/openai/resources/completions.rb
+++ b/lib/openai/resources/completions.rb
@@ -3,256 +3,138 @@ module OpenAI
module Resources
class Completions
- # Creates a completion for the provided prompt and parameters.
- #
- # @param params [OpenAI::Models::CompletionCreateParams, Hash{Symbol=>Object}] .
- #
- # @option params [String, Symbol, OpenAI::Models::CompletionCreateParams::Model::Preset] :model ID of the model to use. You can use the
- # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
- # see all of your available models, or see our
- # [Model overview](https://platform.openai.com/docs/models) for descriptions of
- # them.
- #
- # @option params [String, Array, Array, Array>, nil] :prompt The prompt(s) to generate completions for, encoded as a string, array of
- # strings, array of tokens, or array of token arrays.
- #
- # Note that <|endoftext|> is the document separator that the model sees during
- # training, so if a prompt is not specified the model will generate as if from the
- # beginning of a new document.
- #
- # @option params [Integer, nil] :best_of Generates `best_of` completions server-side and returns the "best" (the one with
- # the highest log probability per token). Results cannot be streamed.
- #
- # When used with `n`, `best_of` controls the number of candidate completions and
- # `n` specifies how many to return – `best_of` must be greater than `n`.
- #
- # **Note:** Because this parameter generates many completions, it can quickly
- # consume your token quota. Use carefully and ensure that you have reasonable
- # settings for `max_tokens` and `stop`.
+ # See {OpenAI::Resources::Completions#create_streaming} for streaming counterpart.
#
- # @option params [Boolean, nil] :echo Echo back the prompt in addition to the completion
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::CompletionCreateParams} for more details.
#
- # @option params [Float, nil] :frequency_penalty Number between -2.0 and 2.0. Positive values penalize new tokens based on their
- # existing frequency in the text so far, decreasing the model's likelihood to
- # repeat the same line verbatim.
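For the stored-completion endpoints above, a sketch of updating metadata and then reading back the stored messages (the completion ID is a placeholder, and the message fields follow the ChatCompletionStoreMessage shape):

  require "openai"

  client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
  client.chat.completions.update("chatcmpl_abc123", metadata: {topic: "demo"})
  client.chat.completions.messages.list("chatcmpl_abc123").auto_paging_each do |message|
    puts message.content
  end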
- #
- # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
- #
- # @option params [Hash{Symbol=>Integer}, nil] :logit_bias Modify the likelihood of specified tokens appearing in the completion.
- #
- # Accepts a JSON object that maps tokens (specified by their token ID in the GPT
- # tokenizer) to an associated bias value from -100 to 100. You can use this
- # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
- # Mathematically, the bias is added to the logits generated by the model prior to
- # sampling. The exact effect will vary per model, but values between -1 and 1
- # should decrease or increase likelihood of selection; values like -100 or 100
- # should result in a ban or exclusive selection of the relevant token.
- #
- # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
- # from being generated.
- #
- # @option params [Integer, nil] :logprobs Include the log probabilities on the `logprobs` most likely output tokens, as
- # well the chosen tokens. For example, if `logprobs` is 5, the API will return a
- # list of the 5 most likely tokens. The API will always return the `logprob` of
- # the sampled token, so there may be up to `logprobs+1` elements in the response.
+ # Creates a completion for the provided prompt and parameters.
#
- # The maximum value for `logprobs` is 5.
+ # @overload create(model:, prompt:, best_of: nil, echo: nil, frequency_penalty: nil, logit_bias: nil, logprobs: nil, max_tokens: nil, n: nil, presence_penalty: nil, seed: nil, stop: nil, stream_options: nil, suffix: nil, temperature: nil, top_p: nil, user: nil, request_options: {})
#
- # @option params [Integer, nil] :max_tokens The maximum number of [tokens](/tokenizer) that can be generated in the
- # completion.
+ # @param model [String, Symbol, OpenAI::Models::CompletionCreateParams::Model] ID of the model to use. You can use the [List models](https://platform.openai.co
#
- # The token count of your prompt plus `max_tokens` cannot exceed the model's
- # context length.
- # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
- # for counting tokens.
+ # @param prompt [String, Array, Array, Array>, nil] The prompt(s) to generate completions for, encoded as a string, array of strings
#
- # @option params [Integer, nil] :n How many completions to generate for each prompt.
+ # @param best_of [Integer, nil] Generates `best_of` completions server-side and returns the "best" (the one with
#
- # **Note:** Because this parameter generates many completions, it can quickly
- # consume your token quota. Use carefully and ensure that you have reasonable
- # settings for `max_tokens` and `stop`.
+ # @param echo [Boolean, nil] Echo back the prompt in addition to the completion
#
- # @option params [Float, nil] :presence_penalty Number between -2.0 and 2.0. Positive values penalize new tokens based on
- # whether they appear in the text so far, increasing the model's likelihood to
- # talk about new topics.
+ # @param frequency_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on their
#
- # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+ # @param logit_bias [Hash{Symbol=>Integer}, nil] Modify the likelihood of specified tokens appearing in the completion.
#
- # @option params [Integer, nil] :seed If specified, our system will make a best effort to sample deterministically,
- # such that repeated requests with the same `seed` and parameters should return
- # the same result.
+ # @param logprobs [Integer, nil] Include the log probabilities on the `logprobs` most likely output tokens, as we
#
- # Determinism is not guaranteed, and you should refer to the `system_fingerprint`
- # response parameter to monitor changes in the backend.
+ # @param max_tokens [Integer, nil] The maximum number of [tokens](/tokenizer) that can be generated in the completi
#
- # @option params [String, Array, nil] :stop Up to 4 sequences where the API will stop generating further tokens. The
- # returned text will not contain the stop sequence.
+ # @param n [Integer, nil] How many completions to generate for each prompt.
#
- # @option params [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] :stream_options Options for streaming response. Only set this when you set `stream: true`.
+ # @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on whethe
#
- # @option params [String, nil] :suffix The suffix that comes after a completion of inserted text.
+ # @param seed [Integer, nil] If specified, our system will make a best effort to sample deterministically, su
#
- # This parameter is only supported for `gpt-3.5-turbo-instruct`.
+ # @param stop [String, Array, nil] Not supported with latest reasoning models `o3` and `o4-mini`.
#
- # @option params [Float, nil] :temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- # make the output more random, while lower values like 0.2 will make it more
- # focused and deterministic.
+ # @param stream_options [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] Options for streaming response. Only set this when you set `stream: true`.
#
- # We generally recommend altering this or `top_p` but not both.
+ # @param suffix [String, nil] The suffix that comes after a completion of inserted text.
#
- # @option params [Float, nil] :top_p An alternative to sampling with temperature, called nucleus sampling, where the
- # model considers the results of the tokens with top_p probability mass. So 0.1
- # means only the tokens comprising the top 10% probability mass are considered.
+ # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
#
- # We generally recommend altering this or `temperature` but not both.
+ # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
#
- # @option params [String] :user A unique identifier representing your end-user, which can help OpenAI to monitor
- # and detect abuse.
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor
#
- # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
#
# @return [OpenAI::Models::Completion]
#
+ # @see OpenAI::Models::CompletionCreateParams
def create(params)
- parsed, options = OpenAI::Models::CompletionCreateParams.dump_request(params)
- parsed.delete(:stream)
+ parsed, options = OpenAI::CompletionCreateParams.dump_request(params)
+ if parsed[:stream]
+ message = "Please use `#create_streaming` for the streaming use case."
+ raise ArgumentError.new(message)
+ end
@client.request(
method: :post,
path: "completions",
body: parsed,
- model: OpenAI::Models::Completion,
+ model: OpenAI::Completion,
options: options
)
end
- # Creates a completion for the provided prompt and parameters.
+ # See {OpenAI::Resources::Completions#create} for non-streaming counterpart.
#
- # @param params [OpenAI::Models::CompletionCreateParams, Hash{Symbol=>Object}] .
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::CompletionCreateParams} for more details.
#
- # @option params [String, Symbol, OpenAI::Models::CompletionCreateParams::Model::Preset] :model ID of the model to use. You can use the
- # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
- # see all of your available models, or see our
- # [Model overview](https://platform.openai.com/docs/models) for descriptions of
- # them.
- #
- # @option params [String, Array, Array, Array>, nil] :prompt The prompt(s) to generate completions for, encoded as a string, array of
- # strings, array of tokens, or array of token arrays.
- #
- # Note that <|endoftext|> is the document separator that the model sees during
- # training, so if a prompt is not specified the model will generate as if from the
- # beginning of a new document.
- #
- # @option params [Integer, nil] :best_of Generates `best_of` completions server-side and returns the "best" (the one with
- # the highest log probability per token). Results cannot be streamed.
- #
- # When used with `n`, `best_of` controls the number of candidate completions and
- # `n` specifies how many to return – `best_of` must be greater than `n`.
- #
- # **Note:** Because this parameter generates many completions, it can quickly
- # consume your token quota. Use carefully and ensure that you have reasonable
- # settings for `max_tokens` and `stop`.
- #
- # @option params [Boolean, nil] :echo Echo back the prompt in addition to the completion
- #
- # @option params [Float, nil] :frequency_penalty Number between -2.0 and 2.0. Positive values penalize new tokens based on their
- # existing frequency in the text so far, decreasing the model's likelihood to
- # repeat the same line verbatim.
- #
- # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
- #
- # @option params [Hash{Symbol=>Integer}, nil] :logit_bias Modify the likelihood of specified tokens appearing in the completion.
- #
- # Accepts a JSON object that maps tokens (specified by their token ID in the GPT
- # tokenizer) to an associated bias value from -100 to 100. You can use this
- # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
- # Mathematically, the bias is added to the logits generated by the model prior to
- # sampling. The exact effect will vary per model, but values between -1 and 1
- # should decrease or increase likelihood of selection; values like -100 or 100
- # should result in a ban or exclusive selection of the relevant token.
- #
- # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
- # from being generated.
- #
- # @option params [Integer, nil] :logprobs Include the log probabilities on the `logprobs` most likely output tokens, as
- # well the chosen tokens. For example, if `logprobs` is 5, the API will return a
- # list of the 5 most likely tokens. The API will always return the `logprob` of
- # the sampled token, so there may be up to `logprobs+1` elements in the response.
+ # Creates a completion for the provided prompt and parameters.
#
- # The maximum value for `logprobs` is 5.
+ # @overload create_streaming(model:, prompt:, best_of: nil, echo: nil, frequency_penalty: nil, logit_bias: nil, logprobs: nil, max_tokens: nil, n: nil, presence_penalty: nil, seed: nil, stop: nil, stream_options: nil, suffix: nil, temperature: nil, top_p: nil, user: nil, request_options: {})
#
- # @option params [Integer, nil] :max_tokens The maximum number of [tokens](/tokenizer) that can be generated in the
- # completion.
+ # @param model [String, Symbol, OpenAI::Models::CompletionCreateParams::Model] ID of the model to use. You can use the [List models](https://platform.openai.co
#
- # The token count of your prompt plus `max_tokens` cannot exceed the model's
- # context length.
- # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
- # for counting tokens.
+ # @param prompt [String, Array, Array, Array>, nil] The prompt(s) to generate completions for, encoded as a string, array of strings
#
- # @option params [Integer, nil] :n How many completions to generate for each prompt.
+ # @param best_of [Integer, nil] Generates `best_of` completions server-side and returns the "best" (the one with
#
- # **Note:** Because this parameter generates many completions, it can quickly
- # consume your token quota. Use carefully and ensure that you have reasonable
- # settings for `max_tokens` and `stop`.
+ # @param echo [Boolean, nil] Echo back the prompt in addition to the completion
#
- # @option params [Float, nil] :presence_penalty Number between -2.0 and 2.0. Positive values penalize new tokens based on
- # whether they appear in the text so far, increasing the model's likelihood to
- # talk about new topics.
+ # @param frequency_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on their
#
- # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+ # @param logit_bias [Hash{Symbol=>Integer}, nil] Modify the likelihood of specified tokens appearing in the completion.
#
- # @option params [Integer, nil] :seed If specified, our system will make a best effort to sample deterministically,
- # such that repeated requests with the same `seed` and parameters should return
- # the same result.
+ # @param logprobs [Integer, nil] Include the log probabilities on the `logprobs` most likely output tokens, as we
#
- # Determinism is not guaranteed, and you should refer to the `system_fingerprint`
- # response parameter to monitor changes in the backend.
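A usage sketch for the legacy text-completions surface, which mirrors the chat guard: `create` now rejects `stream: true`, and `create_streaming` rejects an explicit `stream: false` (model and prompt are illustrative):

  require "openai"

  client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
  completion = client.completions.create(
    model: "gpt-3.5-turbo-instruct",
    prompt: "Say this is a test",
    max_tokens: 5
  )
  puts completion.choices.first.text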
+ # @param max_tokens [Integer, nil] The maximum number of [tokens](/tokenizer) that can be generated in the completi
#
- # @option params [String, Array, nil] :stop Up to 4 sequences where the API will stop generating further tokens. The
- # returned text will not contain the stop sequence.
+ # @param n [Integer, nil] How many completions to generate for each prompt.
#
- # @option params [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] :stream_options Options for streaming response. Only set this when you set `stream: true`.
+ # @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on whethe
#
- # @option params [String, nil] :suffix The suffix that comes after a completion of inserted text.
+ # @param seed [Integer, nil] If specified, our system will make a best effort to sample deterministically, su
#
- # This parameter is only supported for `gpt-3.5-turbo-instruct`.
+ # @param stop [String, Array, nil] Not supported with latest reasoning models `o3` and `o4-mini`.
#
- # @option params [Float, nil] :temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- # make the output more random, while lower values like 0.2 will make it more
- # focused and deterministic.
+ # @param stream_options [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] Options for streaming response. Only set this when you set `stream: true`.
#
- # We generally recommend altering this or `top_p` but not both.
+ # @param suffix [String, nil] The suffix that comes after a completion of inserted text.
#
- # @option params [Float, nil] :top_p An alternative to sampling with temperature, called nucleus sampling, where the
- # model considers the results of the tokens with top_p probability mass. So 0.1
- # means only the tokens comprising the top 10% probability mass are considered.
+ # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
#
- # We generally recommend altering this or `temperature` but not both.
+ # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
#
- # @option params [String] :user A unique identifier representing your end-user, which can help OpenAI to monitor
- # and detect abuse.
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor
#
- # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
#
- # @return [OpenAI::Stream]
+ # @return [OpenAI::Internal::Stream]
#
+ # @see OpenAI::Models::CompletionCreateParams
def create_streaming(params)
- parsed, options = OpenAI::Models::CompletionCreateParams.dump_request(params)
+ parsed, options = OpenAI::CompletionCreateParams.dump_request(params)
+ unless parsed.fetch(:stream, true)
+ message = "Please use `#create` for the non-streaming use case."
+ raise ArgumentError.new(message)
+ end
parsed.store(:stream, true)
@client.request(
method: :post,
path: "completions",
headers: {"accept" => "text/event-stream"},
body: parsed,
- stream: OpenAI::Stream,
- model: OpenAI::Models::Completion,
+ stream: OpenAI::Internal::Stream,
+ model: OpenAI::Completion,
options: options
)
end
- # @param client [OpenAI::Client]
+ # @api private
#
+ # @param client [OpenAI::Client]
def initialize(client:)
@client = client
end
diff --git a/lib/openai/resources/containers.rb b/lib/openai/resources/containers.rb
new file mode 100644
index 00000000..2d582be8
--- /dev/null
+++ b/lib/openai/resources/containers.rb
@@ -0,0 +1,113 @@
+# frozen_string_literal: true
+
+module OpenAI
+ module Resources
+ class Containers
+ # @return [OpenAI::Resources::Containers::Files]
+ attr_reader :files
+
+ # Create Container
+ #
+ # @overload create(name:, expires_after: nil, file_ids: nil, request_options: {})
+ #
+ # @param name [String] Name of the container to create.
+ #
+ # @param expires_after [OpenAI::Models::ContainerCreateParams::ExpiresAfter] Container expiration time in seconds relative to the 'anchor' time.
+ #
+ # @param file_ids [Array] IDs of files to copy to the container.
+ #
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
+ #
+ # @return [OpenAI::Models::ContainerCreateResponse]
+ #
+ # @see OpenAI::Models::ContainerCreateParams
+ def create(params)
+ parsed, options = OpenAI::ContainerCreateParams.dump_request(params)
+ @client.request(
+ method: :post,
+ path: "containers",
+ body: parsed,
+ model: OpenAI::Models::ContainerCreateResponse,
+ options: options
+ )
+ end
+
+ # Retrieve Container
+ #
+ # @overload retrieve(container_id, request_options: {})
+ #
+ # @param container_id [String]
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
+ #
+ # @return [OpenAI::Models::ContainerRetrieveResponse]
+ #
+ # @see OpenAI::Models::ContainerRetrieveParams
+ def retrieve(container_id, params = {})
+ @client.request(
+ method: :get,
+ path: ["containers/%1$s", container_id],
+ model: OpenAI::Models::ContainerRetrieveResponse,
+ options: params[:request_options]
+ )
+ end
+
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::ContainerListParams} for more details.
+ #
+ # List Containers
+ #
+ # @overload list(after: nil, limit: nil, order: nil, request_options: {})
+ #
+ # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
+ #
+ # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
+ #
+ # @param order [Symbol, OpenAI::Models::ContainerListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
+ #
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
+ #
+ # @return [OpenAI::Internal::CursorPage]
+ #
+ # @see OpenAI::Models::ContainerListParams
+ def list(params = {})
+ parsed, options = OpenAI::ContainerListParams.dump_request(params)
+ @client.request(
+ method: :get,
+ path: "containers",
+ query: parsed,
+ page: OpenAI::Internal::CursorPage,
+ model: OpenAI::Models::ContainerListResponse,
+ options: options
+ )
+ end
+
+ # Delete Container
+ #
+ # @overload delete(container_id, request_options: {})
+ #
+ # @param container_id [String] The ID of the container to delete.
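A sketch of the new containers resource (the container name is illustrative, and the response accessors assume the usual generated-model attribute readers):

  require "openai"

  client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
  container = client.containers.create(name: "scratch-space")
  puts client.containers.retrieve(container.id).name
  client.containers.list(limit: 10).auto_paging_each { |c| puts c.id }
  client.containers.delete(container.id)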
+ #
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
+ #
+ # @return [nil]
+ #
+ # @see OpenAI::Models::ContainerDeleteParams
+ def delete(container_id, params = {})
+ @client.request(
+ method: :delete,
+ path: ["containers/%1$s", container_id],
+ model: NilClass,
+ options: params[:request_options]
+ )
+ end
+
+ # @api private
+ #
+ # @param client [OpenAI::Client]
+ def initialize(client:)
+ @client = client
+ @files = OpenAI::Resources::Containers::Files.new(client: client)
+ end
+ end
+ end
+end
diff --git a/lib/openai/resources/containers/files.rb b/lib/openai/resources/containers/files.rb
new file mode 100644
index 00000000..356bead3
--- /dev/null
+++ b/lib/openai/resources/containers/files.rb
@@ -0,0 +1,135 @@
+# frozen_string_literal: true
+
+module OpenAI
+ module Resources
+ class Containers
+ class Files
+ # @return [OpenAI::Resources::Containers::Files::Content]
+ attr_reader :content
+
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Containers::FileCreateParams} for more details.
+ #
+ # Create a Container File
+ #
+ # You can send either a multipart/form-data request with the raw file content, or
+ # a JSON request with a file ID.
+ #
+ # @overload create(container_id, file: nil, file_id: nil, request_options: {})
+ #
+ # @param container_id [String]
+ #
+ # @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The File object (not file name) to be uploaded.
+ #
+ # @param file_id [String] Name of the file to create.
+ #
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
+ #
+ # @return [OpenAI::Models::Containers::FileCreateResponse]
+ #
+ # @see OpenAI::Models::Containers::FileCreateParams
+ def create(container_id, params = {})
+ parsed, options = OpenAI::Containers::FileCreateParams.dump_request(params)
+ @client.request(
+ method: :post,
+ path: ["containers/%1$s/files", container_id],
+ headers: {"content-type" => "multipart/form-data"},
+ body: parsed,
+ model: OpenAI::Models::Containers::FileCreateResponse,
+ options: options
+ )
+ end
+
+ # Retrieve Container File
+ #
+ # @overload retrieve(file_id, container_id:, request_options: {})
+ #
+ # @param file_id [String]
+ # @param container_id [String]
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
+ #
+ # @return [OpenAI::Models::Containers::FileRetrieveResponse]
+ #
+ # @see OpenAI::Models::Containers::FileRetrieveParams
+ def retrieve(file_id, params)
+ parsed, options = OpenAI::Containers::FileRetrieveParams.dump_request(params)
+ container_id =
+ parsed.delete(:container_id) do
+ raise ArgumentError.new("missing required path argument #{_1}")
+ end
+ @client.request(
+ method: :get,
+ path: ["containers/%1$s/files/%2$s", container_id, file_id],
+ model: OpenAI::Models::Containers::FileRetrieveResponse,
+ options: options
+ )
+ end
+
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Containers::FileListParams} for more details.
+ #
+ # List Container files
+ #
+ # @overload list(container_id, after: nil, limit: nil, order: nil, request_options: {})
+ #
+ # @param container_id [String]
+ #
+ # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
+ #
+ # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
+ #
+ # @param order [Symbol, OpenAI::Models::Containers::FileListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
+ #
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
+ #
+ # @return [OpenAI::Internal::CursorPage]
+ #
+ # @see OpenAI::Models::Containers::FileListParams
+ def list(container_id, params = {})
+ parsed, options = OpenAI::Containers::FileListParams.dump_request(params)
+ @client.request(
+ method: :get,
+ path: ["containers/%1$s/files", container_id],
+ query: parsed,
+ page: OpenAI::Internal::CursorPage,
+ model: OpenAI::Models::Containers::FileListResponse,
+ options: options
+ )
+ end
+
+ # Delete Container File
+ #
+ # @overload delete(file_id, container_id:, request_options: {})
+ #
+ # @param file_id [String]
+ # @param container_id [String]
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
+ #
+ # @return [nil]
+ #
+ # @see OpenAI::Models::Containers::FileDeleteParams
+ def delete(file_id, params)
+ parsed, options = OpenAI::Containers::FileDeleteParams.dump_request(params)
+ container_id =
+ parsed.delete(:container_id) do
+ raise ArgumentError.new("missing required path argument #{_1}")
+ end
+ @client.request(
+ method: :delete,
+ path: ["containers/%1$s/files/%2$s", container_id, file_id],
+ model: NilClass,
+ options: options
+ )
+ end
+
+ # @api private
+ #
+ # @param client [OpenAI::Client]
+ def initialize(client:)
+ @client = client
+ @content = OpenAI::Resources::Containers::Files::Content.new(client: client)
+ end
+ end
+ end
+ end
+end
diff --git a/lib/openai/resources/containers/files/content.rb b/lib/openai/resources/containers/files/content.rb
new file mode 100644
index 00000000..ba07f678
--- /dev/null
+++ b/lib/openai/resources/containers/files/content.rb
@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+
+module OpenAI
+ module Resources
+ class Containers
+ class Files
+ class Content
+ # Retrieve Container File Content
+ #
+ # @overload retrieve(file_id, container_id:, request_options: {})
+ #
+ # @param file_id [String]
+ # @param container_id [String]
+ # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
+ #
+ # @return [StringIO]
+ #
+ # @see OpenAI::Models::Containers::Files::ContentRetrieveParams
+ def retrieve(file_id, params)
+ parsed, options = OpenAI::Containers::Files::ContentRetrieveParams.dump_request(params)
+ container_id =
+ parsed.delete(:container_id) do
+ raise ArgumentError.new("missing required path argument #{_1}")
+ end
+ @client.request(
+ method: :get,
+ path: ["containers/%1$s/files/%2$s/content", container_id, file_id],
+ headers: {"accept" => "application/binary"},
+ model: StringIO,
+ options: options
+ )
+ end
+
+ # @api private
+ #
+ # @param client [OpenAI::Client]
+ def initialize(client:)
+ @client = client
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/lib/openai/resources/conversations.rb b/lib/openai/resources/conversations.rb
new file mode 100644
index 00000000..34d66da5
--- /dev/null
+++ b/lib/openai/resources/conversations.rb
@@ -0,0 +1,112 @@
+# frozen_string_literal: true
+
+module OpenAI
+ module Resources
+ class Conversations
+ # @return [OpenAI::Resources::Conversations::Items]
+ attr_reader :items
+
+ # Some parameter documentation has been truncated, see
+ # {OpenAI::Models::Conversations::ConversationCreateParams} for more details.
+ #
+ # Create a conversation with the given ID.
+ #
+ # @overload create(items: nil, metadata: nil, request_options: {})
+ #
+ # @param items [Array, nil] Initial items to include in the conversation context.
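For the container files and content endpoints above, a sketch of uploading a file and reading its bytes back (IDs are placeholders; the Pathname upload form is one of the accepted file inputs listed in the docs, and per the @return tag the content comes back as a StringIO):

  require "openai"
  require "pathname"

  client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
  file = client.containers.files.create("cntr_abc123", file: Pathname("notes.txt"))
  io = client.containers.files.content.retrieve(file.id, container_id: "cntr_abc123")
  puts io.read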
+ # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. Useful for + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::Conversations::Conversation] + # + # @see OpenAI::Models::Conversations::ConversationCreateParams + def create(params = {}) + parsed, options = OpenAI::Conversations::ConversationCreateParams.dump_request(params) + @client.request( + method: :post, + path: "conversations", + body: parsed, + model: OpenAI::Conversations::Conversation, + options: options + ) + end + + # Get a conversation with the given ID. + # + # @overload retrieve(conversation_id, request_options: {}) + # + # @param conversation_id [String] The ID of the conversation to retrieve. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::Conversations::Conversation] + # + # @see OpenAI::Models::Conversations::ConversationRetrieveParams + def retrieve(conversation_id, params = {}) + @client.request( + method: :get, + path: ["conversations/%1$s", conversation_id], + model: OpenAI::Conversations::Conversation, + options: params[:request_options] + ) + end + + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ConversationUpdateParams} for more details. + # + # Update a conversation's metadata with the given ID. + # + # @overload update(conversation_id, metadata:, request_options: {}) + # + # @param conversation_id [String] The ID of the conversation to update. + # + # @param metadata [Hash{Symbol=>String}] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::Conversations::Conversation] + # + # @see OpenAI::Models::Conversations::ConversationUpdateParams + def update(conversation_id, params) + parsed, options = OpenAI::Conversations::ConversationUpdateParams.dump_request(params) + @client.request( + method: :post, + path: ["conversations/%1$s", conversation_id], + body: parsed, + model: OpenAI::Conversations::Conversation, + options: options + ) + end + + # Delete a conversation with the given ID. + # + # @overload delete(conversation_id, request_options: {}) + # + # @param conversation_id [String] The ID of the conversation to delete. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::Conversations::ConversationDeletedResource] + # + # @see OpenAI::Models::Conversations::ConversationDeleteParams + def delete(conversation_id, params = {}) + @client.request( + method: :delete, + path: ["conversations/%1$s", conversation_id], + model: OpenAI::Conversations::ConversationDeletedResource, + options: params[:request_options] + ) + end + + # @api private + # + # @param client [OpenAI::Client] + def initialize(client:) + @client = client + @items = OpenAI::Resources::Conversations::Items.new(client: client) + end + end + end +end diff --git a/lib/openai/resources/conversations/items.rb b/lib/openai/resources/conversations/items.rb new file mode 100644 index 00000000..90bfac5a --- /dev/null +++ b/lib/openai/resources/conversations/items.rb @@ -0,0 +1,141 @@ +# frozen_string_literal: true + +module OpenAI + module Resources + class Conversations + class Items + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ItemCreateParams} for more details. 
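A sketch of the conversation lifecycle these methods define; the item hash follows the Responses-style input message shape and, like the `client.conversations` accessor, is an assumption not shown in this hunk:

```ruby
# `client` as constructed in the earlier sketch.
conversation = client.conversations.create(
  items: [{type: :message, role: :user, content: "Hello!"}],
  metadata: {topic: "greetings"}
)

client.conversations.update(conversation.id, metadata: {topic: "smalltalk"})
client.conversations.retrieve(conversation.id)
client.conversations.delete(conversation.id)
```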
+ # + # Create items in a conversation with the given ID. + # + # @overload create(conversation_id, items:, include: nil, request_options: {}) + # + # @param conversation_id [String] Path param: The ID of the conversation to add the item to. + # + # @param items [Array] Body param: The items to add to the conversation. You may add up to 20 items at + # + # @param include [Array] Query param: Additional fields to include in the response. See the `include` + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::Conversations::ConversationItemList] + # + # @see OpenAI::Models::Conversations::ItemCreateParams + def create(conversation_id, params) + parsed, options = OpenAI::Conversations::ItemCreateParams.dump_request(params) + query_params = [:include] + @client.request( + method: :post, + path: ["conversations/%1$s/items", conversation_id], + query: parsed.slice(*query_params), + body: parsed.except(*query_params), + model: OpenAI::Conversations::ConversationItemList, + options: options + ) + end + + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ItemRetrieveParams} for more details. + # + # Get a single item from a conversation with the given IDs. + # + # @overload retrieve(item_id, conversation_id:, include: nil, request_options: {}) + # + # @param item_id [String] Path param: The ID of the item to retrieve. + # + # @param conversation_id [String] Path param: The ID of the conversation that contains the item. + # + # @param include [Array] Query param: Additional fields to include in the response. See the `include` + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput] + # + # @see OpenAI::Models::Conversations::ItemRetrieveParams + def retrieve(item_id, params) + parsed, options = OpenAI::Conversations::ItemRetrieveParams.dump_request(params) + conversation_id = + parsed.delete(:conversation_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end + @client.request( + method: :get, + path: ["conversations/%1$s/items/%2$s", conversation_id, item_id], + query: parsed, + model: OpenAI::Conversations::ConversationItem, + options: options + ) + end + + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Conversations::ItemListParams} for more details. + # + # List all items for a conversation with the given ID. 
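One detail worth calling out in `Items#create` above: `dump_request` produces a single params hash, which the method then splits — `include` is sliced into the query string while the remaining keys (the items) are sent as the JSON body. From the caller's side everything is a plain keyword; the IDs and `include` value below are illustrative:

```ruby
items = client.conversations.items.create(
  "conv_123",
  items: [{type: :message, role: :user, content: "What is 2 + 2?"}],
  include: [:"message.output_text.logprobs"] # routed to ?include=... on the URL
)
```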
+ # + # @overload list(conversation_id, after: nil, include: nil, limit: nil, order: nil, request_options: {}) + # + # @param conversation_id [String] The ID of the conversation to list items for. + # + # @param after [String] An item ID to list items after, used in pagination. + # + # @param include [Array] Specify additional output data to include in the model response. Currently + # + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between + # + # @param order [Symbol, OpenAI::Models::Conversations::ItemListParams::Order] The order to return the input items in. Default is `desc`. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Internal::ConversationCursorPage] + # + # @see OpenAI::Models::Conversations::ItemListParams + def list(conversation_id, params = {}) + parsed, options = OpenAI::Conversations::ItemListParams.dump_request(params) + @client.request( + method: :get, + path: ["conversations/%1$s/items", conversation_id], + query: parsed, + page: OpenAI::Internal::ConversationCursorPage, + model: OpenAI::Conversations::ConversationItem, + options: options + ) + end + + # Delete an item from a conversation with the given IDs. + # + # @overload delete(item_id, conversation_id:, request_options: {}) + # + # @param item_id [String] The ID of the item to delete. + # + # @param conversation_id [String] The ID of the conversation that contains the item. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::Conversations::Conversation] + # + # @see OpenAI::Models::Conversations::ItemDeleteParams + def delete(item_id, params) + parsed, options = OpenAI::Conversations::ItemDeleteParams.dump_request(params) + conversation_id = + parsed.delete(:conversation_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end + @client.request( + method: :delete, + path: ["conversations/%1$s/items/%2$s", conversation_id, item_id], + model: OpenAI::Conversations::Conversation, + options: options + ) + end + + # @api private + # + # @param client [OpenAI::Client] + def initialize(client:) + @client = client + end + end + end + end +end diff --git a/lib/openai/resources/embeddings.rb b/lib/openai/resources/embeddings.rb index b5e6eda1..934b58cc 100644 --- a/lib/openai/resources/embeddings.rb +++ b/lib/openai/resources/embeddings.rb @@ -3,52 +3,42 @@ module OpenAI module Resources class Embeddings + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EmbeddingCreateParams} for more details. + # # Creates an embedding vector representing the input text. # - # @param params [OpenAI::Models::EmbeddingCreateParams, Hash{Symbol=>Object}] . + # @overload create(input:, model:, dimensions: nil, encoding_format: nil, user: nil, request_options: {}) # - # @option params [String, Array, Array, Array>] :input Input text to embed, encoded as a string or array of tokens. To embed multiple - # inputs in a single request, pass an array of strings or array of token arrays. - # The input must not exceed the max input tokens for the model (8192 tokens for - # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 - # dimensions or less. - # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - # for counting tokens. Some models may also impose a limit on total number of - # tokens summed across inputs. 
+ # @param input [String, Array, Array, Array>] Input text to embed, encoded as a string or array of tokens. To embed multiple i # - # @option params [String, Symbol, OpenAI::Models::EmbeddingModel] :model ID of the model to use. You can use the - # [List models](https://platform.openai.com/docs/api-reference/models/list) API to - # see all of your available models, or see our - # [Model overview](https://platform.openai.com/docs/models) for descriptions of - # them. + # @param model [String, Symbol, OpenAI::Models::EmbeddingModel] ID of the model to use. You can use the [List models](https://platform.openai.co # - # @option params [Integer] :dimensions The number of dimensions the resulting output embeddings should have. Only - # supported in `text-embedding-3` and later models. + # @param dimensions [Integer] The number of dimensions the resulting output embeddings should have. Only suppo # - # @option params [Symbol, OpenAI::Models::EmbeddingCreateParams::EncodingFormat] :encoding_format The format to return the embeddings in. Can be either `float` or - # [`base64`](https://pypi.org/project/pybase64/). + # @param encoding_format [Symbol, OpenAI::Models::EmbeddingCreateParams::EncodingFormat] The format to return the embeddings in. Can be either `float` or [`base64`](http # - # @option params [String] :user A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::CreateEmbeddingResponse] # + # @see OpenAI::Models::EmbeddingCreateParams def create(params) - parsed, options = OpenAI::Models::EmbeddingCreateParams.dump_request(params) + parsed, options = OpenAI::EmbeddingCreateParams.dump_request(params) @client.request( method: :post, path: "embeddings", body: parsed, - model: OpenAI::Models::CreateEmbeddingResponse, + model: OpenAI::CreateEmbeddingResponse, options: options ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/evals.rb b/lib/openai/resources/evals.rb new file mode 100644 index 00000000..cabdba94 --- /dev/null +++ b/lib/openai/resources/evals.rb @@ -0,0 +1,155 @@ +# frozen_string_literal: true + +module OpenAI + module Resources + class Evals + # @return [OpenAI::Resources::Evals::Runs] + attr_reader :runs + + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalCreateParams} for more details. + # + # Create the structure of an evaluation that can be used to test a model's + # performance. An evaluation is a set of testing criteria and the config for a + # data source, which dictates the schema of the data used in the evaluation. After + # creating an evaluation, you can run it on different models and model parameters. + # We support several types of graders and datasources. For more information, see + # the [Evals guide](https://platform.openai.com/docs/guides/evals). 
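The reworked `Embeddings#create` documentation corresponds to a call like the following (model name illustrative):

```ruby
response = client.embeddings.create(
  input: "The food was delicious and the waiter was friendly.",
  model: "text-embedding-3-small",
  dimensions: 256,         # only honored by text-embedding-3 and later models
  encoding_format: :float
)
vector = response.data.first.embedding
```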
+ # + # @overload create(data_source_config:, testing_criteria:, metadata: nil, name: nil, request_options: {}) + # + # @param data_source_config [OpenAI::Models::EvalCreateParams::DataSourceConfig::Custom, OpenAI::Models::EvalCreateParams::DataSourceConfig::Logs, OpenAI::Models::EvalCreateParams::DataSourceConfig::StoredCompletions] The configuration for the data source used for the evaluation runs. Dictates the + # + # @param testing_criteria [Array] A list of graders for all eval runs in this group. Graders can reference variabl + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param name [String] The name of the evaluation. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::EvalCreateResponse] + # + # @see OpenAI::Models::EvalCreateParams + def create(params) + parsed, options = OpenAI::EvalCreateParams.dump_request(params) + @client.request( + method: :post, + path: "evals", + body: parsed, + model: OpenAI::Models::EvalCreateResponse, + options: options + ) + end + + # Get an evaluation by ID. + # + # @overload retrieve(eval_id, request_options: {}) + # + # @param eval_id [String] The ID of the evaluation to retrieve. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::EvalRetrieveResponse] + # + # @see OpenAI::Models::EvalRetrieveParams + def retrieve(eval_id, params = {}) + @client.request( + method: :get, + path: ["evals/%1$s", eval_id], + model: OpenAI::Models::EvalRetrieveResponse, + options: params[:request_options] + ) + end + + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalUpdateParams} for more details. + # + # Update certain properties of an evaluation. + # + # @overload update(eval_id, metadata: nil, name: nil, request_options: {}) + # + # @param eval_id [String] The ID of the evaluation to update. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param name [String] Rename the evaluation. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::EvalUpdateResponse] + # + # @see OpenAI::Models::EvalUpdateParams + def update(eval_id, params = {}) + parsed, options = OpenAI::EvalUpdateParams.dump_request(params) + @client.request( + method: :post, + path: ["evals/%1$s", eval_id], + body: parsed, + model: OpenAI::Models::EvalUpdateResponse, + options: options + ) + end + + # Some parameter documentations has been truncated, see + # {OpenAI::Models::EvalListParams} for more details. + # + # List evaluations for a project. + # + # @overload list(after: nil, limit: nil, order: nil, order_by: nil, request_options: {}) + # + # @param after [String] Identifier for the last eval from the previous pagination request. + # + # @param limit [Integer] Number of evals to retrieve. + # + # @param order [Symbol, OpenAI::Models::EvalListParams::Order] Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for d + # + # @param order_by [Symbol, OpenAI::Models::EvalListParams::OrderBy] Evals can be ordered by creation time or last updated time. 
Use + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Internal::CursorPage] + # + # @see OpenAI::Models::EvalListParams + def list(params = {}) + parsed, options = OpenAI::EvalListParams.dump_request(params) + @client.request( + method: :get, + path: "evals", + query: parsed, + page: OpenAI::Internal::CursorPage, + model: OpenAI::Models::EvalListResponse, + options: options + ) + end + + # Delete an evaluation. + # + # @overload delete(eval_id, request_options: {}) + # + # @param eval_id [String] The ID of the evaluation to delete. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::EvalDeleteResponse] + # + # @see OpenAI::Models::EvalDeleteParams + def delete(eval_id, params = {}) + @client.request( + method: :delete, + path: ["evals/%1$s", eval_id], + model: OpenAI::Models::EvalDeleteResponse, + options: params[:request_options] + ) + end + + # @api private + # + # @param client [OpenAI::Client] + def initialize(client:) + @client = client + @runs = OpenAI::Resources::Evals::Runs.new(client: client) + end + end + end +end diff --git a/lib/openai/resources/evals/runs.rb b/lib/openai/resources/evals/runs.rb new file mode 100644 index 00000000..68e1590e --- /dev/null +++ b/lib/openai/resources/evals/runs.rb @@ -0,0 +1,168 @@ +# frozen_string_literal: true + +module OpenAI + module Resources + class Evals + class Runs + # @return [OpenAI::Resources::Evals::Runs::OutputItems] + attr_reader :output_items + + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunCreateParams} for more details. + # + # Kicks off a new run for a given evaluation, specifying the data source, and what + # model configuration to use to test. The datasource will be validated against the + # schema specified in the config of the evaluation. + # + # @overload create(eval_id, data_source:, metadata: nil, name: nil, request_options: {}) + # + # @param eval_id [String] The ID of the evaluation to create a run for. + # + # @param data_source [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource] Details about the run's data source. + # + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be + # + # @param name [String] The name of the run. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::Evals::RunCreateResponse] + # + # @see OpenAI::Models::Evals::RunCreateParams + def create(eval_id, params) + parsed, options = OpenAI::Evals::RunCreateParams.dump_request(params) + @client.request( + method: :post, + path: ["evals/%1$s/runs", eval_id], + body: parsed, + model: OpenAI::Models::Evals::RunCreateResponse, + options: options + ) + end + + # Get an evaluation run by ID. + # + # @overload retrieve(run_id, eval_id:, request_options: {}) + # + # @param run_id [String] The ID of the run to retrieve. + # + # @param eval_id [String] The ID of the evaluation to retrieve runs for. 
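Because `list` returns an `OpenAI::Internal::CursorPage`, callers can lean on the SDK's pagination helper instead of threading the `after` cursor by hand — `auto_paging_each` here is the helper the page classes are assumed to expose:

```ruby
client.evals.list(order: :desc, limit: 20).auto_paging_each do |evaluation|
  puts "#{evaluation.id}: #{evaluation.name}"
end
```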
+ # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::Evals::RunRetrieveResponse] + # + # @see OpenAI::Models::Evals::RunRetrieveParams + def retrieve(run_id, params) + parsed, options = OpenAI::Evals::RunRetrieveParams.dump_request(params) + eval_id = + parsed.delete(:eval_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end + @client.request( + method: :get, + path: ["evals/%1$s/runs/%2$s", eval_id, run_id], + model: OpenAI::Models::Evals::RunRetrieveResponse, + options: options + ) + end + + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::RunListParams} for more details. + # + # Get a list of runs for an evaluation. + # + # @overload list(eval_id, after: nil, limit: nil, order: nil, status: nil, request_options: {}) + # + # @param eval_id [String] The ID of the evaluation to retrieve runs for. + # + # @param after [String] Identifier for the last run from the previous pagination request. + # + # @param limit [Integer] Number of runs to retrieve. + # + # @param order [Symbol, OpenAI::Models::Evals::RunListParams::Order] Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for de + # + # @param status [Symbol, OpenAI::Models::Evals::RunListParams::Status] Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed` + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Internal::CursorPage] + # + # @see OpenAI::Models::Evals::RunListParams + def list(eval_id, params = {}) + parsed, options = OpenAI::Evals::RunListParams.dump_request(params) + @client.request( + method: :get, + path: ["evals/%1$s/runs", eval_id], + query: parsed, + page: OpenAI::Internal::CursorPage, + model: OpenAI::Models::Evals::RunListResponse, + options: options + ) + end + + # Delete an eval run. + # + # @overload delete(run_id, eval_id:, request_options: {}) + # + # @param run_id [String] The ID of the run to delete. + # + # @param eval_id [String] The ID of the evaluation to delete the run from. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::Evals::RunDeleteResponse] + # + # @see OpenAI::Models::Evals::RunDeleteParams + def delete(run_id, params) + parsed, options = OpenAI::Evals::RunDeleteParams.dump_request(params) + eval_id = + parsed.delete(:eval_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end + @client.request( + method: :delete, + path: ["evals/%1$s/runs/%2$s", eval_id, run_id], + model: OpenAI::Models::Evals::RunDeleteResponse, + options: options + ) + end + + # Cancel an ongoing evaluation run. + # + # @overload cancel(run_id, eval_id:, request_options: {}) + # + # @param run_id [String] The ID of the run to cancel. + # + # @param eval_id [String] The ID of the evaluation whose run you want to cancel. 
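The nested run endpoints all share one pattern: the innermost ID is positional, while the parent `eval_id:` is a required keyword that the method deletes back out of the dumped params, raising when it is absent. In practice (placeholder IDs):

```ruby
run = client.evals.runs.retrieve("run_123", eval_id: "eval_456")
client.evals.runs.cancel("run_123", eval_id: "eval_456")

client.evals.runs.retrieve("run_123", {})
# => ArgumentError: missing required path argument eval_id
```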
+ # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::Evals::RunCancelResponse] + # + # @see OpenAI::Models::Evals::RunCancelParams + def cancel(run_id, params) + parsed, options = OpenAI::Evals::RunCancelParams.dump_request(params) + eval_id = + parsed.delete(:eval_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end + @client.request( + method: :post, + path: ["evals/%1$s/runs/%2$s", eval_id, run_id], + model: OpenAI::Models::Evals::RunCancelResponse, + options: options + ) + end + + # @api private + # + # @param client [OpenAI::Client] + def initialize(client:) + @client = client + @output_items = OpenAI::Resources::Evals::Runs::OutputItems.new(client: client) + end + end + end + end +end diff --git a/lib/openai/resources/evals/runs/output_items.rb b/lib/openai/resources/evals/runs/output_items.rb new file mode 100644 index 00000000..41f665d8 --- /dev/null +++ b/lib/openai/resources/evals/runs/output_items.rb @@ -0,0 +1,91 @@ +# frozen_string_literal: true + +module OpenAI + module Resources + class Evals + class Runs + class OutputItems + # Get an evaluation run output item by ID. + # + # @overload retrieve(output_item_id, eval_id:, run_id:, request_options: {}) + # + # @param output_item_id [String] The ID of the output item to retrieve. + # + # @param eval_id [String] The ID of the evaluation to retrieve runs for. + # + # @param run_id [String] The ID of the run to retrieve. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse] + # + # @see OpenAI::Models::Evals::Runs::OutputItemRetrieveParams + def retrieve(output_item_id, params) + parsed, options = OpenAI::Evals::Runs::OutputItemRetrieveParams.dump_request(params) + eval_id = + parsed.delete(:eval_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end + run_id = + parsed.delete(:run_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end + @client.request( + method: :get, + path: ["evals/%1$s/runs/%2$s/output_items/%3$s", eval_id, run_id, output_item_id], + model: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse, + options: options + ) + end + + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Evals::Runs::OutputItemListParams} for more details. + # + # Get a list of output items for an evaluation run. + # + # @overload list(run_id, eval_id:, after: nil, limit: nil, order: nil, status: nil, request_options: {}) + # + # @param run_id [String] Path param: The ID of the run to retrieve output items for. + # + # @param eval_id [String] Path param: The ID of the evaluation to retrieve runs for. + # + # @param after [String] Query param: Identifier for the last output item from the previous pagination re + # + # @param limit [Integer] Query param: Number of output items to retrieve. + # + # @param order [Symbol, OpenAI::Models::Evals::Runs::OutputItemListParams::Order] Query param: Sort order for output items by timestamp. Use `asc` for ascending o + # + # @param status [Symbol, OpenAI::Models::Evals::Runs::OutputItemListParams::Status] Query param: Filter output items by status. 
Use `failed` to filter by failed out + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Internal::CursorPage] + # + # @see OpenAI::Models::Evals::Runs::OutputItemListParams + def list(run_id, params) + parsed, options = OpenAI::Evals::Runs::OutputItemListParams.dump_request(params) + eval_id = + parsed.delete(:eval_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end + @client.request( + method: :get, + path: ["evals/%1$s/runs/%2$s/output_items", eval_id, run_id], + query: parsed, + page: OpenAI::Internal::CursorPage, + model: OpenAI::Models::Evals::Runs::OutputItemListResponse, + options: options + ) + end + + # @api private + # + # @param client [OpenAI::Client] + def initialize(client:) + @client = client + end + end + end + end + end +end diff --git a/lib/openai/resources/files.rb b/lib/openai/resources/files.rb index 2164c02b..52d9a808 100644 --- a/lib/openai/resources/files.rb +++ b/lib/openai/resources/files.rb @@ -3,146 +3,152 @@ module OpenAI module Resources class Files + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FileCreateParams} for more details. + # # Upload a file that can be used across various endpoints. Individual files can be - # up to 512 MB, and the size of all files uploaded by one organization can be up - # to 100 GB. + # up to 512 MB, and the size of all files uploaded by one organization can be up + # to 1 TB. + # + # The Assistants API supports files up to 2 million tokens and of specific file + # types. See the + # [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for + # details. # - # The Assistants API supports files up to 2 million tokens and of specific file - # types. See the - # [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for - # details. + # The Fine-tuning API only supports `.jsonl` files. The input also has certain + # required formats for fine-tuning + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # models. # - # The Fine-tuning API only supports `.jsonl` files. The input also has certain - # required formats for fine-tuning - # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or - # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) - # models. + # The Batch API only supports `.jsonl` files up to 200 MB in size. The input also + # has a specific required + # [format](https://platform.openai.com/docs/api-reference/batch/request-input). # - # The Batch API only supports `.jsonl` files up to 200 MB in size. The input also - # has a specific required - # [format](https://platform.openai.com/docs/api-reference/batch/request-input). + # Please [contact us](https://help.openai.com/) if you need to increase these + # storage limits. # - # Please [contact us](https://help.openai.com/) if you need to increase these - # storage limits. + # @overload create(file:, purpose:, expires_after: nil, request_options: {}) # - # @param params [OpenAI::Models::FileCreateParams, Hash{Symbol=>Object}] . + # @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The File object (not file name) to be uploaded. # - # @option params [IO, StringIO] :file The File object (not file name) to be uploaded. + # @param purpose [Symbol, OpenAI::Models::FilePurpose] The intended purpose of the uploaded file. 
One of: - `assistants`: Used in the A # - # @option params [Symbol, OpenAI::Models::FilePurpose] :purpose The intended purpose of the uploaded file. One of: - `assistants`: Used in the - # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for - # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: - # Flexible file type for any purpose - `evals`: Used for eval data sets + # @param expires_after [OpenAI::Models::FileCreateParams::ExpiresAfter] The expiration policy for a file. By default, files with `purpose=batch` expire # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::FileObject] # + # @see OpenAI::Models::FileCreateParams def create(params) - parsed, options = OpenAI::Models::FileCreateParams.dump_request(params) + parsed, options = OpenAI::FileCreateParams.dump_request(params) @client.request( method: :post, path: "files", headers: {"content-type" => "multipart/form-data"}, body: parsed, - model: OpenAI::Models::FileObject, + model: OpenAI::FileObject, options: options ) end # Returns information about a specific file. # - # @param file_id [String] The ID of the file to use for this request. + # @overload retrieve(file_id, request_options: {}) # - # @param params [OpenAI::Models::FileRetrieveParams, Hash{Symbol=>Object}] . + # @param file_id [String] The ID of the file to use for this request. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::FileObject] # + # @see OpenAI::Models::FileRetrieveParams def retrieve(file_id, params = {}) @client.request( method: :get, - path: ["files/%0s", file_id], - model: OpenAI::Models::FileObject, + path: ["files/%1$s", file_id], + model: OpenAI::FileObject, options: params[:request_options] ) end + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FileListParams} for more details. + # # Returns a list of files. # - # @param params [OpenAI::Models::FileListParams, Hash{Symbol=>Object}] . + # @overload list(after: nil, limit: nil, order: nil, purpose: nil, request_options: {}) # - # @option params [String] :after A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place # - # @option params [Integer] :limit A limit on the number of objects to be returned. Limit can range between 1 and - # 10,000, and the default is 10,000. + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1 # - # @option params [Symbol, OpenAI::Models::FileListParams::Order] :order Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # @param order [Symbol, OpenAI::Models::FileListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord # - # @option params [String] :purpose Only return files with the given purpose. + # @param purpose [String] Only return files with the given purpose. 
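Tying the updated file endpoints together — the `%1$s` path templates and the `StringIO` return type for binary content; `purpose` values come from `OpenAI::Models::FilePurpose`:

```ruby
jsonl = client.files.create(file: Pathname("train.jsonl"), purpose: :"fine-tune")

# Downloads are now typed as StringIO rather than an untyped Unknown.
io = client.files.content(jsonl.id)
File.binwrite("train-copy.jsonl", io.read)
```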
# - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::CursorPage] + # @return [OpenAI::Internal::CursorPage] # + # @see OpenAI::Models::FileListParams def list(params = {}) - parsed, options = OpenAI::Models::FileListParams.dump_request(params) + parsed, options = OpenAI::FileListParams.dump_request(params) @client.request( method: :get, path: "files", query: parsed, - page: OpenAI::CursorPage, - model: OpenAI::Models::FileObject, + page: OpenAI::Internal::CursorPage, + model: OpenAI::FileObject, options: options ) end # Delete a file. # - # @param file_id [String] The ID of the file to use for this request. + # @overload delete(file_id, request_options: {}) # - # @param params [OpenAI::Models::FileDeleteParams, Hash{Symbol=>Object}] . + # @param file_id [String] The ID of the file to use for this request. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::FileDeleted] # + # @see OpenAI::Models::FileDeleteParams def delete(file_id, params = {}) @client.request( method: :delete, - path: ["files/%0s", file_id], - model: OpenAI::Models::FileDeleted, + path: ["files/%1$s", file_id], + model: OpenAI::FileDeleted, options: params[:request_options] ) end # Returns the contents of the specified file. # - # @param file_id [String] The ID of the file to use for this request. + # @overload content(file_id, request_options: {}) # - # @param params [OpenAI::Models::FileContentParams, Hash{Symbol=>Object}] . + # @param file_id [String] The ID of the file to use for this request. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [Object] + # @return [StringIO] # + # @see OpenAI::Models::FileContentParams def content(file_id, params = {}) @client.request( method: :get, - path: ["files/%0s/content", file_id], + path: ["files/%1$s/content", file_id], headers: {"accept" => "application/binary"}, - model: OpenAI::Unknown, + model: StringIO, options: params[:request_options] ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/fine_tuning.rb b/lib/openai/resources/fine_tuning.rb index e7a161e4..9f5fc2c0 100644 --- a/lib/openai/resources/fine_tuning.rb +++ b/lib/openai/resources/fine_tuning.rb @@ -3,14 +3,27 @@ module OpenAI module Resources class FineTuning + # @return [OpenAI::Resources::FineTuning::Methods] + attr_reader :methods_ + # @return [OpenAI::Resources::FineTuning::Jobs] attr_reader :jobs - # @param client [OpenAI::Client] + # @return [OpenAI::Resources::FineTuning::Checkpoints] + attr_reader :checkpoints + + # @return [OpenAI::Resources::FineTuning::Alpha] + attr_reader :alpha + + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client + @methods_ = OpenAI::Resources::FineTuning::Methods.new(client: client) @jobs = OpenAI::Resources::FineTuning::Jobs.new(client: client) + @checkpoints = OpenAI::Resources::FineTuning::Checkpoints.new(client: client) + @alpha = OpenAI::Resources::FineTuning::Alpha.new(client: client) end end end diff --git a/lib/openai/resources/fine_tuning/alpha.rb b/lib/openai/resources/fine_tuning/alpha.rb new file 
mode 100644 index 00000000..4b3d6bc8 --- /dev/null +++ b/lib/openai/resources/fine_tuning/alpha.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module OpenAI + module Resources + class FineTuning + class Alpha + # @return [OpenAI::Resources::FineTuning::Alpha::Graders] + attr_reader :graders + + # @api private + # + # @param client [OpenAI::Client] + def initialize(client:) + @client = client + @graders = OpenAI::Resources::FineTuning::Alpha::Graders.new(client: client) + end + end + end + end +end diff --git a/lib/openai/resources/fine_tuning/alpha/graders.rb b/lib/openai/resources/fine_tuning/alpha/graders.rb new file mode 100644 index 00000000..ce7775c2 --- /dev/null +++ b/lib/openai/resources/fine_tuning/alpha/graders.rb @@ -0,0 +1,69 @@ +# frozen_string_literal: true + +module OpenAI + module Resources + class FineTuning + class Alpha + class Graders + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::Alpha::GraderRunParams} for more details. + # + # Run a grader. + # + # @overload run(grader:, model_sample:, item: nil, request_options: {}) + # + # @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job. + # + # @param model_sample [String] The model sample to be evaluated. This value will be used to populate + # + # @param item [Object] The dataset item provided to the grader. This will be used to populate + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::FineTuning::Alpha::GraderRunResponse] + # + # @see OpenAI::Models::FineTuning::Alpha::GraderRunParams + def run(params) + parsed, options = OpenAI::FineTuning::Alpha::GraderRunParams.dump_request(params) + @client.request( + method: :post, + path: "fine_tuning/alpha/graders/run", + body: parsed, + model: OpenAI::Models::FineTuning::Alpha::GraderRunResponse, + options: options + ) + end + + # Validate a grader. + # + # @overload validate(grader:, request_options: {}) + # + # @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job. 
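A hedged sketch of running a grader out of band via `graders.run`; the string-check grader hash mirrors the public grader schema, and its exact field names should be treated as assumptions since the model definitions fall outside this hunk:

```ruby
result = client.fine_tuning.alpha.graders.run(
  grader: {
    type: :string_check,
    name: "exact-match",
    input: "{{sample.output_text}}", # populated from model_sample
    reference: "{{item.answer}}",    # populated from item
    operation: :eq
  },
  model_sample: "4",
  item: {answer: "4"}
)
puts result.reward
```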
+ # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::FineTuning::Alpha::GraderValidateResponse] + # + # @see OpenAI::Models::FineTuning::Alpha::GraderValidateParams + def validate(params) + parsed, options = OpenAI::FineTuning::Alpha::GraderValidateParams.dump_request(params) + @client.request( + method: :post, + path: "fine_tuning/alpha/graders/validate", + body: parsed, + model: OpenAI::Models::FineTuning::Alpha::GraderValidateResponse, + options: options + ) + end + + # @api private + # + # @param client [OpenAI::Client] + def initialize(client:) + @client = client + end + end + end + end + end +end diff --git a/lib/openai/resources/fine_tuning/checkpoints.rb b/lib/openai/resources/fine_tuning/checkpoints.rb new file mode 100644 index 00000000..617521cc --- /dev/null +++ b/lib/openai/resources/fine_tuning/checkpoints.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module OpenAI + module Resources + class FineTuning + class Checkpoints + # @return [OpenAI::Resources::FineTuning::Checkpoints::Permissions] + attr_reader :permissions + + # @api private + # + # @param client [OpenAI::Client] + def initialize(client:) + @client = client + @permissions = OpenAI::Resources::FineTuning::Checkpoints::Permissions.new(client: client) + end + end + end + end +end diff --git a/lib/openai/resources/fine_tuning/checkpoints/permissions.rb b/lib/openai/resources/fine_tuning/checkpoints/permissions.rb new file mode 100644 index 00000000..f0afec14 --- /dev/null +++ b/lib/openai/resources/fine_tuning/checkpoints/permissions.rb @@ -0,0 +1,125 @@ +# frozen_string_literal: true + +module OpenAI + module Resources + class FineTuning + class Checkpoints + class Permissions + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::Checkpoints::PermissionCreateParams} for more + # details. + # + # **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys). + # + # This enables organization owners to share fine-tuned models with other projects + # in their organization. + # + # @overload create(fine_tuned_model_checkpoint, project_ids:, request_options: {}) + # + # @param fine_tuned_model_checkpoint [String] The ID of the fine-tuned model checkpoint to create a permission for. + # + # @param project_ids [Array] The project identifiers to grant access to. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Internal::Page] + # + # @see OpenAI::Models::FineTuning::Checkpoints::PermissionCreateParams + def create(fine_tuned_model_checkpoint, params) + parsed, options = OpenAI::FineTuning::Checkpoints::PermissionCreateParams.dump_request(params) + @client.request( + method: :post, + path: ["fine_tuning/checkpoints/%1$s/permissions", fine_tuned_model_checkpoint], + body: parsed, + page: OpenAI::Internal::Page, + model: OpenAI::Models::FineTuning::Checkpoints::PermissionCreateResponse, + options: options + ) + end + + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams} for more + # details. + # + # **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + # + # Organization owners can use this endpoint to view all permissions for a + # fine-tuned model checkpoint. 
+ # + # @overload retrieve(fine_tuned_model_checkpoint, after: nil, limit: nil, order: nil, project_id: nil, request_options: {}) + # + # @param fine_tuned_model_checkpoint [String] The ID of the fine-tuned model checkpoint to get permissions for. + # + # @param after [String] Identifier for the last permission ID from the previous pagination request. + # + # @param limit [Integer] Number of permissions to retrieve. + # + # @param order [Symbol, OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams::Order] The order in which to retrieve permissions. + # + # @param project_id [String] The ID of the project to get permissions for. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse] + # + # @see OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams + def retrieve(fine_tuned_model_checkpoint, params = {}) + parsed, options = OpenAI::FineTuning::Checkpoints::PermissionRetrieveParams.dump_request(params) + @client.request( + method: :get, + path: ["fine_tuning/checkpoints/%1$s/permissions", fine_tuned_model_checkpoint], + query: parsed, + model: OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse, + options: options + ) + end + + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::Checkpoints::PermissionDeleteParams} for more + # details. + # + # **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + # + # Organization owners can use this endpoint to delete a permission for a + # fine-tuned model checkpoint. + # + # @overload delete(permission_id, fine_tuned_model_checkpoint:, request_options: {}) + # + # @param permission_id [String] The ID of the fine-tuned model checkpoint permission to delete. + # + # @param fine_tuned_model_checkpoint [String] The ID of the fine-tuned model checkpoint to delete a permission for. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::FineTuning::Checkpoints::PermissionDeleteResponse] + # + # @see OpenAI::Models::FineTuning::Checkpoints::PermissionDeleteParams + def delete(permission_id, params) + parsed, options = OpenAI::FineTuning::Checkpoints::PermissionDeleteParams.dump_request(params) + fine_tuned_model_checkpoint = + parsed.delete(:fine_tuned_model_checkpoint) do + raise ArgumentError.new("missing required path argument #{_1}") + end + @client.request( + method: :delete, + path: [ + "fine_tuning/checkpoints/%1$s/permissions/%2$s", + fine_tuned_model_checkpoint, + permission_id + ], + model: OpenAI::Models::FineTuning::Checkpoints::PermissionDeleteResponse, + options: options + ) + end + + # @api private + # + # @param client [OpenAI::Client] + def initialize(client:) + @client = client + end + end + end + end + end +end diff --git a/lib/openai/resources/fine_tuning/jobs.rb b/lib/openai/resources/fine_tuning/jobs.rb index bcfa238e..9db01f0c 100644 --- a/lib/openai/resources/fine_tuning/jobs.rb +++ b/lib/openai/resources/fine_tuning/jobs.rb @@ -7,184 +7,210 @@ class Jobs # @return [OpenAI::Resources::FineTuning::Jobs::Checkpoints] attr_reader :checkpoints - # Creates a fine-tuning job which begins the process of creating a new model from - # a given dataset. - # - # Response includes details of the enqueued job including job status and the name - # of the fine-tuned models once complete. 
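These checkpoint-permission endpoints need an admin API key, and — unusually for a POST — `create` is declared to return an `OpenAI::Internal::Page` of permission objects. A sketch with placeholder IDs:

```ruby
admin = OpenAI::Client.new(api_key: ENV["OPENAI_ADMIN_KEY"])

ckpt = "ft:gpt-4o-mini-2024-07-18:org:custom:abc123"
admin.fine_tuning.checkpoints.permissions.create(ckpt, project_ids: ["proj_123"])

perms = admin.fine_tuning.checkpoints.permissions.retrieve(ckpt, limit: 10)
```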
- # - # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) - # - # @param params [OpenAI::Models::FineTuning::JobCreateParams, Hash{Symbol=>Object}] . - # - # @option params [String, Symbol, OpenAI::Models::FineTuning::JobCreateParams::Model::Preset] :model The name of the model to fine-tune. You can select one of the - # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). - # - # @option params [String] :training_file The ID of an uploaded file that contains training data. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::JobCreateParams} for more details. # - # See [upload file](https://platform.openai.com/docs/api-reference/files/create) - # for how to upload a file. - # - # Your dataset must be formatted as a JSONL file. Additionally, you must upload - # your file with the purpose `fine-tune`. - # - # The contents of the file should differ depending on if the model uses the - # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), - # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) - # format, or if the fine-tuning method uses the - # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) - # format. - # - # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) - # for more details. + # Creates a fine-tuning job which begins the process of creating a new model from + # a given dataset. # - # @option params [OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters] :hyperparameters The hyperparameters used for the fine-tuning job. This value is now deprecated - # in favor of `method`, and should be passed in under the `method` parameter. + # Response includes details of the enqueued job including job status and the name + # of the fine-tuned models once complete. # - # @option params [Array, nil] :integrations A list of integrations to enable for your fine-tuning job. + # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization) # - # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # @overload create(model:, training_file:, hyperparameters: nil, integrations: nil, metadata: nil, method_: nil, seed: nil, suffix: nil, validation_file: nil, request_options: {}) # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # @param model [String, Symbol, OpenAI::Models::FineTuning::JobCreateParams::Model] The name of the model to fine-tune. You can select one of the # - # @option params [OpenAI::Models::FineTuning::JobCreateParams::Method] :method_ The method used for fine-tuning. + # @param training_file [String] The ID of an uploaded file that contains training data. # - # @option params [Integer, nil] :seed The seed controls the reproducibility of the job. Passing in the same seed and - # job parameters should produce the same results, but may differ in rare cases. If - # a seed is not specified, one will be generated for you. + # @param hyperparameters [OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters] The hyperparameters used for the fine-tuning job. 
# - # @option params [String, nil] :suffix A string of up to 64 characters that will be added to your fine-tuned model - # name. + # @param integrations [Array, nil] A list of integrations to enable for your fine-tuning job. # - # For example, a `suffix` of "custom-model-name" would produce a model name like - # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # - # @option params [String, nil] :validation_file The ID of an uploaded file that contains validation data. + # @param method_ [OpenAI::Models::FineTuning::JobCreateParams::Method] The method used for fine-tuning. # - # If you provide this file, the data is used to generate validation metrics - # periodically during fine-tuning. These metrics can be viewed in the fine-tuning - # results file. The same data should not be present in both train and validation - # files. + # @param seed [Integer, nil] The seed controls the reproducibility of the job. Passing in the same seed and j # - # Your dataset must be formatted as a JSONL file. You must upload your file with - # the purpose `fine-tune`. + # @param suffix [String, nil] A string of up to 64 characters that will be added to your fine-tuned model name # - # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) - # for more details. + # @param validation_file [String, nil] The ID of an uploaded file that contains validation data. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::FineTuning::FineTuningJob] # + # @see OpenAI::Models::FineTuning::JobCreateParams def create(params) - parsed, options = OpenAI::Models::FineTuning::JobCreateParams.dump_request(params) + parsed, options = OpenAI::FineTuning::JobCreateParams.dump_request(params) @client.request( method: :post, path: "fine_tuning/jobs", body: parsed, - model: OpenAI::Models::FineTuning::FineTuningJob, + model: OpenAI::FineTuning::FineTuningJob, options: options ) end + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::JobRetrieveParams} for more details. + # # Get info about a fine-tuning job. # - # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization) # - # @param fine_tuning_job_id [String] The ID of the fine-tuning job. + # @overload retrieve(fine_tuning_job_id, request_options: {}) # - # @param params [OpenAI::Models::FineTuning::JobRetrieveParams, Hash{Symbol=>Object}] . + # @param fine_tuning_job_id [String] The ID of the fine-tuning job. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::FineTuning::FineTuningJob] # + # @see OpenAI::Models::FineTuning::JobRetrieveParams def retrieve(fine_tuning_job_id, params = {}) @client.request( method: :get, - path: ["fine_tuning/jobs/%0s", fine_tuning_job_id], - model: OpenAI::Models::FineTuning::FineTuningJob, + path: ["fine_tuning/jobs/%1$s", fine_tuning_job_id], + model: OpenAI::FineTuning::FineTuningJob, options: params[:request_options] ) end + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::JobListParams} for more details. 
+ # # List your organization's fine-tuning jobs # - # @param params [OpenAI::Models::FineTuning::JobListParams, Hash{Symbol=>Object}] . + # @overload list(after: nil, limit: nil, metadata: nil, request_options: {}) # - # @option params [String] :after Identifier for the last job from the previous pagination request. + # @param after [String] Identifier for the last job from the previous pagination request. # - # @option params [Integer] :limit Number of fine-tuning jobs to retrieve. + # @param limit [Integer] Number of fine-tuning jobs to retrieve. # - # @option params [Hash{Symbol=>String}, nil] :metadata Optional metadata filter. To filter, use the syntax `metadata[k]=v`. - # Alternatively, set `metadata=null` to indicate no metadata. + # @param metadata [Hash{Symbol=>String}, nil] Optional metadata filter. To filter, use the syntax `metadata[k]=v`. Alternative # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::CursorPage] + # @return [OpenAI::Internal::CursorPage] # + # @see OpenAI::Models::FineTuning::JobListParams def list(params = {}) - parsed, options = OpenAI::Models::FineTuning::JobListParams.dump_request(params) + parsed, options = OpenAI::FineTuning::JobListParams.dump_request(params) @client.request( method: :get, path: "fine_tuning/jobs", query: parsed, - page: OpenAI::CursorPage, - model: OpenAI::Models::FineTuning::FineTuningJob, + page: OpenAI::Internal::CursorPage, + model: OpenAI::FineTuning::FineTuningJob, options: options ) end + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::JobCancelParams} for more details. + # # Immediately cancel a fine-tune job. # - # @param fine_tuning_job_id [String] The ID of the fine-tuning job to cancel. + # @overload cancel(fine_tuning_job_id, request_options: {}) # - # @param params [OpenAI::Models::FineTuning::JobCancelParams, Hash{Symbol=>Object}] . + # @param fine_tuning_job_id [String] The ID of the fine-tuning job to cancel. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::FineTuning::FineTuningJob] # + # @see OpenAI::Models::FineTuning::JobCancelParams def cancel(fine_tuning_job_id, params = {}) @client.request( method: :post, - path: ["fine_tuning/jobs/%0s/cancel", fine_tuning_job_id], - model: OpenAI::Models::FineTuning::FineTuningJob, + path: ["fine_tuning/jobs/%1$s/cancel", fine_tuning_job_id], + model: OpenAI::FineTuning::FineTuningJob, options: params[:request_options] ) end + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::JobListEventsParams} for more details. + # # Get status updates for a fine-tuning job. # - # @param fine_tuning_job_id [String] The ID of the fine-tuning job to get events for. + # @overload list_events(fine_tuning_job_id, after: nil, limit: nil, request_options: {}) # - # @param params [OpenAI::Models::FineTuning::JobListEventsParams, Hash{Symbol=>Object}] . + # @param fine_tuning_job_id [String] The ID of the fine-tuning job to get events for. # - # @option params [String] :after Identifier for the last event from the previous pagination request. + # @param after [String] Identifier for the last event from the previous pagination request. # - # @option params [Integer] :limit Number of events to retrieve. 
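Putting the job endpoints together (model name and training-file ID are illustrative):

```ruby
job = client.fine_tuning.jobs.create(
  model: "gpt-4o-mini-2024-07-18",
  training_file: jsonl.id, # uploaded earlier with purpose :"fine-tune"
  suffix: "custom-model-name"
)

client.fine_tuning.jobs.list_events(job.id, limit: 10).auto_paging_each do |event|
  puts event.message
end
```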
+ # @param limit [Integer] Number of events to retrieve. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::CursorPage] + # @return [OpenAI::Internal::CursorPage] # + # @see OpenAI::Models::FineTuning::JobListEventsParams def list_events(fine_tuning_job_id, params = {}) - parsed, options = OpenAI::Models::FineTuning::JobListEventsParams.dump_request(params) + parsed, options = OpenAI::FineTuning::JobListEventsParams.dump_request(params) @client.request( method: :get, - path: ["fine_tuning/jobs/%0s/events", fine_tuning_job_id], + path: ["fine_tuning/jobs/%1$s/events", fine_tuning_job_id], query: parsed, - page: OpenAI::CursorPage, - model: OpenAI::Models::FineTuning::FineTuningJobEvent, + page: OpenAI::Internal::CursorPage, + model: OpenAI::FineTuning::FineTuningJobEvent, options: options ) end - # @param client [OpenAI::Client] + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::JobPauseParams} for more details. + # + # Pause a fine-tune job. + # + # @overload pause(fine_tuning_job_id, request_options: {}) + # + # @param fine_tuning_job_id [String] The ID of the fine-tuning job to pause. # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::FineTuning::FineTuningJob] + # + # @see OpenAI::Models::FineTuning::JobPauseParams + def pause(fine_tuning_job_id, params = {}) + @client.request( + method: :post, + path: ["fine_tuning/jobs/%1$s/pause", fine_tuning_job_id], + model: OpenAI::FineTuning::FineTuningJob, + options: params[:request_options] + ) + end + + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::JobResumeParams} for more details. + # + # Resume a fine-tune job. + # + # @overload resume(fine_tuning_job_id, request_options: {}) + # + # @param fine_tuning_job_id [String] The ID of the fine-tuning job to resume. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::FineTuning::FineTuningJob] + # + # @see OpenAI::Models::FineTuning::JobResumeParams + def resume(fine_tuning_job_id, params = {}) + @client.request( + method: :post, + path: ["fine_tuning/jobs/%1$s/resume", fine_tuning_job_id], + model: OpenAI::FineTuning::FineTuningJob, + options: params[:request_options] + ) + end + + # @api private + # + # @param client [OpenAI::Client] def initialize(client:) @client = client @checkpoints = OpenAI::Resources::FineTuning::Jobs::Checkpoints.new(client: client) diff --git a/lib/openai/resources/fine_tuning/jobs/checkpoints.rb b/lib/openai/resources/fine_tuning/jobs/checkpoints.rb index e9f2d303..0483645d 100644 --- a/lib/openai/resources/fine_tuning/jobs/checkpoints.rb +++ b/lib/openai/resources/fine_tuning/jobs/checkpoints.rb @@ -5,34 +5,39 @@ module Resources class FineTuning class Jobs class Checkpoints + # Some parameter documentations has been truncated, see + # {OpenAI::Models::FineTuning::Jobs::CheckpointListParams} for more details. + # # List checkpoints for a fine-tuning job. # - # @param fine_tuning_job_id [String] The ID of the fine-tuning job to get checkpoints for. + # @overload list(fine_tuning_job_id, after: nil, limit: nil, request_options: {}) # - # @param params [OpenAI::Models::FineTuning::Jobs::CheckpointListParams, Hash{Symbol=>Object}] . + # @param fine_tuning_job_id [String] The ID of the fine-tuning job to get checkpoints for. 
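`pause` and `resume` are new in this diff, both POSTing to `fine_tuning/jobs/{id}/...` and returning the updated job. A short sketch; the job ID is a placeholder:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

paused = client.fine_tuning.jobs.pause("ftjob-abc123")
puts paused.status

resumed = client.fine_tuning.jobs.resume("ftjob-abc123")
puts resumed.status
```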
# - # @option params [String] :after Identifier for the last checkpoint ID from the previous pagination request. + # @param after [String] Identifier for the last checkpoint ID from the previous pagination request. # - # @option params [Integer] :limit Number of checkpoints to retrieve. + # @param limit [Integer] Number of checkpoints to retrieve. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::CursorPage] + # @return [OpenAI::Internal::CursorPage] # + # @see OpenAI::Models::FineTuning::Jobs::CheckpointListParams def list(fine_tuning_job_id, params = {}) - parsed, options = OpenAI::Models::FineTuning::Jobs::CheckpointListParams.dump_request(params) + parsed, options = OpenAI::FineTuning::Jobs::CheckpointListParams.dump_request(params) @client.request( method: :get, - path: ["fine_tuning/jobs/%0s/checkpoints", fine_tuning_job_id], + path: ["fine_tuning/jobs/%1$s/checkpoints", fine_tuning_job_id], query: parsed, - page: OpenAI::CursorPage, - model: OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint, + page: OpenAI::Internal::CursorPage, + model: OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint, options: options ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/fine_tuning/methods.rb b/lib/openai/resources/fine_tuning/methods.rb new file mode 100644 index 00000000..fcc6d076 --- /dev/null +++ b/lib/openai/resources/fine_tuning/methods.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +module OpenAI + module Resources + class FineTuning + class Methods + # @api private + # + # @param client [OpenAI::Client] + def initialize(client:) + @client = client + end + end + end + end +end diff --git a/lib/openai/resources/graders.rb b/lib/openai/resources/graders.rb new file mode 100644 index 00000000..2aca1a85 --- /dev/null +++ b/lib/openai/resources/graders.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +module OpenAI + module Resources + class Graders + # @return [OpenAI::Resources::Graders::GraderModels] + attr_reader :grader_models + + # @api private + # + # @param client [OpenAI::Client] + def initialize(client:) + @client = client + @grader_models = OpenAI::Resources::Graders::GraderModels.new(client: client) + end + end + end +end diff --git a/lib/openai/resources/graders/grader_models.rb b/lib/openai/resources/graders/grader_models.rb new file mode 100644 index 00000000..172b5ef6 --- /dev/null +++ b/lib/openai/resources/graders/grader_models.rb @@ -0,0 +1,16 @@ +# frozen_string_literal: true + +module OpenAI + module Resources + class Graders + class GraderModels + # @api private + # + # @param client [OpenAI::Client] + def initialize(client:) + @client = client + end + end + end + end +end diff --git a/lib/openai/resources/images.rb b/lib/openai/resources/images.rb index 8cc42136..f245cab7 100644 --- a/lib/openai/resources/images.rb +++ b/lib/openai/resources/images.rb @@ -3,142 +3,280 @@ module OpenAI module Resources class Images - # Creates a variation of a given image. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageCreateVariationParams} for more details. # - # @param params [OpenAI::Models::ImageCreateVariationParams, Hash{Symbol=>Object}] . + # Creates a variation of a given image. This endpoint only supports `dall-e-2`. 
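Checkpoint listing follows the same cursor-page pattern as jobs. A sketch, assuming the checkpoint model exposes `step_number` as in the REST API; the job ID is again a placeholder:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

page = client.fine_tuning.jobs.checkpoints.list("ftjob-abc123", limit: 10)
page.auto_paging_each do |checkpoint|
  puts "#{checkpoint.id} @ step #{checkpoint.step_number}"
end
```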
# - # @option params [IO, StringIO] :image The image to use as the basis for the variation(s). Must be a valid PNG file, - # less than 4MB, and square. + # @overload create_variation(image:, model: nil, n: nil, response_format: nil, size: nil, user: nil, request_options: {}) # - # @option params [String, Symbol, OpenAI::Models::ImageModel, nil] :model The model to use for image generation. Only `dall-e-2` is supported at this - # time. + # @param image [Pathname, StringIO, IO, String, OpenAI::FilePart] The image to use as the basis for the variation(s). Must be a valid PNG file, le # - # @option params [Integer, nil] :n The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - # `n=1` is supported. + # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` is supported at this time # - # @option params [Symbol, OpenAI::Models::ImageCreateVariationParams::ResponseFormat, nil] :response_format The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10. # - # @option params [Symbol, OpenAI::Models::ImageCreateVariationParams::Size, nil] :size The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. + # @param response_format [Symbol, OpenAI::Models::ImageCreateVariationParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or ` # - # @option params [String] :user A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # @param size [Symbol, OpenAI::Models::ImageCreateVariationParams::Size, nil] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::ImagesResponse] # + # @see OpenAI::Models::ImageCreateVariationParams def create_variation(params) - parsed, options = OpenAI::Models::ImageCreateVariationParams.dump_request(params) + parsed, options = OpenAI::ImageCreateVariationParams.dump_request(params) @client.request( method: :post, path: "images/variations", headers: {"content-type" => "multipart/form-data"}, body: parsed, - model: OpenAI::Models::ImagesResponse, + model: OpenAI::ImagesResponse, options: options ) end - # Creates an edited or extended image given an original image and a prompt. + # See {OpenAI::Resources::Images#edit_stream_raw} for streaming counterpart. + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageEditParams} for more details. + # + # Creates an edited or extended image given one or more source images and a + # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`. 
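`image` now accepts `Pathname`, `IO`, `String`, or `OpenAI::FilePart` instead of just `IO`/`StringIO`. A sketch of a `dall-e-2` variation request; the input file name is a placeholder for a square PNG under 4MB:

```ruby
require "openai"
require "pathname"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.images.create_variation(
  image: Pathname("cat.png"),
  n: 2,
  size: :"1024x1024"
)

# With the default `url` response format, each datum carries a short-lived URL.
response.data.each { |image| puts image.url }
```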
+ # + # @overload edit(image:, prompt:, background: nil, input_fidelity: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {}) + # + # @param image [Pathname, StringIO, IO, String, OpenAI::FilePart, Array] The image(s) to edit. Must be a supported image file or an array of images. # - # @param params [OpenAI::Models::ImageEditParams, Hash{Symbol=>Object}] . + # @param prompt [String] A text description of the desired image(s). The maximum length is 1000 character # - # @option params [IO, StringIO] :image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask - # is not provided, image must have transparency, which will be used as the mask. + # @param background [Symbol, OpenAI::Models::ImageEditParams::Background, nil] Allows to set transparency for the background of the generated image(s). # - # @option params [String] :prompt A text description of the desired image(s). The maximum length is 1000 - # characters. + # @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] Control how much effort the model will exert to match the style and features, # - # @option params [IO, StringIO] :mask An additional image whose fully transparent areas (e.g. where alpha is zero) - # indicate where `image` should be edited. Must be a valid PNG file, less than - # 4MB, and have the same dimensions as `image`. + # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind # - # @option params [String, Symbol, OpenAI::Models::ImageModel, nil] :model The model to use for image generation. Only `dall-e-2` is supported at this - # time. + # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are sup # - # @option params [Integer, nil] :n The number of images to generate. Must be between 1 and 10. + # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10. # - # @option params [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] :response_format The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. URLs are only valid for 60 minutes after the image has been - # generated. + # @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter # - # @option params [Symbol, OpenAI::Models::ImageEditParams::Size, nil] :size The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024`. + # @param output_format [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is # - # @option params [String] :user A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # @param partial_images [Integer, nil] The number of partial images to generate. This parameter is used for # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param quality [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] The quality of the image that will be generated. 
`high`, `medium` and `low` are + # + # @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or ` + # + # @param size [Symbol, OpenAI::Models::ImageEditParams::Size, nil] The size of the generated images. Must be one of `1024x1024`, `1536x1024` (lands + # + # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::ImagesResponse] # + # @see OpenAI::Models::ImageEditParams def edit(params) - parsed, options = OpenAI::Models::ImageEditParams.dump_request(params) + parsed, options = OpenAI::ImageEditParams.dump_request(params) + if parsed[:stream] + message = "Please use `#edit_stream_raw` for the streaming use case." + raise ArgumentError.new(message) + end @client.request( method: :post, path: "images/edits", headers: {"content-type" => "multipart/form-data"}, body: parsed, - model: OpenAI::Models::ImagesResponse, + model: OpenAI::ImagesResponse, + options: options + ) + end + + # See {OpenAI::Resources::Images#edit} for non-streaming counterpart. + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageEditParams} for more details. + # + # Creates an edited or extended image given one or more source images and a + # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`. + # + # @overload edit_stream_raw(image:, prompt:, background: nil, input_fidelity: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {}) + # + # @param image [Pathname, StringIO, IO, String, OpenAI::FilePart, Array] The image(s) to edit. Must be a supported image file or an array of images. + # + # @param prompt [String] A text description of the desired image(s). The maximum length is 1000 character + # + # @param background [Symbol, OpenAI::Models::ImageEditParams::Background, nil] Allows to set transparency for the background of the generated image(s). + # + # @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] Control how much effort the model will exert to match the style and features, + # + # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind + # + # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are sup + # + # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10. + # + # @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter + # + # @param output_format [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is + # + # @param partial_images [Integer, nil] The number of partial images to generate. This parameter is used for + # + # @param quality [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] The quality of the image that will be generated. `high`, `medium` and `low` are + # + # @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. 
Must be one of `url` or ` + # + # @param size [Symbol, OpenAI::Models::ImageEditParams::Size, nil] The size of the generated images. Must be one of `1024x1024`, `1536x1024` (lands + # + # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Internal::Stream] + # + # @see OpenAI::Models::ImageEditParams + def edit_stream_raw(params) + parsed, options = OpenAI::ImageEditParams.dump_request(params) + unless parsed.fetch(:stream, true) + message = "Please use `#edit` for the non-streaming use case." + raise ArgumentError.new(message) + end + parsed.store(:stream, true) + @client.request( + method: :post, + path: "images/edits", + headers: {"content-type" => "multipart/form-data", "accept" => "text/event-stream"}, + body: parsed, + stream: OpenAI::Internal::Stream, + model: OpenAI::ImageEditStreamEvent, options: options ) end + # See {OpenAI::Resources::Images#generate_stream_raw} for streaming counterpart. + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageGenerateParams} for more details. + # # Creates an image given a prompt. + # [Learn more](https://platform.openai.com/docs/guides/images). + # + # @overload generate(prompt:, background: nil, model: nil, moderation: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, style: nil, user: nil, request_options: {}) + # + # @param prompt [String] A text description of the desired image(s). The maximum length is 32000 characte + # + # @param background [Symbol, OpenAI::Models::ImageGenerateParams::Background, nil] Allows to set transparency for the background of the generated image(s). # - # @param params [OpenAI::Models::ImageGenerateParams, Hash{Symbol=>Object}] . + # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or `gpt-im # - # @option params [String] :prompt A text description of the desired image(s). The maximum length is 1000 - # characters for `dall-e-2` and 4000 characters for `dall-e-3`. + # @param moderation [Symbol, OpenAI::Models::ImageGenerateParams::Moderation, nil] Control the content-moderation level for images generated by `gpt-image-1`. Must # - # @option params [String, Symbol, OpenAI::Models::ImageModel, nil] :model The model to use for image generation. + # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only # - # @option params [Integer, nil] :n The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - # `n=1` is supported. + # @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter is only # - # @option params [Symbol, OpenAI::Models::ImageGenerateParams::Quality] :quality The quality of the image that will be generated. `hd` creates images with finer - # details and greater consistency across the image. This param is only supported - # for `dall-e-3`. + # @param output_format [Symbol, OpenAI::Models::ImageGenerateParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is only su # - # @option params [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil] :response_format The format in which the generated images are returned. Must be one of `url` or - # `b64_json`. 
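Image editing is now split into `#edit`, which raises if `stream: true` is passed, and `#edit_stream_raw`, which forces `stream: true` and yields `ImageEditStreamEvent` values over SSE. A streaming sketch; the concrete event type names are not shown in this hunk, so only `type` is inspected, and the file name is a placeholder:

```ruby
require "openai"
require "pathname"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

stream = client.images.edit_stream_raw(
  image: Pathname("cat.png"),
  prompt: "Add a tiny wizard hat",
  model: "gpt-image-1",
  partial_images: 2 # emit up to two partial-image events before the final one
)

stream.each { |event| puts event.type }
```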
URLs are only valid for 60 minutes after the image has been - # generated. + # @param partial_images [Integer, nil] The number of partial images to generate. This parameter is used for # - # @option params [Symbol, OpenAI::Models::ImageGenerateParams::Size, nil] :size The size of the generated images. Must be one of `256x256`, `512x512`, or - # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or - # `1024x1792` for `dall-e-3` models. + # @param quality [Symbol, OpenAI::Models::ImageGenerateParams::Quality, nil] The quality of the image that will be generated. # - # @option params [Symbol, OpenAI::Models::ImageGenerateParams::Style, nil] :style The style of the generated images. Must be one of `vivid` or `natural`. Vivid - # causes the model to lean towards generating hyper-real and dramatic images. - # Natural causes the model to produce more natural, less hyper-real looking - # images. This param is only supported for `dall-e-3`. + # @param response_format [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil] The format in which generated images with `dall-e-2` and `dall-e-3` are returned # - # @option params [String] :user A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # @param size [Symbol, OpenAI::Models::ImageGenerateParams::Size, nil] The size of the generated images. Must be one of `1024x1024`, `1536x1024` (lands # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param style [Symbol, OpenAI::Models::ImageGenerateParams::Style, nil] The style of the generated images. This parameter is only supported for `dall-e- + # + # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::ImagesResponse] # + # @see OpenAI::Models::ImageGenerateParams def generate(params) - parsed, options = OpenAI::Models::ImageGenerateParams.dump_request(params) + parsed, options = OpenAI::ImageGenerateParams.dump_request(params) + if parsed[:stream] + message = "Please use `#generate_stream_raw` for the streaming use case." + raise ArgumentError.new(message) + end @client.request( method: :post, path: "images/generations", body: parsed, - model: OpenAI::Models::ImagesResponse, + model: OpenAI::ImagesResponse, options: options ) end - # @param client [OpenAI::Client] + # See {OpenAI::Resources::Images#generate} for non-streaming counterpart. + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ImageGenerateParams} for more details. + # + # Creates an image given a prompt. + # [Learn more](https://platform.openai.com/docs/guides/images). + # + # @overload generate_stream_raw(prompt:, background: nil, model: nil, moderation: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, style: nil, user: nil, request_options: {}) + # + # @param prompt [String] A text description of the desired image(s). The maximum length is 32000 characte + # + # @param background [Symbol, OpenAI::Models::ImageGenerateParams::Background, nil] Allows to set transparency for the background of the generated image(s). + # + # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. 
One of `dall-e-2`, `dall-e-3`, or `gpt-im + # + # @param moderation [Symbol, OpenAI::Models::ImageGenerateParams::Moderation, nil] Control the content-moderation level for images generated by `gpt-image-1`. Must + # + # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + # + # @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter is only + # + # @param output_format [Symbol, OpenAI::Models::ImageGenerateParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is only su # + # @param partial_images [Integer, nil] The number of partial images to generate. This parameter is used for + # + # @param quality [Symbol, OpenAI::Models::ImageGenerateParams::Quality, nil] The quality of the image that will be generated. + # + # @param response_format [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil] The format in which generated images with `dall-e-2` and `dall-e-3` are returned + # + # @param size [Symbol, OpenAI::Models::ImageGenerateParams::Size, nil] The size of the generated images. Must be one of `1024x1024`, `1536x1024` (lands + # + # @param style [Symbol, OpenAI::Models::ImageGenerateParams::Style, nil] The style of the generated images. This parameter is only supported for `dall-e- + # + # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Internal::Stream] + # + # @see OpenAI::Models::ImageGenerateParams + def generate_stream_raw(params) + parsed, options = OpenAI::ImageGenerateParams.dump_request(params) + unless parsed.fetch(:stream, true) + message = "Please use `#generate` for the non-streaming use case." + raise ArgumentError.new(message) + end + parsed.store(:stream, true) + @client.request( + method: :post, + path: "images/generations", + headers: {"accept" => "text/event-stream"}, + body: parsed, + stream: OpenAI::Internal::Stream, + model: OpenAI::ImageGenStreamEvent, + options: options + ) + end + + # @api private + # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/models.rb b/lib/openai/resources/models.rb index 9c086955..8a3c6bab 100644 --- a/lib/openai/resources/models.rb +++ b/lib/openai/resources/models.rb @@ -4,66 +4,70 @@ module OpenAI module Resources class Models # Retrieves a model instance, providing basic information about the model such as - # the owner and permissioning. + # the owner and permissioning. # - # @param model [String] The ID of the model to use for this request + # @overload retrieve(model, request_options: {}) # - # @param params [OpenAI::Models::ModelRetrieveParams, Hash{Symbol=>Object}] . + # @param model [String] The ID of the model to use for this request # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Model] # + # @see OpenAI::Models::ModelRetrieveParams def retrieve(model, params = {}) @client.request( method: :get, - path: ["models/%0s", model], - model: OpenAI::Models::Model, + path: ["models/%1$s", model], + model: OpenAI::Model, options: params[:request_options] ) end # Lists the currently available models, and provides basic information about each - # one such as the owner and availability. 
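Generation gets the same split: `#generate` rejects `stream: true` and `#generate_stream_raw` forces it, returning `ImageGenStreamEvent` values. A non-streaming sketch, assuming `gpt-image-1` populates `b64_json` rather than URLs:

```ruby
require "openai"
require "base64"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.images.generate(
  prompt: "A watercolor lighthouse at dawn",
  model: "gpt-image-1",
  size: :"1024x1024"
)

# Assumes b64_json is populated; gpt-image-1 does not return URLs.
File.binwrite("lighthouse.png", Base64.decode64(response.data.first.b64_json))
```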
+ # one such as the owner and availability. # - # @param params [OpenAI::Models::ModelListParams, Hash{Symbol=>Object}] . + # @overload list(request_options: {}) # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::Page] + # @return [OpenAI::Internal::Page] # + # @see OpenAI::Models::ModelListParams def list(params = {}) @client.request( method: :get, path: "models", - page: OpenAI::Page, - model: OpenAI::Models::Model, + page: OpenAI::Internal::Page, + model: OpenAI::Model, options: params[:request_options] ) end # Delete a fine-tuned model. You must have the Owner role in your organization to - # delete a model. + # delete a model. # - # @param model [String] The model to delete + # @overload delete(model, request_options: {}) # - # @param params [OpenAI::Models::ModelDeleteParams, Hash{Symbol=>Object}] . + # @param model [String] The model to delete # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::ModelDeleted] # + # @see OpenAI::Models::ModelDeleteParams def delete(model, params = {}) @client.request( method: :delete, - path: ["models/%0s", model], - model: OpenAI::Models::ModelDeleted, + path: ["models/%1$s", model], + model: OpenAI::ModelDeleted, options: params[:request_options] ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/moderations.rb b/lib/openai/resources/moderations.rb index f1375d48..a0b0e774 100644 --- a/lib/openai/resources/moderations.rb +++ b/lib/openai/resources/moderations.rb @@ -3,25 +3,25 @@ module OpenAI module Resources class Moderations + # Some parameter documentations has been truncated, see + # {OpenAI::Models::ModerationCreateParams} for more details. + # # Classifies if text and/or image inputs are potentially harmful. Learn more in - # the [moderation guide](https://platform.openai.com/docs/guides/moderation). + # the [moderation guide](https://platform.openai.com/docs/guides/moderation). # - # @param params [OpenAI::Models::ModerationCreateParams, Hash{Symbol=>Object}] . + # @overload create(input:, model: nil, request_options: {}) # - # @option params [String, Array, Array] :input Input (or inputs) to classify. Can be a single string, an array of strings, or - # an array of multi-modal input objects similar to other models. + # @param input [String, Array, Array] Input (or inputs) to classify. Can be a single string, an array of strings, or # - # @option params [String, Symbol, OpenAI::Models::ModerationModel] :model The content moderation model you would like to use. Learn more in - # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and - # learn about available models - # [here](https://platform.openai.com/docs/models#moderation). + # @param model [String, Symbol, OpenAI::Models::ModerationModel] The content moderation model you would like to use. 
Learn more in # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::ModerationCreateResponse] # + # @see OpenAI::Models::ModerationCreateParams def create(params) - parsed, options = OpenAI::Models::ModerationCreateParams.dump_request(params) + parsed, options = OpenAI::ModerationCreateParams.dump_request(params) @client.request( method: :post, path: "moderations", @@ -31,8 +31,9 @@ def create(params) ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/responses.rb b/lib/openai/resources/responses.rb index 3b4c2c2d..38fd98a1 100644 --- a/lib/openai/resources/responses.rb +++ b/lib/openai/resources/responses.rb @@ -6,321 +6,313 @@ class Responses # @return [OpenAI::Resources::Responses::InputItems] attr_reader :input_items - # Creates a model response. Provide - # [text](https://platform.openai.com/docs/guides/text) or - # [image](https://platform.openai.com/docs/guides/images) inputs to generate - # [text](https://platform.openai.com/docs/guides/text) or - # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have - # the model call your own - # [custom code](https://platform.openai.com/docs/guides/function-calling) or use - # built-in [tools](https://platform.openai.com/docs/guides/tools) like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use - # your own data as input for the model's response. + # See {OpenAI::Resources::Responses#stream_raw} for streaming counterpart. # - # @param params [OpenAI::Models::Responses::ResponseCreateParams, Hash{Symbol=>Object}] . + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseCreateParams} for more details. # - # @option params [String, Array] :input Text, image, or file inputs to the model, used to generate a response. + # Creates a model response. Provide + # [text](https://platform.openai.com/docs/guides/text) or + # [image](https://platform.openai.com/docs/guides/images) inputs to generate + # [text](https://platform.openai.com/docs/guides/text) or + # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + # the model call your own + # [custom code](https://platform.openai.com/docs/guides/function-calling) or use + # built-in [tools](https://platform.openai.com/docs/guides/tools) like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + # your own data as input for the model's response. 
# - # Learn more: + # @overload create(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Image inputs](https://platform.openai.com/docs/guides/images) - # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + # @param background [Boolean, nil] Whether to run the model response in the background. # - # @option params [String, Symbol, OpenAI::Models::ChatModel] :model Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. + # @param conversation [String, OpenAI::Models::Responses::ResponseConversationParam, nil] The conversation that this response belongs to. Items from this conversation are # - # @option params [Array, nil] :include Specify additional output data to include in the model response. Currently - # supported values are: + # @param include [Array, nil] Specify additional output data to include in the model response. Currently # - # - `file_search_call.results`: Include the search results of the file search tool - # call. - # - `message.input_image.image_url`: Include image urls from the input message. - # - `computer_call_output.output.image_url`: Include image urls from the computer - # call output. + # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. # - # @option params [String, nil] :instructions Inserts a system (or developer) message as the first item in the model's - # context. + # @param instructions [String, nil] A system (or developer) message inserted into the model's context. # - # When using along with `previous_response_id`, the instructions from a previous - # response will be not be carried over to the next response. This makes it simple - # to swap out system (or developer) messages in new responses. + # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in # - # @option params [Integer, nil] :max_output_tokens An upper bound for the number of tokens that can be generated for a response, - # including visible output tokens and - # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + # @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r # - # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. 
This can be # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI # - # @option params [Boolean, nil] :parallel_tool_calls Whether to allow the model to run tool calls in parallel. + # @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel. # - # @option params [String, nil] :previous_response_id The unique ID of the previous response to the model. Use this to create - # multi-turn conversations. Learn more about - # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to # - # @option params [OpenAI::Models::Reasoning, nil] :reasoning **o-series models only** + # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables. # - # Configuration options for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi # - # @option params [Boolean, nil] :store Whether to store the generated model response for later retrieval via API. + # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only** # - # @option params [Float, nil] :temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. We generally recommend altering this or `top_p` but - # not both. + # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi # - # @option params [OpenAI::Models::Responses::ResponseTextConfig] :text Configuration options for a text response from the model. Can be plain text or - # structured JSON data. Learn more: + # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request. # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via # - # @option params [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] :tool_choice How the model should select which tool (or tools) to use when generating a - # response. See the `tools` parameter to see how to specify which tools the model - # can call. + # @param stream_options [OpenAI::Models::Responses::ResponseCreateParams::StreamOptions, nil] Options for streaming responses. Only set this when you set `stream: true`. # - # @option params [Array] :tools An array of tools the model may call while generating a response. You can - # specify which tool to use by setting the `tool_choice` parameter. + # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will m # - # The two categories of tools you can provide the model are: + # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain # - # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's - # capabilities, like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search). - # Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). - # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about - # [function calling](https://platform.openai.com/docs/guides/function-calling). + # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating # - # @option params [Float, nil] :top_p An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # @param tools [Array] An array of tools the model may call while generating a response. You # - # We generally recommend altering this or `temperature` but not both. + # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to # - # @option params [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] :truncation The truncation strategy to use for the model response. + # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, # - # - `auto`: If the context of this response and previous ones exceeds the model's - # context window size, the model will truncate the response to fit the context - # window by dropping input items in the middle of the conversation. - # - `disabled` (default): If a model response will exceed the context window size - # for a model, the request will fail with a 400 error. + # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response. # - # @option params [String] :user A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Responses::Response] # - def create(params) - parsed, options = OpenAI::Models::Responses::ResponseCreateParams.dump_request(params) - parsed.delete(:stream) + # @see OpenAI::Models::Responses::ResponseCreateParams + def create(params = {}) + parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params) + if parsed[:stream] + message = "Please use `#stream_raw` for the streaming use case." 
+ raise ArgumentError.new(message) + end @client.request( method: :post, path: "responses", body: parsed, - model: OpenAI::Models::Responses::Response, + model: OpenAI::Responses::Response, options: options ) end - # Creates a model response. Provide - # [text](https://platform.openai.com/docs/guides/text) or - # [image](https://platform.openai.com/docs/guides/images) inputs to generate - # [text](https://platform.openai.com/docs/guides/text) or - # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have - # the model call your own - # [custom code](https://platform.openai.com/docs/guides/function-calling) or use - # built-in [tools](https://platform.openai.com/docs/guides/tools) like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use - # your own data as input for the model's response. + # See {OpenAI::Resources::Responses#create} for non-streaming counterpart. # - # @param params [OpenAI::Models::Responses::ResponseCreateParams, Hash{Symbol=>Object}] . + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseCreateParams} for more details. # - # @option params [String, Array] :input Text, image, or file inputs to the model, used to generate a response. + # Creates a model response. Provide + # [text](https://platform.openai.com/docs/guides/text) or + # [image](https://platform.openai.com/docs/guides/images) inputs to generate + # [text](https://platform.openai.com/docs/guides/text) or + # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + # the model call your own + # [custom code](https://platform.openai.com/docs/guides/function-calling) or use + # built-in [tools](https://platform.openai.com/docs/guides/tools) like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + # your own data as input for the model's response. # - # Learn more: + # @overload stream_raw(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Image inputs](https://platform.openai.com/docs/guides/images) - # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + # @param background [Boolean, nil] Whether to run the model response in the background. # - # @option params [String, Symbol, OpenAI::Models::ChatModel] :model Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a - # wide range of models with different capabilities, performance characteristics, - # and price points. Refer to the - # [model guide](https://platform.openai.com/docs/models) to browse and compare - # available models. 
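Note that `create` now takes `params = {}` (every field is optional at the method level) and raises `ArgumentError` when `stream: true` is passed, steering callers to `#stream_raw`. A minimal sketch:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: "gpt-4o",
  input: "Write a haiku about version bumps."
)

puts response.id
response.output.each { |item| puts item.type }
```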
+ # @param conversation [String, OpenAI::Models::Responses::ResponseConversationParam, nil] The conversation that this response belongs to. Items from this conversation are # - # @option params [Array, nil] :include Specify additional output data to include in the model response. Currently - # supported values are: + # @param include [Array, nil] Specify additional output data to include in the model response. Currently # - # - `file_search_call.results`: Include the search results of the file search tool - # call. - # - `message.input_image.image_url`: Include image urls from the input message. - # - `computer_call_output.output.image_url`: Include image urls from the computer - # call output. + # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. # - # @option params [String, nil] :instructions Inserts a system (or developer) message as the first item in the model's - # context. + # @param instructions [String, nil] A system (or developer) message inserted into the model's context. # - # When using along with `previous_response_id`, the instructions from a previous - # response will be not be carried over to the next response. This makes it simple - # to swap out system (or developer) messages in new responses. + # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in # - # @option params [Integer, nil] :max_output_tokens An upper bound for the number of tokens that can be generated for a response, - # including visible output tokens and - # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + # @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r # - # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI # - # @option params [Boolean, nil] :parallel_tool_calls Whether to allow the model to run tool calls in parallel. + # @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel. # - # @option params [String, nil] :previous_response_id The unique ID of the previous response to the model. Use this to create - # multi-turn conversations. Learn more about - # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to # - # @option params [OpenAI::Models::Reasoning, nil] :reasoning **o-series models only** + # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables. # - # Configuration options for - # [reasoning models](https://platform.openai.com/docs/guides/reasoning). 
+ # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi # - # @option params [Boolean, nil] :store Whether to store the generated model response for later retrieval via API. + # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only** # - # @option params [Float, nil] :temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - # make the output more random, while lower values like 0.2 will make it more - # focused and deterministic. We generally recommend altering this or `top_p` but - # not both. + # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi # - # @option params [OpenAI::Models::Responses::ResponseTextConfig] :text Configuration options for a text response from the model. Can be plain text or - # structured JSON data. Learn more: + # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request. # - # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via # - # @option params [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] :tool_choice How the model should select which tool (or tools) to use when generating a - # response. See the `tools` parameter to see how to specify which tools the model - # can call. + # @param stream_options [OpenAI::Models::Responses::ResponseCreateParams::StreamOptions, nil] Options for streaming responses. Only set this when you set `stream: true`. # - # @option params [Array] :tools An array of tools the model may call while generating a response. You can - # specify which tool to use by setting the `tool_choice` parameter. + # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # - # The two categories of tools you can provide the model are: + # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain # - # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's - # capabilities, like - # [web search](https://platform.openai.com/docs/guides/tools-web-search) or - # [file search](https://platform.openai.com/docs/guides/tools-file-search). - # Learn more about - # [built-in tools](https://platform.openai.com/docs/guides/tools). - # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about - # [function calling](https://platform.openai.com/docs/guides/function-calling). + # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating # - # @option params [Float, nil] :top_p An alternative to sampling with temperature, called nucleus sampling, where the - # model considers the results of the tokens with top_p probability mass. 
So 0.1 - # means only the tokens comprising the top 10% probability mass are considered. + # @param tools [Array] An array of tools the model may call while generating a response. You # - # We generally recommend altering this or `temperature` but not both. + # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to # - # @option params [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] :truncation The truncation strategy to use for the model response. + # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, # - # - `auto`: If the context of this response and previous ones exceeds the model's - # context window size, the model will truncate the response to fit the context - # window by dropping input items in the middle of the conversation. - # - `disabled` (default): If a model response will exceed the context window size - # for a model, the request will fail with a 400 error. + # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response. # - # @option params [String] :user A unique identifier representing your end-user, which can help OpenAI to monitor - # and detect abuse. - # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::Stream] + # @return [OpenAI::Internal::Stream] # - def create_streaming(params) - parsed, options = OpenAI::Models::Responses::ResponseCreateParams.dump_request(params) + # @see OpenAI::Models::Responses::ResponseCreateParams + def stream_raw(params = {}) + parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params) + unless parsed.fetch(:stream, true) + message = "Please use `#create` for the non-streaming use case." + raise ArgumentError.new(message) + end parsed.store(:stream, true) @client.request( method: :post, path: "responses", headers: {"accept" => "text/event-stream"}, body: parsed, - stream: OpenAI::Stream, - model: OpenAI::Models::Responses::ResponseStreamEvent, + stream: OpenAI::Internal::Stream, + model: OpenAI::Responses::ResponseStreamEvent, options: options ) end + # See {OpenAI::Resources::Responses#retrieve_streaming} for streaming counterpart. + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseRetrieveParams} for more details. + # # Retrieves a model response with the given ID. # + # @overload retrieve(response_id, include: nil, include_obfuscation: nil, starting_after: nil, request_options: {}) + # # @param response_id [String] The ID of the response to retrieve. # - # @param params [OpenAI::Models::Responses::ResponseRetrieveParams, Hash{Symbol=>Object}] . + # @param include [Array] Additional fields to include in the response. See the `include` + # + # @param include_obfuscation [Boolean] When true, stream obfuscation will be enabled. Stream obfuscation adds # - # @option params [Array] :include Additional fields to include in the response. See the `include` parameter for - # Response creation above for more information. + # @param starting_after [Integer] The sequence number of the event after which to start streaming. 
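`stream_raw` (the renamed `create_streaming`) forces `stream: true` and yields `ResponseStreamEvent` values. A sketch; the `:"response.output_text.delta"` event name and its `delta` accessor follow the public API's event naming and are an assumption, not something shown in this hunk:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

stream = client.responses.stream_raw(
  model: "gpt-4o",
  input: "Tell me a two-sentence story."
)

stream.each do |event|
  # Assumed event shape: text deltas carry the incremental output text.
  print(event.delta) if event.type == :"response.output_text.delta"
end
```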
# - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Responses::Response] # + # @see OpenAI::Models::Responses::ResponseRetrieveParams def retrieve(response_id, params = {}) - parsed, options = OpenAI::Models::Responses::ResponseRetrieveParams.dump_request(params) + parsed, options = OpenAI::Responses::ResponseRetrieveParams.dump_request(params) + if parsed[:stream] + message = "Please use `#retrieve_streaming` for the streaming use case." + raise ArgumentError.new(message) + end @client.request( method: :get, - path: ["responses/%0s", response_id], + path: ["responses/%1$s", response_id], query: parsed, - model: OpenAI::Models::Responses::Response, + model: OpenAI::Responses::Response, + options: options + ) + end + + # See {OpenAI::Resources::Responses#retrieve} for non-streaming counterpart. + # + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseRetrieveParams} for more details. + # + # Retrieves a model response with the given ID. + # + # @overload retrieve_streaming(response_id, include: nil, include_obfuscation: nil, starting_after: nil, request_options: {}) + # + # @param response_id [String] The ID of the response to retrieve. + # + # @param include [Array] Additional fields to include in the response. See the `include` + # + # @param include_obfuscation [Boolean] When true, stream obfuscation will be enabled. Stream obfuscation adds + # + # @param starting_after [Integer] The sequence number of the event after which to start streaming. + # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Internal::Stream] + # + # @see OpenAI::Models::Responses::ResponseRetrieveParams + def retrieve_streaming(response_id, params = {}) + parsed, options = OpenAI::Responses::ResponseRetrieveParams.dump_request(params) + unless parsed.fetch(:stream, true) + message = "Please use `#retrieve` for the non-streaming use case." + raise ArgumentError.new(message) + end + parsed.store(:stream, true) + @client.request( + method: :get, + path: ["responses/%1$s", response_id], + query: parsed, + headers: {"accept" => "text/event-stream"}, + stream: OpenAI::Internal::Stream, + model: OpenAI::Responses::ResponseStreamEvent, options: options ) end # Deletes a model response with the given ID. # - # @param response_id [String] The ID of the response to delete. + # @overload delete(response_id, request_options: {}) # - # @param params [OpenAI::Models::Responses::ResponseDeleteParams, Hash{Symbol=>Object}] . + # @param response_id [String] The ID of the response to delete. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [nil] # + # @see OpenAI::Models::Responses::ResponseDeleteParams def delete(response_id, params = {}) @client.request( method: :delete, - path: ["responses/%0s", response_id], + path: ["responses/%1$s", response_id], model: NilClass, options: params[:request_options] ) end - # @param client [OpenAI::Client] + # Cancels a model response with the given ID. Only responses created with the + # `background` parameter set to `true` can be cancelled. + # [Learn more](https://platform.openai.com/docs/guides/background). 
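
[Editor's note] As the comment above says, `#cancel` only applies to responses created with `background: true`. A hedged lifecycle sketch reusing the client from the earlier example (prompt and status values are illustrative):

```ruby
# Start a long-running response in the background, check on it, then cancel.
resp = client.responses.create(
  model: "gpt-4.1",
  input: "Summarize this very long document...",
  background: true
)

puts client.responses.retrieve(resp.id).status   # e.g. queued / in_progress

cancelled = client.responses.cancel(resp.id)
puts cancelled.status                            # expected to report cancellation
```
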
+ # + # @overload cancel(response_id, request_options: {}) + # + # @param response_id [String] The ID of the response to cancel. # + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] + # + # @return [OpenAI::Models::Responses::Response] + # + # @see OpenAI::Models::Responses::ResponseCancelParams + def cancel(response_id, params = {}) + @client.request( + method: :post, + path: ["responses/%1$s/cancel", response_id], + model: OpenAI::Responses::Response, + options: params[:request_options] + ) + end + + # @api private + # + # @param client [OpenAI::Client] def initialize(client:) @client = client @input_items = OpenAI::Resources::Responses::InputItems.new(client: client) diff --git a/lib/openai/resources/responses/input_items.rb b/lib/openai/resources/responses/input_items.rb index 87ea88ab..551f4f38 100644 --- a/lib/openai/resources/responses/input_items.rb +++ b/lib/openai/resources/responses/input_items.rb @@ -4,42 +4,43 @@ module OpenAI module Resources class Responses class InputItems - # Returns a list of input items for a given response. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::InputItemListParams} for more details. # - # @param response_id [String] The ID of the response to retrieve input items for. + # Returns a list of input items for a given response. # - # @param params [OpenAI::Models::Responses::InputItemListParams, Hash{Symbol=>Object}] . + # @overload list(response_id, after: nil, include: nil, limit: nil, order: nil, request_options: {}) # - # @option params [String] :after An item ID to list items after, used in pagination. + # @param response_id [String] The ID of the response to retrieve input items for. # - # @option params [String] :before An item ID to list items before, used in pagination. + # @param after [String] An item ID to list items after, used in pagination. # - # @option params [Integer] :limit A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # @param include [Array] Additional fields to include in the response. See the `include` # - # @option params [Symbol, OpenAI::Models::Responses::InputItemListParams::Order] :order The order to return the input items in. Default is `asc`. + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between # - # - `asc`: Return the input items in ascending order. - # - `desc`: Return the input items in descending order. + # @param order [Symbol, OpenAI::Models::Responses::InputItemListParams::Order] The order to return the input items in. Default is `desc`. 
# - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::CursorPage] + # @return [OpenAI::Internal::CursorPage] # + # @see OpenAI::Models::Responses::InputItemListParams def list(response_id, params = {}) - parsed, options = OpenAI::Models::Responses::InputItemListParams.dump_request(params) + parsed, options = OpenAI::Responses::InputItemListParams.dump_request(params) @client.request( method: :get, - path: ["responses/%0s/input_items", response_id], + path: ["responses/%1$s/input_items", response_id], query: parsed, - page: OpenAI::CursorPage, - model: OpenAI::Models::Responses::ResponseItemList::Data, + page: OpenAI::Internal::CursorPage, + model: OpenAI::Responses::ResponseItem, options: options ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/uploads.rb b/lib/openai/resources/uploads.rb index e7a40175..b9037fd9 100644 --- a/lib/openai/resources/uploads.rb +++ b/lib/openai/resources/uploads.rb @@ -6,116 +6,118 @@ class Uploads # @return [OpenAI::Resources::Uploads::Parts] attr_reader :parts - # Creates an intermediate - # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object - # that you can add - # [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to. - # Currently, an Upload can accept at most 8 GB in total and expires after an hour - # after you create it. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::UploadCreateParams} for more details. # - # Once you complete the Upload, we will create a - # [File](https://platform.openai.com/docs/api-reference/files/object) object that - # contains all the parts you uploaded. This File is usable in the rest of our - # platform as a regular File object. + # Creates an intermediate + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object + # that you can add + # [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to. + # Currently, an Upload can accept at most 8 GB in total and expires after an hour + # after you create it. # - # For certain `purpose` values, the correct `mime_type` must be specified. Please - # refer to documentation for the - # [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). + # Once you complete the Upload, we will create a + # [File](https://platform.openai.com/docs/api-reference/files/object) object that + # contains all the parts you uploaded. This File is usable in the rest of our + # platform as a regular File object. # - # For guidance on the proper filename extensions for each purpose, please follow - # the documentation on - # [creating a File](https://platform.openai.com/docs/api-reference/files/create). + # For certain `purpose` values, the correct `mime_type` must be specified. Please + # refer to documentation for the + # [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). # - # @param params [OpenAI::Models::UploadCreateParams, Hash{Symbol=>Object}] . + # For guidance on the proper filename extensions for each purpose, please follow + # the documentation on + # [creating a File](https://platform.openai.com/docs/api-reference/files/create). 
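
[Editor's note] The `#list` method above now returns `OpenAI::Internal::CursorPage` and maps items to `OpenAI::Responses::ResponseItem`. A pagination sketch, assuming the cursor pager's `auto_paging_each` helper and a placeholder response ID:

```ruby
# Walk every input item of a stored response, fetching pages as needed.
page = client.responses.input_items.list("resp_123", limit: 20, order: :asc)

page.auto_paging_each do |item|
  puts item.to_json
end
```
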
# - # @option params [Integer] :bytes The number of bytes in the file you are uploading. + # @overload create(bytes:, filename:, mime_type:, purpose:, expires_after: nil, request_options: {}) # - # @option params [String] :filename The name of the file to upload. + # @param bytes [Integer] The number of bytes in the file you are uploading. # - # @option params [String] :mime_type The MIME type of the file. + # @param filename [String] The name of the file to upload. # - # This must fall within the supported MIME types for your file purpose. See the - # supported MIME types for assistants and vision. + # @param mime_type [String] The MIME type of the file. # - # @option params [Symbol, OpenAI::Models::FilePurpose] :purpose The intended purpose of the uploaded file. + # @param purpose [Symbol, OpenAI::Models::FilePurpose] The intended purpose of the uploaded file. # - # See the - # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + # @param expires_after [OpenAI::Models::UploadCreateParams::ExpiresAfter] The expiration policy for a file. By default, files with `purpose=batch` expire # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Upload] # + # @see OpenAI::Models::UploadCreateParams def create(params) - parsed, options = OpenAI::Models::UploadCreateParams.dump_request(params) - @client.request( - method: :post, - path: "uploads", - body: parsed, - model: OpenAI::Models::Upload, - options: options - ) + parsed, options = OpenAI::UploadCreateParams.dump_request(params) + @client.request(method: :post, path: "uploads", body: parsed, model: OpenAI::Upload, options: options) end + # Some parameter documentations has been truncated, see + # {OpenAI::Models::UploadCancelParams} for more details. + # # Cancels the Upload. No Parts may be added after an Upload is cancelled. # - # @param upload_id [String] The ID of the Upload. + # @overload cancel(upload_id, request_options: {}) # - # @param params [OpenAI::Models::UploadCancelParams, Hash{Symbol=>Object}] . + # @param upload_id [String] The ID of the Upload. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Upload] # + # @see OpenAI::Models::UploadCancelParams def cancel(upload_id, params = {}) @client.request( method: :post, - path: ["uploads/%0s/cancel", upload_id], - model: OpenAI::Models::Upload, + path: ["uploads/%1$s/cancel", upload_id], + model: OpenAI::Upload, options: params[:request_options] ) end + # Some parameter documentations has been truncated, see + # {OpenAI::Models::UploadCompleteParams} for more details. + # # Completes the - # [Upload](https://platform.openai.com/docs/api-reference/uploads/object). + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object). # - # Within the returned Upload object, there is a nested - # [File](https://platform.openai.com/docs/api-reference/files/object) object that - # is ready to use in the rest of the platform. + # Within the returned Upload object, there is a nested + # [File](https://platform.openai.com/docs/api-reference/files/object) object that + # is ready to use in the rest of the platform. # - # You can specify the order of the Parts by passing in an ordered list of the Part - # IDs. 
+ # You can specify the order of the Parts by passing in an ordered list of the Part + # IDs. # - # The number of bytes uploaded upon completion must match the number of bytes - # initially specified when creating the Upload object. No Parts may be added after - # an Upload is completed. + # The number of bytes uploaded upon completion must match the number of bytes + # initially specified when creating the Upload object. No Parts may be added after + # an Upload is completed. # - # @param upload_id [String] The ID of the Upload. + # @overload complete(upload_id, part_ids:, md5: nil, request_options: {}) # - # @param params [OpenAI::Models::UploadCompleteParams, Hash{Symbol=>Object}] . + # @param upload_id [String] The ID of the Upload. # - # @option params [Array] :part_ids The ordered list of Part IDs. + # @param part_ids [Array] The ordered list of Part IDs. # - # @option params [String] :md5 The optional md5 checksum for the file contents to verify if the bytes uploaded - # matches what you expect. + # @param md5 [String] The optional md5 checksum for the file contents to verify if the bytes uploaded # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Upload] # + # @see OpenAI::Models::UploadCompleteParams def complete(upload_id, params) - parsed, options = OpenAI::Models::UploadCompleteParams.dump_request(params) + parsed, options = OpenAI::UploadCompleteParams.dump_request(params) @client.request( method: :post, - path: ["uploads/%0s/complete", upload_id], + path: ["uploads/%1$s/complete", upload_id], body: parsed, - model: OpenAI::Models::Upload, + model: OpenAI::Upload, options: options ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client @parts = OpenAI::Resources::Uploads::Parts.new(client: client) diff --git a/lib/openai/resources/uploads/parts.rb b/lib/openai/resources/uploads/parts.rb index c6f90707..05f10f4c 100644 --- a/lib/openai/resources/uploads/parts.rb +++ b/lib/openai/resources/uploads/parts.rb @@ -4,42 +4,47 @@ module OpenAI module Resources class Uploads class Parts + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Uploads::PartCreateParams} for more details. + # # Adds a - # [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an - # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object. - # A Part represents a chunk of bytes from the file you are trying to upload. + # [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object. + # A Part represents a chunk of bytes from the file you are trying to upload. # - # Each Part can be at most 64 MB, and you can add Parts until you hit the Upload - # maximum of 8 GB. + # Each Part can be at most 64 MB, and you can add Parts until you hit the Upload + # maximum of 8 GB. # - # It is possible to add multiple Parts in parallel. You can decide the intended - # order of the Parts when you - # [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete). + # It is possible to add multiple Parts in parallel. You can decide the intended + # order of the Parts when you + # [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete). 
# - # @param upload_id [String] The ID of the Upload. + # @overload create(upload_id, data:, request_options: {}) # - # @param params [OpenAI::Models::Uploads::PartCreateParams, Hash{Symbol=>Object}] . + # @param upload_id [String] The ID of the Upload. # - # @option params [IO, StringIO] :data The chunk of bytes for this Part. + # @param data [Pathname, StringIO, IO, String, OpenAI::FilePart] The chunk of bytes for this Part. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::Uploads::UploadPart] # + # @see OpenAI::Models::Uploads::PartCreateParams def create(upload_id, params) - parsed, options = OpenAI::Models::Uploads::PartCreateParams.dump_request(params) + parsed, options = OpenAI::Uploads::PartCreateParams.dump_request(params) @client.request( method: :post, - path: ["uploads/%0s/parts", upload_id], + path: ["uploads/%1$s/parts", upload_id], headers: {"content-type" => "multipart/form-data"}, body: parsed, - model: OpenAI::Models::Uploads::UploadPart, + model: OpenAI::Uploads::UploadPart, options: options ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/vector_stores.rb b/lib/openai/resources/vector_stores.rb index 18803daa..d903b9ef 100644 --- a/lib/openai/resources/vector_stores.rb +++ b/lib/openai/resources/vector_stores.rb @@ -9,185 +9,182 @@ class VectorStores # @return [OpenAI::Resources::VectorStores::FileBatches] attr_reader :file_batches - # Create a vector store. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStoreCreateParams} for more details. # - # @param params [OpenAI::Models::VectorStoreCreateParams, Hash{Symbol=>Object}] . + # Create a vector store. # - # @option params [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] :chunking_strategy The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. + # @overload create(chunking_strategy: nil, expires_after: nil, file_ids: nil, metadata: nil, name: nil, request_options: {}) # - # @option params [OpenAI::Models::VectorStoreCreateParams::ExpiresAfter] :expires_after The expiration policy for a vector store. + # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] The chunking strategy used to chunk the file(s). If not set, will use the `auto` # - # @option params [Array] :file_ids A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # the vector store should use. Useful for tools like `file_search` that can access - # files. + # @param expires_after [OpenAI::Models::VectorStoreCreateParams::ExpiresAfter] The expiration policy for a vector store. # - # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # @param file_ids [Array] A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. 
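
[Editor's note] Tying the Uploads changes together, here is an end-to-end sketch of the flow these methods implement: create the Upload, add a Part (note `data` now accepts a `Pathname`, per the `@param` above), then complete it. File name, MIME type, and purpose are illustrative:

```ruby
require "pathname"

file = Pathname("training.jsonl")

upload = client.uploads.create(
  bytes: file.size,
  filename: file.basename.to_s,
  mime_type: "application/jsonl",
  purpose: :"fine-tune"
)

part = client.uploads.parts.create(upload.id, data: file)

done = client.uploads.complete(upload.id, part_ids: [part.id])
puts done.status  # "completed" once the nested File object is ready
```
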
+ # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # - # @option params [String] :name The name of the vector store. + # @param name [String] The name of the vector store. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::VectorStore] # + # @see OpenAI::Models::VectorStoreCreateParams def create(params = {}) - parsed, options = OpenAI::Models::VectorStoreCreateParams.dump_request(params) + parsed, options = OpenAI::VectorStoreCreateParams.dump_request(params) @client.request( method: :post, path: "vector_stores", body: parsed, - model: OpenAI::Models::VectorStore, - options: options + model: OpenAI::VectorStore, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end # Retrieves a vector store. # - # @param vector_store_id [String] The ID of the vector store to retrieve. + # @overload retrieve(vector_store_id, request_options: {}) # - # @param params [OpenAI::Models::VectorStoreRetrieveParams, Hash{Symbol=>Object}] . + # @param vector_store_id [String] The ID of the vector store to retrieve. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::VectorStore] # + # @see OpenAI::Models::VectorStoreRetrieveParams def retrieve(vector_store_id, params = {}) @client.request( method: :get, - path: ["vector_stores/%0s", vector_store_id], - model: OpenAI::Models::VectorStore, - options: params[:request_options] + path: ["vector_stores/%1$s", vector_store_id], + model: OpenAI::VectorStore, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **params[:request_options].to_h} ) end - # Modifies a vector store. + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStoreUpdateParams} for more details. # - # @param vector_store_id [String] The ID of the vector store to modify. + # Modifies a vector store. # - # @param params [OpenAI::Models::VectorStoreUpdateParams, Hash{Symbol=>Object}] . + # @overload update(vector_store_id, expires_after: nil, metadata: nil, name: nil, request_options: {}) # - # @option params [OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter, nil] :expires_after The expiration policy for a vector store. + # @param vector_store_id [String] The ID of the vector store to modify. # - # @option params [Hash{Symbol=>String}, nil] :metadata Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. + # @param expires_after [OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter, nil] The expiration policy for a vector store. # - # Keys are strings with a maximum length of 64 characters. Values are strings with - # a maximum length of 512 characters. + # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # - # @option params [String, nil] :name The name of the vector store. + # @param name [String, nil] The name of the vector store. 
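
[Editor's note] Note the new `options:` pattern above: vector store endpoints now merge in an `"OpenAI-Beta" => "assistants=v2"` header automatically. A creation sketch with an expiry policy (IDs and metadata are placeholders):

```ruby
# Create a vector store that expires seven days after its last activity.
store = client.vector_stores.create(
  name: "support-docs",
  file_ids: ["file_abc123"],                          # placeholder file ID
  expires_after: {anchor: :last_active_at, days: 7},
  metadata: {team: "support"}
)
puts store.id
```
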
# - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::VectorStore] # + # @see OpenAI::Models::VectorStoreUpdateParams def update(vector_store_id, params = {}) - parsed, options = OpenAI::Models::VectorStoreUpdateParams.dump_request(params) + parsed, options = OpenAI::VectorStoreUpdateParams.dump_request(params) @client.request( method: :post, - path: ["vector_stores/%0s", vector_store_id], + path: ["vector_stores/%1$s", vector_store_id], body: parsed, - model: OpenAI::Models::VectorStore, - options: options + model: OpenAI::VectorStore, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStoreListParams} for more details. + # # Returns a list of vector stores. # - # @param params [OpenAI::Models::VectorStoreListParams, Hash{Symbol=>Object}] . + # @overload list(after: nil, before: nil, limit: nil, order: nil, request_options: {}) # - # @option params [String] :after A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place # - # @option params [String] :before A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place # - # @option params [Integer] :limit A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1 # - # @option params [Symbol, OpenAI::Models::VectorStoreListParams::Order] :order Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # @param order [Symbol, OpenAI::Models::VectorStoreListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::CursorPage] + # @return [OpenAI::Internal::CursorPage] # + # @see OpenAI::Models::VectorStoreListParams def list(params = {}) - parsed, options = OpenAI::Models::VectorStoreListParams.dump_request(params) + parsed, options = OpenAI::VectorStoreListParams.dump_request(params) @client.request( method: :get, path: "vector_stores", query: parsed, - page: OpenAI::CursorPage, - model: OpenAI::Models::VectorStore, - options: options + page: OpenAI::Internal::CursorPage, + model: OpenAI::VectorStore, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end # Delete a vector store. # - # @param vector_store_id [String] The ID of the vector store to delete. 
+ # @overload delete(vector_store_id, request_options: {}) # - # @param params [OpenAI::Models::VectorStoreDeleteParams, Hash{Symbol=>Object}] . + # @param vector_store_id [String] The ID of the vector store to delete. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::VectorStoreDeleted] # + # @see OpenAI::Models::VectorStoreDeleteParams def delete(vector_store_id, params = {}) @client.request( method: :delete, - path: ["vector_stores/%0s", vector_store_id], - model: OpenAI::Models::VectorStoreDeleted, - options: params[:request_options] + path: ["vector_stores/%1$s", vector_store_id], + model: OpenAI::VectorStoreDeleted, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **params[:request_options].to_h} ) end + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStoreSearchParams} for more details. + # # Search a vector store for relevant chunks based on a query and file attributes - # filter. + # filter. # - # @param vector_store_id [String] The ID of the vector store to search. + # @overload search(vector_store_id, query:, filters: nil, max_num_results: nil, ranking_options: nil, rewrite_query: nil, request_options: {}) # - # @param params [OpenAI::Models::VectorStoreSearchParams, Hash{Symbol=>Object}] . + # @param vector_store_id [String] The ID of the vector store to search. # - # @option params [String, Array] :query A query string for a search + # @param query [String, Array] A query string for a search # - # @option params [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter] :filters A filter to apply based on file attributes. + # @param filters [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter] A filter to apply based on file attributes. # - # @option params [Integer] :max_num_results The maximum number of results to return. This number should be between 1 and 50 - # inclusive. + # @param max_num_results [Integer] The maximum number of results to return. This number should be between 1 and 50 # - # @option params [OpenAI::Models::VectorStoreSearchParams::RankingOptions] :ranking_options Ranking options for search. + # @param ranking_options [OpenAI::Models::VectorStoreSearchParams::RankingOptions] Ranking options for search. # - # @option params [Boolean] :rewrite_query Whether to rewrite the natural language query for vector search. + # @param rewrite_query [Boolean] Whether to rewrite the natural language query for vector search. 
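
[Editor's note] The `#search` endpoint documented here returns a plain `OpenAI::Internal::Page` of scored results rather than a cursor page. A usage sketch with placeholder IDs; the result attributes (`score`, `filename`) follow this SDK's `VectorStoreSearchResponse` model:

```ruby
results = client.vector_stores.search(
  "vs_abc123",
  query: "How do I rotate an API key?",
  max_num_results: 5
)

results.auto_paging_each do |hit|
  puts format("%.3f  %s", hit.score, hit.filename)
end
```
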
# - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::Page] + # @return [OpenAI::Internal::Page] # + # @see OpenAI::Models::VectorStoreSearchParams def search(vector_store_id, params) - parsed, options = OpenAI::Models::VectorStoreSearchParams.dump_request(params) + parsed, options = OpenAI::VectorStoreSearchParams.dump_request(params) @client.request( method: :post, - path: ["vector_stores/%0s/search", vector_store_id], + path: ["vector_stores/%1$s/search", vector_store_id], body: parsed, - page: OpenAI::Page, + page: OpenAI::Internal::Page, model: OpenAI::Models::VectorStoreSearchResponse, - options: options + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client @files = OpenAI::Resources::VectorStores::Files.new(client: client) diff --git a/lib/openai/resources/vector_stores/file_batches.rb b/lib/openai/resources/vector_stores/file_batches.rb index 3c484618..99d3e7df 100644 --- a/lib/openai/resources/vector_stores/file_batches.rb +++ b/lib/openai/resources/vector_stores/file_batches.rb @@ -4,139 +4,137 @@ module OpenAI module Resources class VectorStores class FileBatches + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStores::FileBatchCreateParams} for more details. + # # Create a vector store file batch. # - # @param vector_store_id [String] The ID of the vector store for which to create a File Batch. + # @overload create(vector_store_id, file_ids:, attributes: nil, chunking_strategy: nil, request_options: {}) # - # @param params [OpenAI::Models::VectorStores::FileBatchCreateParams, Hash{Symbol=>Object}] . + # @param vector_store_id [String] The ID of the vector store for which to create a File Batch. # - # @option params [Array] :file_ids A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - # the vector store should use. Useful for tools like `file_search` that can access - # files. + # @param file_ids [Array] A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that # - # @option params [Hash{Symbol=>String, Float, Boolean}, nil] :attributes Set of 16 key-value pairs that can be attached to an object. This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # - # @option params [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] :chunking_strategy The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. + # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] The chunking strategy used to chunk the file(s). 
If not set, will use the `auto` # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::VectorStores::VectorStoreFileBatch] # + # @see OpenAI::Models::VectorStores::FileBatchCreateParams def create(vector_store_id, params) - parsed, options = OpenAI::Models::VectorStores::FileBatchCreateParams.dump_request(params) + parsed, options = OpenAI::VectorStores::FileBatchCreateParams.dump_request(params) @client.request( method: :post, - path: ["vector_stores/%0s/file_batches", vector_store_id], + path: ["vector_stores/%1$s/file_batches", vector_store_id], body: parsed, - model: OpenAI::Models::VectorStores::VectorStoreFileBatch, - options: options + model: OpenAI::VectorStores::VectorStoreFileBatch, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end # Retrieves a vector store file batch. # - # @param batch_id [String] The ID of the file batch being retrieved. + # @overload retrieve(batch_id, vector_store_id:, request_options: {}) # - # @param params [OpenAI::Models::VectorStores::FileBatchRetrieveParams, Hash{Symbol=>Object}] . + # @param batch_id [String] The ID of the file batch being retrieved. # - # @option params [String] :vector_store_id The ID of the vector store that the file batch belongs to. + # @param vector_store_id [String] The ID of the vector store that the file batch belongs to. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::VectorStores::VectorStoreFileBatch] # + # @see OpenAI::Models::VectorStores::FileBatchRetrieveParams def retrieve(batch_id, params) - parsed, options = OpenAI::Models::VectorStores::FileBatchRetrieveParams.dump_request(params) - vector_store_id = parsed.delete(:vector_store_id) do - raise ArgumentError.new("missing required path argument #{_1}") - end + parsed, options = OpenAI::VectorStores::FileBatchRetrieveParams.dump_request(params) + vector_store_id = + parsed.delete(:vector_store_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end @client.request( method: :get, - path: ["vector_stores/%0s/file_batches/%1s", vector_store_id, batch_id], - model: OpenAI::Models::VectorStores::VectorStoreFileBatch, - options: options + path: ["vector_stores/%1$s/file_batches/%2$s", vector_store_id, batch_id], + model: OpenAI::VectorStores::VectorStoreFileBatch, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end # Cancel a vector store file batch. This attempts to cancel the processing of - # files in this batch as soon as possible. + # files in this batch as soon as possible. # - # @param batch_id [String] The ID of the file batch to cancel. + # @overload cancel(batch_id, vector_store_id:, request_options: {}) # - # @param params [OpenAI::Models::VectorStores::FileBatchCancelParams, Hash{Symbol=>Object}] . + # @param batch_id [String] The ID of the file batch to cancel. # - # @option params [String] :vector_store_id The ID of the vector store that the file batch belongs to. + # @param vector_store_id [String] The ID of the vector store that the file batch belongs to. 
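
[Editor's note] A sketch of the file-batch flow: attach several already-uploaded files in one call, then poll the batch by ID. Note that `#retrieve` takes the batch ID positionally and the vector store ID as a keyword, per the `@overload` above (IDs are placeholders):

```ruby
batch = client.vector_stores.file_batches.create(
  "vs_abc123",
  file_ids: %w[file_1 file_2 file_3]
)

fetched = client.vector_stores.file_batches.retrieve(
  batch.id,
  vector_store_id: "vs_abc123"
)
puts fetched.status  # in_progress / completed / failed / cancelled
```
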
# - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::VectorStores::VectorStoreFileBatch] # + # @see OpenAI::Models::VectorStores::FileBatchCancelParams def cancel(batch_id, params) - parsed, options = OpenAI::Models::VectorStores::FileBatchCancelParams.dump_request(params) - vector_store_id = parsed.delete(:vector_store_id) do - raise ArgumentError.new("missing required path argument #{_1}") - end + parsed, options = OpenAI::VectorStores::FileBatchCancelParams.dump_request(params) + vector_store_id = + parsed.delete(:vector_store_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end @client.request( method: :post, - path: ["vector_stores/%0s/file_batches/%1s/cancel", vector_store_id, batch_id], - model: OpenAI::Models::VectorStores::VectorStoreFileBatch, - options: options + path: ["vector_stores/%1$s/file_batches/%2$s/cancel", vector_store_id, batch_id], + model: OpenAI::VectorStores::VectorStoreFileBatch, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStores::FileBatchListFilesParams} for more details. + # # Returns a list of vector store files in a batch. # - # @param batch_id [String] Path param: The ID of the file batch that the files belong to. + # @overload list_files(batch_id, vector_store_id:, after: nil, before: nil, filter: nil, limit: nil, order: nil, request_options: {}) # - # @param params [OpenAI::Models::VectorStores::FileBatchListFilesParams, Hash{Symbol=>Object}] . + # @param batch_id [String] Path param: The ID of the file batch that the files belong to. # - # @option params [String] :vector_store_id Path param: The ID of the vector store that the files belong to. + # @param vector_store_id [String] Path param: The ID of the vector store that the files belong to. # - # @option params [String] :after Query param: A cursor for use in pagination. `after` is an object ID that - # defines your place in the list. For instance, if you make a list request and - # receive 100 objects, ending with obj_foo, your subsequent call can include - # after=obj_foo in order to fetch the next page of the list. + # @param after [String] Query param: A cursor for use in pagination. `after` is an object ID that define # - # @option params [String] :before Query param: A cursor for use in pagination. `before` is an object ID that - # defines your place in the list. For instance, if you make a list request and - # receive 100 objects, starting with obj_foo, your subsequent call can include - # before=obj_foo in order to fetch the previous page of the list. + # @param before [String] Query param: A cursor for use in pagination. `before` is an object ID that defin # - # @option params [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Filter] :filter Query param: Filter by file status. One of `in_progress`, `completed`, `failed`, - # `cancelled`. + # @param filter [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Filter] Query param: Filter by file status. One of `in_progress`, `completed`, `failed`, # - # @option params [Integer] :limit Query param: A limit on the number of objects to be returned. Limit can range - # between 1 and 100, and the default is 20. + # @param limit [Integer] Query param: A limit on the number of objects to be returned. 
Limit can range be # - # @option params [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Order] :order Query param: Sort order by the `created_at` timestamp of the objects. `asc` for - # ascending order and `desc` for descending order. + # @param order [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Order] Query param: Sort order by the `created_at` timestamp of the objects. `asc` for # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::CursorPage] + # @return [OpenAI::Internal::CursorPage] # + # @see OpenAI::Models::VectorStores::FileBatchListFilesParams def list_files(batch_id, params) - parsed, options = OpenAI::Models::VectorStores::FileBatchListFilesParams.dump_request(params) - vector_store_id = parsed.delete(:vector_store_id) do - raise ArgumentError.new("missing required path argument #{_1}") - end + parsed, options = OpenAI::VectorStores::FileBatchListFilesParams.dump_request(params) + vector_store_id = + parsed.delete(:vector_store_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end @client.request( method: :get, - path: ["vector_stores/%0s/file_batches/%1s/files", vector_store_id, batch_id], + path: ["vector_stores/%1$s/file_batches/%2$s/files", vector_store_id, batch_id], query: parsed, - page: OpenAI::CursorPage, - model: OpenAI::Models::VectorStores::VectorStoreFile, - options: options + page: OpenAI::Internal::CursorPage, + model: OpenAI::VectorStores::VectorStoreFile, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/vector_stores/files.rb b/lib/openai/resources/vector_stores/files.rb index 23eafaa3..8852aed7 100644 --- a/lib/openai/resources/vector_stores/files.rb +++ b/lib/openai/resources/vector_stores/files.rb @@ -4,195 +4,196 @@ module OpenAI module Resources class VectorStores class Files + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStores::FileCreateParams} for more details. + # # Create a vector store file by attaching a - # [File](https://platform.openai.com/docs/api-reference/files) to a - # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object). + # [File](https://platform.openai.com/docs/api-reference/files) to a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object). # - # @param vector_store_id [String] The ID of the vector store for which to create a File. + # @overload create(vector_store_id, file_id:, attributes: nil, chunking_strategy: nil, request_options: {}) # - # @param params [OpenAI::Models::VectorStores::FileCreateParams, Hash{Symbol=>Object}] . + # @param vector_store_id [String] The ID of the vector store for which to create a File. # - # @option params [String] :file_id A [File](https://platform.openai.com/docs/api-reference/files) ID that the - # vector store should use. Useful for tools like `file_search` that can access - # files. + # @param file_id [String] A [File](https://platform.openai.com/docs/api-reference/files) ID that the vecto # - # @option params [Hash{Symbol=>String, Float, Boolean}, nil] :attributes Set of 16 key-value pairs that can be attached to an object. 
This can be useful - # for storing additional information about the object in a structured format, and - # querying for objects via API or the dashboard. Keys are strings with a maximum - # length of 64 characters. Values are strings with a maximum length of 512 - # characters, booleans, or numbers. + # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # - # @option params [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] :chunking_strategy The chunking strategy used to chunk the file(s). If not set, will use the `auto` - # strategy. Only applicable if `file_ids` is non-empty. + # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] The chunking strategy used to chunk the file(s). If not set, will use the `auto` # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::VectorStores::VectorStoreFile] # + # @see OpenAI::Models::VectorStores::FileCreateParams def create(vector_store_id, params) - parsed, options = OpenAI::Models::VectorStores::FileCreateParams.dump_request(params) + parsed, options = OpenAI::VectorStores::FileCreateParams.dump_request(params) @client.request( method: :post, - path: ["vector_stores/%0s/files", vector_store_id], + path: ["vector_stores/%1$s/files", vector_store_id], body: parsed, - model: OpenAI::Models::VectorStores::VectorStoreFile, - options: options + model: OpenAI::VectorStores::VectorStoreFile, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end # Retrieves a vector store file. # - # @param file_id [String] The ID of the file being retrieved. + # @overload retrieve(file_id, vector_store_id:, request_options: {}) # - # @param params [OpenAI::Models::VectorStores::FileRetrieveParams, Hash{Symbol=>Object}] . + # @param file_id [String] The ID of the file being retrieved. # - # @option params [String] :vector_store_id The ID of the vector store that the file belongs to. + # @param vector_store_id [String] The ID of the vector store that the file belongs to. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::VectorStores::VectorStoreFile] # + # @see OpenAI::Models::VectorStores::FileRetrieveParams def retrieve(file_id, params) - parsed, options = OpenAI::Models::VectorStores::FileRetrieveParams.dump_request(params) - vector_store_id = parsed.delete(:vector_store_id) do - raise ArgumentError.new("missing required path argument #{_1}") - end + parsed, options = OpenAI::VectorStores::FileRetrieveParams.dump_request(params) + vector_store_id = + parsed.delete(:vector_store_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end @client.request( method: :get, - path: ["vector_stores/%0s/files/%1s", vector_store_id, file_id], - model: OpenAI::Models::VectorStores::VectorStoreFile, - options: options + path: ["vector_stores/%1$s/files/%2$s", vector_store_id, file_id], + model: OpenAI::VectorStores::VectorStoreFile, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStores::FileUpdateParams} for more details. 
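
[Editor's note] A sketch of attaching one file with an explicit static chunking strategy; the token values shown are OpenAI's documented defaults for the `auto` strategy, and the IDs are placeholders:

```ruby
vs_file = client.vector_stores.files.create(
  "vs_abc123",
  file_id: "file_abc123",
  chunking_strategy: {
    type: :static,
    static: {max_chunk_size_tokens: 800, chunk_overlap_tokens: 400}
  }
)
puts vs_file.status
```
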
+ # # Update attributes on a vector store file. # - # @param file_id [String] Path param: The ID of the file to update attributes. + # @overload update(file_id, vector_store_id:, attributes:, request_options: {}) # - # @param params [OpenAI::Models::VectorStores::FileUpdateParams, Hash{Symbol=>Object}] . + # @param file_id [String] Path param: The ID of the file to update attributes. # - # @option params [String] :vector_store_id Path param: The ID of the vector store the file belongs to. + # @param vector_store_id [String] Path param: The ID of the vector store the file belongs to. # - # @option params [Hash{Symbol=>String, Float, Boolean}, nil] :attributes Body param: Set of 16 key-value pairs that can be attached to an object. This - # can be useful for storing additional information about the object in a - # structured format, and querying for objects via API or the dashboard. Keys are - # strings with a maximum length of 64 characters. Values are strings with a - # maximum length of 512 characters, booleans, or numbers. + # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Body param: Set of 16 key-value pairs that can be attached to an object. This ca # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::VectorStores::VectorStoreFile] # + # @see OpenAI::Models::VectorStores::FileUpdateParams def update(file_id, params) - parsed, options = OpenAI::Models::VectorStores::FileUpdateParams.dump_request(params) - vector_store_id = parsed.delete(:vector_store_id) do - raise ArgumentError.new("missing required path argument #{_1}") - end + parsed, options = OpenAI::VectorStores::FileUpdateParams.dump_request(params) + vector_store_id = + parsed.delete(:vector_store_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end @client.request( method: :post, - path: ["vector_stores/%0s/files/%1s", vector_store_id, file_id], + path: ["vector_stores/%1$s/files/%2$s", vector_store_id, file_id], body: parsed, - model: OpenAI::Models::VectorStores::VectorStoreFile, - options: options + model: OpenAI::VectorStores::VectorStoreFile, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end + # Some parameter documentations has been truncated, see + # {OpenAI::Models::VectorStores::FileListParams} for more details. + # # Returns a list of vector store files. # - # @param vector_store_id [String] The ID of the vector store that the files belong to. + # @overload list(vector_store_id, after: nil, before: nil, filter: nil, limit: nil, order: nil, request_options: {}) # - # @param params [OpenAI::Models::VectorStores::FileListParams, Hash{Symbol=>Object}] . + # @param vector_store_id [String] The ID of the vector store that the files belong to. # - # @option params [String] :after A cursor for use in pagination. `after` is an object ID that defines your place - # in the list. For instance, if you make a list request and receive 100 objects, - # ending with obj_foo, your subsequent call can include after=obj_foo in order to - # fetch the next page of the list. + # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place # - # @option params [String] :before A cursor for use in pagination. `before` is an object ID that defines your place - # in the list. 
For instance, if you make a list request and receive 100 objects, - # starting with obj_foo, your subsequent call can include before=obj_foo in order - # to fetch the previous page of the list. + # @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place # - # @option params [Symbol, OpenAI::Models::VectorStores::FileListParams::Filter] :filter Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. + # @param filter [Symbol, OpenAI::Models::VectorStores::FileListParams::Filter] Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. # - # @option params [Integer] :limit A limit on the number of objects to be returned. Limit can range between 1 and - # 100, and the default is 20. + # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1 # - # @option params [Symbol, OpenAI::Models::VectorStores::FileListParams::Order] :order Sort order by the `created_at` timestamp of the objects. `asc` for ascending - # order and `desc` for descending order. + # @param order [Symbol, OpenAI::Models::VectorStores::FileListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::CursorPage] + # @return [OpenAI::Internal::CursorPage] # + # @see OpenAI::Models::VectorStores::FileListParams def list(vector_store_id, params = {}) - parsed, options = OpenAI::Models::VectorStores::FileListParams.dump_request(params) + parsed, options = OpenAI::VectorStores::FileListParams.dump_request(params) @client.request( method: :get, - path: ["vector_stores/%0s/files", vector_store_id], + path: ["vector_stores/%1$s/files", vector_store_id], query: parsed, - page: OpenAI::CursorPage, - model: OpenAI::Models::VectorStores::VectorStoreFile, - options: options + page: OpenAI::Internal::CursorPage, + model: OpenAI::VectorStores::VectorStoreFile, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end # Delete a vector store file. This will remove the file from the vector store but - # the file itself will not be deleted. To delete the file, use the - # [delete file](https://platform.openai.com/docs/api-reference/files/delete) - # endpoint. + # the file itself will not be deleted. To delete the file, use the + # [delete file](https://platform.openai.com/docs/api-reference/files/delete) + # endpoint. # - # @param file_id [String] The ID of the file to delete. + # @overload delete(file_id, vector_store_id:, request_options: {}) # - # @param params [OpenAI::Models::VectorStores::FileDeleteParams, Hash{Symbol=>Object}] . + # @param file_id [String] The ID of the file to delete. # - # @option params [String] :vector_store_id The ID of the vector store that the file belongs to. + # @param vector_store_id [String] The ID of the vector store that the file belongs to. 
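
[Editor's note] The `attributes` introduced above are filterable key-value tags (strings, numbers, or booleans). A sketch that tags a file and then lists only files that finished processing:

```ruby
client.vector_stores.files.update(
  "file_abc123",
  vector_store_id: "vs_abc123",
  attributes: {category: "policy", year: 2024, active: true}
)

client.vector_stores.files
      .list("vs_abc123", filter: :completed)
      .auto_paging_each { |f| puts f.id }
```
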
# - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # # @return [OpenAI::Models::VectorStores::VectorStoreFileDeleted] # + # @see OpenAI::Models::VectorStores::FileDeleteParams def delete(file_id, params) - parsed, options = OpenAI::Models::VectorStores::FileDeleteParams.dump_request(params) - vector_store_id = parsed.delete(:vector_store_id) do - raise ArgumentError.new("missing required path argument #{_1}") - end + parsed, options = OpenAI::VectorStores::FileDeleteParams.dump_request(params) + vector_store_id = + parsed.delete(:vector_store_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end @client.request( method: :delete, - path: ["vector_stores/%0s/files/%1s", vector_store_id, file_id], - model: OpenAI::Models::VectorStores::VectorStoreFileDeleted, - options: options + path: ["vector_stores/%1$s/files/%2$s", vector_store_id, file_id], + model: OpenAI::VectorStores::VectorStoreFileDeleted, + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end # Retrieve the parsed contents of a vector store file. # - # @param file_id [String] The ID of the file within the vector store. + # @overload content(file_id, vector_store_id:, request_options: {}) # - # @param params [OpenAI::Models::VectorStores::FileContentParams, Hash{Symbol=>Object}] . + # @param file_id [String] The ID of the file within the vector store. # - # @option params [String] :vector_store_id The ID of the vector store. + # @param vector_store_id [String] The ID of the vector store. # - # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options + # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::Page] + # @return [OpenAI::Internal::Page] # + # @see OpenAI::Models::VectorStores::FileContentParams def content(file_id, params) - parsed, options = OpenAI::Models::VectorStores::FileContentParams.dump_request(params) - vector_store_id = parsed.delete(:vector_store_id) do - raise ArgumentError.new("missing required path argument #{_1}") - end + parsed, options = OpenAI::VectorStores::FileContentParams.dump_request(params) + vector_store_id = + parsed.delete(:vector_store_id) do + raise ArgumentError.new("missing required path argument #{_1}") + end @client.request( method: :get, - path: ["vector_stores/%0s/files/%1s/content", vector_store_id, file_id], - page: OpenAI::Page, + path: ["vector_stores/%1$s/files/%2$s/content", vector_store_id, file_id], + page: OpenAI::Internal::Page, model: OpenAI::Models::VectorStores::FileContentResponse, - options: options + options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options} ) end - # @param client [OpenAI::Client] + # @api private # + # @param client [OpenAI::Client] def initialize(client:) @client = client end diff --git a/lib/openai/resources/webhooks.rb b/lib/openai/resources/webhooks.rb new file mode 100644 index 00000000..b5c3a91d --- /dev/null +++ b/lib/openai/resources/webhooks.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +module OpenAI + module Resources + class Webhooks + # Validates that the given payload was sent by OpenAI and parses the payload. 
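
[Editor's note] A sketch of consuming the new Webhooks resource. Note that, despite the comment's wording, the `#unwrap` body shown in this diff only parses and coerces the JSON payload; it performs no signature check, so verify authenticity separately before trusting the event. `raw_request_body` is a hypothetical String taken from your HTTP handler:

```ruby
event = client.webhooks.unwrap(raw_request_body)

case event
when OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent
  puts "response #{event.data.id} finished"
when OpenAI::Models::Webhooks::BatchFailedWebhookEvent
  warn "batch #{event.data.id} failed"
end
```
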
+ # + # @param payload [String] The raw webhook payload as a string + # + # @return [OpenAI::Models::Webhooks::BatchCancelledWebhookEvent, OpenAI::Models::Webhooks::BatchCompletedWebhookEvent, OpenAI::Models::Webhooks::BatchExpiredWebhookEvent, OpenAI::Models::Webhooks::BatchFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent, OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent, OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent, OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent, OpenAI::Models::Webhooks::ResponseFailedWebhookEvent, OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent] + def unwrap(payload) + parsed = JSON.parse(payload, symbolize_names: true) + OpenAI::Internal::Type::Converter.coerce(OpenAI::Models::Webhooks::UnwrapWebhookEvent, parsed) + end + + # @api private + # + # @param client [OpenAI::Client] + def initialize(client:) + @client = client + end + end + end +end diff --git a/lib/openai/stream.rb b/lib/openai/stream.rb deleted file mode 100644 index 801b6247..00000000 --- a/lib/openai/stream.rb +++ /dev/null @@ -1,66 +0,0 @@ -# frozen_string_literal: true - -module OpenAI - # @private - # - # @example - # ```ruby - # stream.for_each do |message| - # puts(message) - # end - # ``` - # - # @example - # ```ruby - # messages = stream.to_enum.take(2) - # - # messages => Array - # ``` - class Stream < OpenAI::BaseStream - # @private - # - # @return [Enumerable] - # - private def iterator - # rubocop:disable Metrics/BlockLength - @iterator ||= OpenAI::Util.chain_fused(@messages) do |y| - consume = false - - @messages.each do |msg| - next if consume - - case msg - in { data: String => data } if data.start_with?("[DONE]") - consume = true - next - in { data: String => data } - case JSON.parse(data, symbolize_names: true) - in { error: error } - message = - case error - in String - error - in { message: String => m } - m - else - "An error occurred during streaming" - end - OpenAI::APIError.for( - url: @url, - status: @status, - body: body, - request: nil, - response: @response, - message: message - ) - in decoded - y << OpenAI::Converter.coerce(@model, decoded) - end - else - end - end - end - # rubocop:enable Metrics/BlockLength - end - end -end diff --git a/lib/openai/util.rb b/lib/openai/util.rb deleted file mode 100644 index 29f6258a..00000000 --- a/lib/openai/util.rb +++ /dev/null @@ -1,659 +0,0 @@ -# frozen_string_literal: true - -module OpenAI - # rubocop:disable Metrics/ModuleLength - - # @private - # - module Util - # @private - # - # @return [Float] - # - def self.monotonic_secs = Process.clock_gettime(Process::CLOCK_MONOTONIC) - - class << self - # @private - # - # @return [String] - # - def arch - case (arch = RbConfig::CONFIG["arch"])&.downcase - in nil - "unknown" - in /aarch64|arm64/ - "arm64" - in /x86_64/ - "x64" - in /arm/ - "arm" - else - "other:#{arch}" - end - end - - # @private - # - # @return [String] - # - def os - case (host = RbConfig::CONFIG["host_os"])&.downcase - in nil - "Unknown" - in /linux/ - "Linux" - in /darwin/ - "MacOS" - in /freebsd/ - "FreeBSD" - in /openbsd/ - "OpenBSD" - in /mswin|mingw|cygwin|ucrt/ - "Windows" - else - "Other:#{host}" - end - end - end - - class << self - # @private - # - # @param input [Object] - # - # @return [Boolean, Object] - # - def 
primitive?(input) - case input - in true | false | Integer | Float | Symbol | String - true - else - false - end - end - - # @private - # - # @param input [Object] - # - # @return [Boolean, Object] - # - def coerce_boolean(input) - case input.is_a?(String) ? input.downcase : input - in Numeric - !input.zero? - in "true" - true - in "false" - false - else - input - end - end - - # @private - # - # @param input [Object] - # - # @raise [ArgumentError] - # @return [Boolean, nil] - # - def coerce_boolean!(input) - case coerce_boolean(input) - in true | false | nil => coerced - coerced - else - raise ArgumentError.new("Unable to coerce #{input.inspect} into boolean value") - end - end - - # @private - # - # @param input [Object] - # - # @return [Integer, Object] - # - def coerce_integer(input) - case input - in true - 1 - in false - 0 - else - Integer(input, exception: false) || input - end - end - - # @private - # - # @param input [Object] - # - # @return [Float, Object] - # - def coerce_float(input) - case input - in true - 1.0 - in false - 0.0 - else - Float(input, exception: false) || input - end - end - - # @private - # - # @param input [Object] - # - # @return [Hash{Object=>Object}, Object] - # - def coerce_hash(input) - case input - in NilClass | Array | Set | Enumerator - input - else - input.respond_to?(:to_h) ? input.to_h : input - end - end - end - - # Use this to indicate that a value should be explicitly removed from a data - # structure when using `OpenAI::Util.deep_merge`. - # - # e.g. merging `{a: 1}` and `{a: OMIT}` should produce `{}`, where merging - # `{a: 1}` and `{}` would produce `{a: 1}`. - OMIT = Object.new.freeze - - class << self - # @private - # - # @param lhs [Object] - # @param rhs [Object] - # @param concat [Boolean] - # - # @return [Object] - # - private def deep_merge_lr(lhs, rhs, concat: false) - case [lhs, rhs, concat] - in [Hash, Hash, _] - # rubocop:disable Style/YodaCondition - rhs_cleaned = rhs.reject { |_, val| OMIT == val } - lhs - .reject { |key, _| OMIT == rhs[key] } - .merge(rhs_cleaned) do |_, old_val, new_val| - deep_merge_lr(old_val, new_val, concat: concat) - end - # rubocop:enable Style/YodaCondition - in [Array, Array, true] - lhs.concat(rhs) - else - rhs - end - end - - # @private - # - # Recursively merge one hash with another. If the values at a given key are not - # both hashes, just take the new value. - # - # @param values [Array] - # - # @param sentinel [Object, nil] the value to return if no values are provided. - # - # @param concat [Boolean] whether to merge sequences by concatenation. - # - # @return [Object] - # - def deep_merge(*values, sentinel: nil, concat: false) - case values - in [value, *values] - values.reduce(value) do |acc, val| - deep_merge_lr(acc, val, concat: concat) - end - else - sentinel - end - end - - # @private - # - # @param data [Hash{Symbol=>Object}, Array, Object] - # @param pick [Symbol, Integer, Array, nil] - # @param sentinel [Object, nil] - # @param blk [Proc, nil] - # - # @return [Object, nil] - # - def dig(data, pick, sentinel = nil, &blk) - case [data, pick, blk] - in [_, nil, nil] - data - in [Hash, Symbol, _] | [Array, Integer, _] - blk.nil? ? data.fetch(pick, sentinel) : data.fetch(pick, &blk) - in [Hash | Array, Array, _] - pick.reduce(data) do |acc, key| - case acc - in Hash if acc.key?(key) - acc.fetch(key) - in Array if key.is_a?(Integer) && key < acc.length - acc[key] - else - return blk.nil? ? sentinel : blk.call - end - end - in _ - blk.nil? ? 
sentinel : blk.call - end - end - end - - class << self - # @private - # - # @param uri [URI::Generic] - # - # @return [String] - # - def uri_origin(uri) - "#{uri.scheme}://#{uri.host}#{uri.port == uri.default_port ? '' : ":#{uri.port}"}" - end - - # @private - # - # @param path [String, Array] - # - # @return [String] - # - def interpolate_path(path) - case path - in String - path - in [] - "" - in [String, *interpolations] - encoded = interpolations.map { ERB::Util.url_encode(_1) } - path.first % encoded - end - end - end - - class << self - # @private - # - # @param query [String, nil] - # - # @return [Hash{String=>Array}] - # - def decode_query(query) - CGI.parse(query.to_s) - end - - # @private - # - # @param query [Hash{String=>Array, String, nil}, nil] - # - # @return [String, nil] - # - def encode_query(query) - query.to_h.empty? ? nil : URI.encode_www_form(query) - end - end - - class << self - # @private - # - # @param url [URI::Generic, String] - # - # @return [Hash{Symbol=>String, Integer, nil}] - # - def parse_uri(url) - parsed = URI::Generic.component.zip(URI.split(url)).to_h - {**parsed, query: decode_query(parsed.fetch(:query))} - end - - # @private - # - # @param parsed [Hash{Symbol=>String, Integer, nil}] . - # - # @option parsed [String, nil] :scheme - # - # @option parsed [String, nil] :host - # - # @option parsed [Integer, nil] :port - # - # @option parsed [String, nil] :path - # - # @option parsed [Hash{String=>Array}] :query - # - # @return [URI::Generic] - # - def unparse_uri(parsed) - URI::Generic.build(**parsed, query: encode_query(parsed.fetch(:query))) - end - - # @private - # - # @param lhs [Hash{Symbol=>String, Integer, nil}] . - # - # @option lhs [String, nil] :scheme - # - # @option lhs [String, nil] :host - # - # @option lhs [Integer, nil] :port - # - # @option lhs [String, nil] :path - # - # @option lhs [Hash{String=>Array}] :query - # - # @param rhs [Hash{Symbol=>String, Integer, nil}] . - # - # @option rhs [String, nil] :scheme - # - # @option rhs [String, nil] :host - # - # @option rhs [Integer, nil] :port - # - # @option rhs [String, nil] :path - # - # @option rhs [Hash{String=>Array}] :query - # - # @return [URI::Generic] - # - def join_parsed_uri(lhs, rhs) - base_path, base_query = lhs.fetch_values(:path, :query) - slashed = base_path.end_with?("/") ? base_path : "#{base_path}/" - - parsed_path, parsed_query = parse_uri(rhs.fetch(:path)).fetch_values(:path, :query) - override = URI::Generic.build(**rhs.slice(:scheme, :host, :port), path: parsed_path) - - joined = URI.join(URI::Generic.build(lhs.except(:path, :query)), slashed, override) - query = deep_merge( - joined.path == base_path ? base_query : {}, - parsed_query, - rhs[:query].to_h, - concat: true - ) - - joined.query = encode_query(query) - joined - end - end - - class << self - # @private - # - # @param headers [Hash{String=>String, Integer, Array, nil}] - # - # @return [Hash{String=>String}] - # - def normalized_headers(*headers) - {}.merge(*headers.compact).to_h do |key, val| - case val - in Array - val.map { _1.to_s.strip }.join(", ") - else - val&.to_s&.strip - end - [key.downcase, val] - end - end - end - - class << self - # @private - # - # @param io [StringIO] - # @param boundary [String] - # @param key [Symbol, String] - # @param val [Object] - # - private def encode_multipart_formdata(io, boundary:, key:, val:) - io << "--#{boundary}\r\n" - io << "Content-Disposition: form-data" - unless key.nil? 
- name = ERB::Util.url_encode(key.to_s) - io << "; name=\"#{name}\"" - end - if val.is_a?(IO) - filename = ERB::Util.url_encode(File.basename(val.to_path)) - io << "; filename=\"#{filename}\"" - end - io << "\r\n" - case val - in IO | StringIO - io << "Content-Type: application/octet-stream\r\n\r\n" - IO.copy_stream(val, io) - in String - io << "Content-Type: application/octet-stream\r\n\r\n" - io << val.to_s - in true | false | Integer | Float | Symbol - io << "Content-Type: text/plain\r\n\r\n" - io << val.to_s - else - io << "Content-Type: application/json\r\n\r\n" - io << JSON.fast_generate(val) - end - io << "\r\n" - end - - # @private - # - # @param headers [Hash{String=>String}] - # @param body [Object] - # - # @return [Object] - # - def encode_content(headers, body) - content_type = headers["content-type"] - case [content_type, body] - in ["application/json", Hash | Array] - [headers, JSON.fast_generate(body)] - in [%r{^multipart/form-data}, Hash | IO | StringIO] - boundary = SecureRandom.urlsafe_base64(60) - strio = StringIO.new.tap do |io| - case body - in Hash - body.each do |key, val| - case val - in Array if val.all? { primitive?(_1) } - val.each do |v| - encode_multipart_formdata(io, boundary: boundary, key: key, val: v) - end - else - encode_multipart_formdata(io, boundary: boundary, key: key, val: val) - end - end - else - encode_multipart_formdata(io, boundary: boundary, key: nil, val: body) - end - io << "--#{boundary}--\r\n" - io.rewind - end - headers = { - **headers, - "content-type" => "#{content_type}; boundary=#{boundary}", - "transfer-encoding" => "chunked" - } - [headers, strio] - in [_, StringIO] - [headers, body.string] - in [_, IO] - headers = {**headers, "transfer-encoding" => "chunked"} - [headers, body] - else - [headers, body] - end - end - - # @private - # - # @param headers [Hash{String=>String}, Net::HTTPHeader] - # @param stream [Enumerable] - # @param suppress_error [Boolean] - # - # @raise [JSON::ParserError] - # @return [Object] - # - def decode_content(headers, stream:, suppress_error: false) - case headers["content-type"] - in %r{^application/(?:vnd\.api\+)?json} - json = stream.to_a.join - begin - JSON.parse(json, symbolize_names: true) - rescue JSON::ParserError => e - raise e unless suppress_error - json - end - in %r{^text/event-stream} - lines = decode_lines(stream) - decode_sse(lines) - in %r{^application/(?:x-)?jsonl} - decode_lines(stream) - in %r{^text/} - stream.to_a.join - else - # TODO: parsing other response types - StringIO.new(stream.to_a.join) - end - end - end - - class << self - # @private - # - # https://doc.rust-lang.org/std/iter/trait.FusedIterator.html - # - # @param enum [Enumerable] - # @param external [Boolean] - # @param close [Proc] - # - # @return [Enumerable] - # - def fused_enum(enum, external: false, &close) - fused = false - iter = Enumerator.new do |y| - next if fused - - fused = true - if external - loop { y << enum.next } - else - enum.each(&y) - end - ensure - close&.call - close = nil - end - - iter.define_singleton_method(:rewind) do - fused = true - self - end - iter - end - - # @private - # - # @param enum [Enumerable, nil] - # - def close_fused!(enum) - return unless enum.is_a?(Enumerator) - - # rubocop:disable Lint/UnreachableLoop - enum.rewind.each { break } - # rubocop:enable Lint/UnreachableLoop - end - - # @private - # - # @param enum [Enumerable, nil] - # @param blk [Proc] - # - def chain_fused(enum, &blk) - iter = Enumerator.new { blk.call(_1) } - fused_enum(iter) { close_fused!(enum) } - end - end - - 
class << self - # @private - # - # @param enum [Enumerable] - # - # @return [Enumerable] - # - def decode_lines(enum) - re = /(\r\n|\r|\n)/ - buffer = String.new.b - cr_seen = nil - - chain_fused(enum) do |y| - enum.each do |row| - buffer << row - while (match = re.match(buffer, cr_seen.to_i)) - case [match.captures.first, cr_seen] - in ["\r", nil] - cr_seen = match.end(1) - next - in ["\r" | "\r\n", Integer] - y << buffer.slice!(..(cr_seen.pred)) - else - y << buffer.slice!(..(match.end(1).pred)) - end - cr_seen = nil - end - end - - y << buffer.slice!(..(cr_seen.pred)) unless cr_seen.nil? - y << buffer unless buffer.empty? - end - end - - # @private - # - # https://html.spec.whatwg.org/multipage/server-sent-events.html#parsing-an-event-stream - # - # @param lines [Enumerable] - # - # @return [Hash{Symbol=>Object}] - # - def decode_sse(lines) - # rubocop:disable Metrics/BlockLength - chain_fused(lines) do |y| - blank = {event: nil, data: nil, id: nil, retry: nil} - current = {} - - lines.each do |line| - case line.sub(/\R$/, "") - in "" - next if current.empty? - y << {**blank, **current} - current = {} - in /^:/ - next - in /^([^:]+):\s?(.*)$/ - field, value = Regexp.last_match.captures - case field - in "event" - current.merge!(event: value) - in "data" - (current[:data] ||= String.new.b) << value << "\n" - in "id" unless value.include?("\0") - current.merge!(id: value) - in "retry" if /^\d+$/ =~ value - current.merge!(retry: Integer(value)) - else - end - else - end - end - # rubocop:enable Metrics/BlockLength - - y << {**blank, **current} unless current.empty? - end - end - end - end - - # rubocop:enable Metrics/ModuleLength -end diff --git a/lib/openai/version.rb b/lib/openai/version.rb index 804492b4..345bbb9b 100644 --- a/lib/openai/version.rb +++ b/lib/openai/version.rb @@ -1,5 +1,5 @@ # frozen_string_literal: true module OpenAI - VERSION = "0.0.1-alpha.0" + VERSION = "0.19.0" end diff --git a/manifest.yaml b/manifest.yaml index e2306edd..556686f5 100644 --- a/manifest.yaml +++ b/manifest.yaml @@ -1,10 +1,12 @@ dependencies: + - English - cgi - date - erb - etc - json - net/http + - pathname - rbconfig - securerandom - set diff --git a/openai.gemspec b/openai.gemspec index e2cf1559..cc0a7426 100644 --- a/openai.gemspec +++ b/openai.gemspec @@ -8,12 +8,21 @@ Gem::Specification.new do |s| s.summary = "Ruby library to access the OpenAI API" s.authors = ["OpenAI"] s.email = "support@openai.com" - s.files = Dir["lib/**/*.rb", "rbi/**/*.rbi", "sig/**/*.rbs", "manifest.yaml"] - s.extra_rdoc_files = ["README.md"] - s.required_ruby_version = ">= 3.0.0" - s.add_dependency "connection_pool" - s.homepage = "https://gemdocs.org/gems/openai/latest" + s.homepage = "https://gemdocs.org/gems/openai" s.metadata["homepage_uri"] = s.homepage s.metadata["source_code_uri"] = "https://github.com/openai/openai-ruby" - s.metadata["rubygems_mfa_required"] = "false" + s.metadata["rubygems_mfa_required"] = false.to_s + s.required_ruby_version = ">= 3.2.0" + + s.files = Dir[ + "lib/**/*.rb", + "rbi/**/*.rbi", + "sig/**/*.rbs", + "manifest.yaml", + "SECURITY.md", + "CHANGELOG.md", + ".ignore" + ] + s.extra_rdoc_files = ["README.md"] + s.add_dependency "connection_pool" end diff --git a/rbi/lib/openai/base_client.rbi b/rbi/lib/openai/base_client.rbi deleted file mode 100644 index ae1f372e..00000000 --- a/rbi/lib/openai/base_client.rbi +++ /dev/null @@ -1,175 +0,0 @@ -# typed: strong - -module OpenAI - class BaseClient - abstract! 
- - RequestComponentsShape = T.type_alias do - { - method: Symbol, - path: T.any(String, T::Array[String]), - query: T.nilable(T::Hash[String, T.nilable(T.any(T::Array[String], String))]), - headers: T.nilable( - T::Hash[String, - T.nilable( - T.any( - String, - Integer, - T::Array[T.nilable(T.any(String, Integer))] - ) - )] - ), - body: T.nilable(T.anything), - unwrap: T.nilable(Symbol), - page: T.nilable(T::Class[OpenAI::BasePage[OpenAI::BaseModel]]), - stream: T.nilable(T::Class[OpenAI::BaseStream[OpenAI::BaseModel]]), - model: T.nilable(OpenAI::Converter::Input), - options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - } - end - - RequestInputShape = T.type_alias do - { - method: Symbol, - url: URI::Generic, - headers: T::Hash[String, String], - body: T.anything, - max_retries: Integer, - timeout: Float - } - end - - MAX_REDIRECTS = 20 - - PLATFORM_HEADERS = T::Hash[String, String] - - class << self - sig { params(req: OpenAI::BaseClient::RequestComponentsShape).void } - def validate!(req) - end - - sig do - params(status: Integer, headers: T.any(T::Hash[String, String], Net::HTTPHeader)).returns(T::Boolean) - end - def should_retry?(status, headers:) - end - - sig do - params( - request: OpenAI::BaseClient::RequestInputShape, - status: Integer, - response_headers: T.any(T::Hash[String, String], Net::HTTPHeader) - ) - .returns(OpenAI::BaseClient::RequestInputShape) - end - def follow_redirect(request, status:, response_headers:) - end - end - - sig { returns(T.anything) } - def requester - end - - sig { params(_: T.anything).returns(T.anything) } - def requester=(_) - end - - sig do - params( - base_url: String, - timeout: Float, - max_retries: Integer, - initial_retry_delay: Float, - max_retry_delay: Float, - headers: T::Hash[String, - T.nilable(T.any(String, Integer, T::Array[T.nilable(T.any(String, Integer))]))], - idempotency_header: T.nilable(String) - ) - .returns(T.attached_class) - end - def self.new( - base_url:, - timeout: 0.0, - max_retries: 0, - initial_retry_delay: 0.0, - max_retry_delay: 0.0, - headers: {}, - idempotency_header: nil - ) - end - - sig { overridable.returns(T::Hash[String, String]) } - private def auth_headers - end - - sig { returns(String) } - private def generate_idempotency_key - end - - sig do - overridable - .params(req: OpenAI::BaseClient::RequestComponentsShape, opts: T::Hash[Symbol, T.anything]) - .returns(OpenAI::BaseClient::RequestInputShape) - end - private def build_request(req, opts) - end - - sig { params(headers: T::Hash[String, String], retry_count: Integer).returns(Float) } - private def retry_delay(headers, retry_count:) - end - - sig do - params( - request: OpenAI::BaseClient::RequestInputShape, - redirect_count: Integer, - retry_count: Integer, - send_retry_header: T::Boolean - ) - .returns([Integer, Net::HTTPResponse, T::Enumerable[String]]) - end - private def send_request(request, redirect_count:, retry_count:, send_retry_header:) - end - - sig do - params( - method: Symbol, - path: T.any(String, T::Array[String]), - query: T.nilable(T::Hash[String, T.nilable(T.any(T::Array[String], String))]), - headers: T.nilable( - T::Hash[String, - T.nilable( - T.any( - String, - Integer, - T::Array[T.nilable(T.any(String, Integer))] - ) - )] - ), - body: T.nilable(T.anything), - unwrap: T.nilable(Symbol), - page: T.nilable(T::Class[OpenAI::BasePage[OpenAI::BaseModel]]), - stream: T.nilable(T::Class[OpenAI::BaseStream[OpenAI::BaseModel]]), - model: T.nilable(OpenAI::Converter::Input), - options: 
T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(T.anything) - end - def request( - method, - path, - query: {}, - headers: {}, - body: nil, - unwrap: nil, - page: nil, - stream: nil, - model: OpenAI::Unknown, - options: {} - ) - end - - sig { returns(String) } - def inspect - end - end -end diff --git a/rbi/lib/openai/base_model.rbi b/rbi/lib/openai/base_model.rbi deleted file mode 100644 index f64d87e2..00000000 --- a/rbi/lib/openai/base_model.rbi +++ /dev/null @@ -1,491 +0,0 @@ -# typed: strong - -module OpenAI - module Converter - abstract! - - Input = T.type_alias { T.any(OpenAI::Converter, T::Class[T.anything]) } - - sig { overridable.params(value: T.anything).returns(T.anything) } - def coerce(value) - end - - sig { overridable.params(value: T.anything).returns(T.anything) } - def dump(value) - end - - sig do - overridable - .params(value: T.anything) - .returns(T.any([T::Boolean, T.anything, NilClass], [T::Boolean, T::Boolean, Integer])) - end - def try_strict_coerce(value) - end - - class << self - sig do - params( - spec: T.any( - { - const: T.nilable(T.any(NilClass, T::Boolean, Integer, Float, Symbol)), - enum: T.nilable(T.proc.returns(OpenAI::Converter::Input)), - union: T.nilable(T.proc.returns(OpenAI::Converter::Input)) - }, - T.proc.returns(OpenAI::Converter::Input), - OpenAI::Converter::Input - ) - ) - .returns(T.proc.returns(T.anything).void) - end - def self.type_info(spec) - end - - sig { params(target: OpenAI::Converter::Input, value: T.anything).returns(T.anything) } - def self.coerce(target, value) - end - - sig { params(target: OpenAI::Converter::Input, value: T.anything).returns(T.anything) } - def self.dump(target, value) - end - - sig { params(target: OpenAI::Converter::Input, value: T.anything).returns(T.anything) } - def self.try_strict_coerce(target, value) - end - end - end - - class Unknown - abstract! - - extend OpenAI::Converter - - sig { params(other: T.anything).returns(T::Boolean) } - def self.===(other) - end - - sig { params(other: T.anything).returns(T::Boolean) } - def self.==(other) - end - - class << self - sig { override.params(value: T.anything).returns(T.anything) } - def coerce(value) - end - - sig { override.params(value: T.anything).returns(T.anything) } - def dump(value) - end - - sig do - override - .params(value: T.anything) - .returns(T.any([T::Boolean, T.anything, NilClass], [T::Boolean, T::Boolean, Integer])) - end - def try_strict_coerce(value) - end - end - end - - class BooleanModel - abstract! - - extend OpenAI::Converter - - sig { params(other: T.anything).returns(T::Boolean) } - def self.===(other) - end - - sig { params(other: T.anything).returns(T::Boolean) } - def self.==(other) - end - - class << self - sig { override.params(value: T.any(T::Boolean, T.anything)).returns(T.any(T::Boolean, T.anything)) } - def coerce(value) - end - - sig { override.params(value: T.any(T::Boolean, T.anything)).returns(T.any(T::Boolean, T.anything)) } - def dump(value) - end - - sig do - override - .params(value: T.anything) - .returns(T.any([T::Boolean, T.anything, NilClass], [T::Boolean, T::Boolean, Integer])) - end - def try_strict_coerce(value) - end - end - end - - class Enum - abstract! - - extend OpenAI::Converter - - class << self - sig { overridable.returns(T::Array[T.any(NilClass, T::Boolean, Integer, Float, Symbol)]) } - def values - end - - sig { void } - private def finalize! 
- end - end - - sig { params(other: T.anything).returns(T::Boolean) } - def self.===(other) - end - - sig { params(other: T.anything).returns(T::Boolean) } - def self.==(other) - end - - class << self - sig { override.params(value: T.any(String, Symbol, T.anything)).returns(T.any(Symbol, T.anything)) } - def coerce(value) - end - - sig { override.params(value: T.any(Symbol, T.anything)).returns(T.any(Symbol, T.anything)) } - def dump(value) - end - - sig do - override - .params(value: T.anything) - .returns(T.any([T::Boolean, T.anything, NilClass], [T::Boolean, T::Boolean, Integer])) - end - def try_strict_coerce(value) - end - end - end - - class Union - abstract! - - extend OpenAI::Converter - - class << self - sig { returns(T::Array[[T.nilable(Symbol), Proc]]) } - private def known_variants - end - - sig { overridable.returns(T::Array[[T.nilable(Symbol), T.anything]]) } - protected def variants - end - - sig { params(property: Symbol).void } - private def discriminator(property) - end - - sig do - params( - key: T.any( - Symbol, - T::Hash[Symbol, T.anything], - T.proc.returns(OpenAI::Converter::Input), - OpenAI::Converter::Input - ), - spec: T.any( - T::Hash[Symbol, T.anything], - T.proc.returns(OpenAI::Converter::Input), - OpenAI::Converter::Input - ) - ) - .void - end - private def variant(key, spec = nil) - end - - sig { params(value: T.anything).returns(T.nilable(OpenAI::Converter::Input)) } - private def resolve_variant(value) - end - end - - sig { params(other: T.anything).returns(T::Boolean) } - def self.===(other) - end - - sig { params(other: T.anything).returns(T::Boolean) } - def self.==(other) - end - - class << self - sig { override.params(value: T.anything).returns(T.anything) } - def coerce(value) - end - - sig { override.params(value: T.anything).returns(T.anything) } - def dump(value) - end - - sig do - override - .params(value: T.anything) - .returns(T.any([T::Boolean, T.anything, NilClass], [T::Boolean, T::Boolean, Integer])) - end - def try_strict_coerce(value) - end - end - end - - class ArrayOf - abstract! - - include OpenAI::Converter - - sig { params(other: T.anything).returns(T::Boolean) } - def ===(other) - end - - sig { params(other: T.anything).returns(T::Boolean) } - def ==(other) - end - - sig do - override - .params(value: T.any(T::Enumerable[T.anything], T.anything)) - .returns(T.any(T::Array[T.anything], T.anything)) - end - def coerce(value) - end - - sig do - override - .params(value: T.any(T::Enumerable[T.anything], T.anything)) - .returns(T.any(T::Array[T.anything], T.anything)) - end - def dump(value) - end - - sig do - override - .params(value: T.anything) - .returns(T.any([T::Boolean, T.anything, NilClass], [T::Boolean, T::Boolean, Integer])) - end - def try_strict_coerce(value) - end - - sig { returns(OpenAI::Converter::Input) } - protected def item_type - end - - sig do - params( - type_info: T.any( - T::Hash[Symbol, T.anything], - T.proc.returns(OpenAI::Converter::Input), - OpenAI::Converter::Input - ), - spec: T::Hash[Symbol, T.anything] - ) - .returns(T.attached_class) - end - def self.new(type_info, spec = {}) - end - end - - class HashOf - abstract! 
- - include OpenAI::Converter - - sig { params(other: T.anything).returns(T::Boolean) } - def ===(other) - end - - sig { params(other: T.anything).returns(T::Boolean) } - def ==(other) - end - - sig do - override - .params(value: T.any(T::Hash[T.anything, T.anything], T.anything)) - .returns(T.any(T::Hash[Symbol, T.anything], T.anything)) - end - def coerce(value) - end - - sig do - override - .params(value: T.any(T::Hash[T.anything, T.anything], T.anything)) - .returns(T.any(T::Hash[Symbol, T.anything], T.anything)) - end - def dump(value) - end - - sig do - override - .params(value: T.anything) - .returns(T.any([T::Boolean, T.anything, NilClass], [T::Boolean, T::Boolean, Integer])) - end - def try_strict_coerce(value) - end - - sig { returns(OpenAI::Converter::Input) } - protected def item_type - end - - sig do - params( - type_info: T.any( - T::Hash[Symbol, T.anything], - T.proc.returns(OpenAI::Converter::Input), - OpenAI::Converter::Input - ), - spec: T::Hash[Symbol, T.anything] - ) - .returns(T.attached_class) - end - def self.new(type_info, spec = {}) - end - end - - class BaseModel - abstract! - - extend OpenAI::Converter - - KnownFieldShape = T.type_alias { {mode: T.nilable(Symbol), required: T::Boolean} } - - class << self - sig do - returns( - T::Hash[Symbol, - T.all( - OpenAI::BaseModel::KnownFieldShape, - {type_fn: T.proc.returns(OpenAI::Converter::Input)} - )] - ) - end - def known_fields - end - - sig do - returns(T::Hash[Symbol, T.all(OpenAI::BaseModel::KnownFieldShape, {type: OpenAI::Converter::Input})]) - end - def fields - end - - sig { returns(T::Hash[Symbol, T.proc.returns(T::Class[T.anything])]) } - def defaults - end - - sig do - params( - name_sym: Symbol, - required: T::Boolean, - type_info: T.any( - { - const: T.nilable(T.any(NilClass, T::Boolean, Integer, Float, Symbol)), - enum: T.nilable(T.proc.returns(OpenAI::Converter::Input)), - union: T.nilable(T.proc.returns(OpenAI::Converter::Input)), - api_name: Symbol, - nil?: T::Boolean - }, - T.proc.returns(OpenAI::Converter::Input), - OpenAI::Converter::Input - ), - spec: T::Hash[Symbol, T.anything] - ) - .void - end - private def add_field(name_sym, required:, type_info:, spec:) - end - - sig do - params( - name_sym: Symbol, - type_info: T.any( - T::Hash[Symbol, T.anything], - T.proc.returns(OpenAI::Converter::Input), - OpenAI::Converter::Input - ), - spec: T::Hash[Symbol, T.anything] - ) - .void - end - def required(name_sym, type_info, spec = {}) - end - - sig do - params( - name_sym: Symbol, - type_info: T.any( - T::Hash[Symbol, T.anything], - T.proc.returns(OpenAI::Converter::Input), - OpenAI::Converter::Input - ), - spec: T::Hash[Symbol, T.anything] - ) - .void - end - def optional(name_sym, type_info, spec = {}) - end - - sig { params(blk: T.proc.void).void } - private def request_only(&blk) - end - - sig { params(blk: T.proc.void).void } - private def response_only(&blk) - end - end - - sig { params(other: T.anything).returns(T::Boolean) } - def ==(other) - end - - class << self - sig do - override - .params(value: T.any(OpenAI::BaseModel, T::Hash[T.anything, T.anything], T.anything)) - .returns(T.any(T.attached_class, T.anything)) - end - def coerce(value) - end - - sig do - override - .params(value: T.any(T.attached_class, T.anything)) - .returns(T.any(T::Hash[T.anything, T.anything], T.anything)) - end - def dump(value) - end - - sig do - override - .params(value: T.anything) - .returns(T.any([T::Boolean, T.anything, NilClass], [T::Boolean, T::Boolean, Integer])) - end - def try_strict_coerce(value) - end - 
end - - sig { params(key: Symbol).returns(T.nilable(T.anything)) } - def [](key) - end - - sig { overridable.returns(T::Hash[Symbol, T.anything]) } - def to_h - end - - alias_method :to_hash, :to_h - - sig { params(keys: T.nilable(T::Array[Symbol])).returns(T::Hash[Symbol, T.anything]) } - def deconstruct_keys(keys) - end - - sig { params(data: T.any(T::Hash[Symbol, T.anything], T.self_type)).returns(T.attached_class) } - def self.new(data = {}) - end - - sig { returns(String) } - def to_s - end - - sig { returns(String) } - def inspect - end - end -end diff --git a/rbi/lib/openai/base_page.rbi b/rbi/lib/openai/base_page.rbi deleted file mode 100644 index b4a17615..00000000 --- a/rbi/lib/openai/base_page.rbi +++ /dev/null @@ -1,39 +0,0 @@ -# typed: strong - -module OpenAI - module BasePage - abstract! - - Elem = type_member(:out) - - sig { overridable.returns(T::Boolean) } - def next_page? - end - - sig { overridable.returns(T.self_type) } - def next_page - end - - sig { overridable.params(blk: T.proc.params(arg0: Elem).void).void } - def auto_paging_each(&blk) - end - - sig { returns(T::Enumerable[Elem]) } - def to_enum - end - - alias_method :enum_for, :to_enum - - sig do - params( - client: OpenAI::BaseClient, - req: OpenAI::BaseClient::RequestComponentsShape, - headers: T.any(T::Hash[String, String], Net::HTTPHeader), - page_data: T.anything - ) - .void - end - def initialize(client:, req:, headers:, page_data:) - end - end -end diff --git a/rbi/lib/openai/base_stream.rbi b/rbi/lib/openai/base_stream.rbi deleted file mode 100644 index edd7627b..00000000 --- a/rbi/lib/openai/base_stream.rbi +++ /dev/null @@ -1,38 +0,0 @@ -# typed: strong - -module OpenAI - class BaseStream - Elem = type_member(:out) - - sig do - params( - model: T.any(T::Class[T.anything], OpenAI::Converter), - url: URI::Generic, - status: Integer, - response: Net::HTTPResponse, - messages: T::Enumerable[OpenAI::Util::SSEMessage] - ) - .returns(T.attached_class) - end - def self.new(model:, url:, status:, response:, messages:) - end - - sig { overridable.returns(T::Enumerable[Elem]) } - private def iterator - end - - sig { void } - def close - end - - sig { params(blk: T.proc.params(arg0: Elem).void).void } - def for_each(&blk) - end - - sig { returns(T::Enumerable[Elem]) } - def to_enum - end - - alias_method :enum_for, :to_enum - end -end diff --git a/rbi/lib/openai/client.rbi b/rbi/lib/openai/client.rbi deleted file mode 100644 index ba6253be..00000000 --- a/rbi/lib/openai/client.rbi +++ /dev/null @@ -1,110 +0,0 @@ -# typed: strong - -module OpenAI - class Client < OpenAI::BaseClient - DEFAULT_MAX_RETRIES = 2 - - DEFAULT_TIMEOUT_IN_SECONDS = T.let(600.0, Float) - - DEFAULT_INITIAL_RETRY_DELAY = T.let(0.5, Float) - - DEFAULT_MAX_RETRY_DELAY = T.let(8.0, Float) - - sig { returns(String) } - def api_key - end - - sig { returns(T.nilable(String)) } - def organization - end - - sig { returns(T.nilable(String)) } - def project - end - - sig { returns(OpenAI::Resources::Completions) } - def completions - end - - sig { returns(OpenAI::Resources::Chat) } - def chat - end - - sig { returns(OpenAI::Resources::Embeddings) } - def embeddings - end - - sig { returns(OpenAI::Resources::Files) } - def files - end - - sig { returns(OpenAI::Resources::Images) } - def images - end - - sig { returns(OpenAI::Resources::Audio) } - def audio - end - - sig { returns(OpenAI::Resources::Moderations) } - def moderations - end - - sig { returns(OpenAI::Resources::Models) } - def models - end - - sig { returns(OpenAI::Resources::FineTuning) } - 
def fine_tuning - end - - sig { returns(OpenAI::Resources::VectorStores) } - def vector_stores - end - - sig { returns(OpenAI::Resources::Beta) } - def beta - end - - sig { returns(OpenAI::Resources::Batches) } - def batches - end - - sig { returns(OpenAI::Resources::Uploads) } - def uploads - end - - sig { returns(OpenAI::Resources::Responses) } - def responses - end - - sig { override.returns(T::Hash[String, String]) } - private def auth_headers - end - - sig do - params( - base_url: T.nilable(String), - api_key: T.nilable(String), - organization: T.nilable(String), - project: T.nilable(String), - max_retries: Integer, - timeout: Float, - initial_retry_delay: Float, - max_retry_delay: Float - ) - .returns(T.attached_class) - end - def self.new( - base_url: nil, - api_key: ENV["OPENAI_API_KEY"], - organization: ENV["OPENAI_ORG_ID"], - project: ENV["OPENAI_PROJECT_ID"], - max_retries: DEFAULT_MAX_RETRIES, - timeout: DEFAULT_TIMEOUT_IN_SECONDS, - initial_retry_delay: DEFAULT_INITIAL_RETRY_DELAY, - max_retry_delay: DEFAULT_MAX_RETRY_DELAY - ) - end - end -end diff --git a/rbi/lib/openai/cursor_page.rbi b/rbi/lib/openai/cursor_page.rbi deleted file mode 100644 index d7c63bcf..00000000 --- a/rbi/lib/openai/cursor_page.rbi +++ /dev/null @@ -1,37 +0,0 @@ -# typed: strong - -module OpenAI - class CursorPage - include OpenAI::BasePage - - Elem = type_member - - sig { returns(T::Array[Elem]) } - def data - end - - sig { params(_: T::Array[Elem]).returns(T::Array[Elem]) } - def data=(_) - end - - sig { returns(T::Boolean) } - def has_more - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def has_more=(_) - end - - sig do - params( - client: OpenAI::BaseClient, - req: OpenAI::BaseClient::RequestComponentsShape, - headers: T.any(T::Hash[String, String], Net::HTTPHeader), - page_data: T::Hash[Symbol, T.anything] - ) - .void - end - def initialize(client:, req:, headers:, page_data:) - end - end -end diff --git a/rbi/lib/openai/errors.rbi b/rbi/lib/openai/errors.rbi deleted file mode 100644 index 69c98916..00000000 --- a/rbi/lib/openai/errors.rbi +++ /dev/null @@ -1,182 +0,0 @@ -# typed: strong - -module OpenAI - class Error < StandardError - sig { returns(T.nilable(StandardError)) } - def cause - end - end - - class ConversionError < OpenAI::Error - end - - class APIError < OpenAI::Error - sig { returns(URI::Generic) } - def url - end - - sig { returns(T.nilable(Integer)) } - def status - end - - sig { returns(T.nilable(T.anything)) } - def body - end - - sig { returns(T.nilable(String)) } - def code - end - - sig { returns(T.nilable(String)) } - def param - end - - sig { returns(T.nilable(String)) } - def type - end - - sig do - params( - url: URI::Generic, - status: T.nilable(Integer), - body: T.nilable(Object), - request: NilClass, - response: NilClass, - message: T.nilable(String) - ) - .returns(T.attached_class) - end - def self.new(url:, status: nil, body: nil, request: nil, response: nil, message: nil) - end - end - - class APIConnectionError < OpenAI::APIError - sig { void } - def status - end - - sig { void } - def body - end - - sig { void } - def code - end - - sig { void } - def param - end - - sig { void } - def type - end - - sig do - params( - url: URI::Generic, - status: NilClass, - body: NilClass, - request: NilClass, - response: NilClass, - message: T.nilable(String) - ) - .returns(T.attached_class) - end - def self.new(url:, status: nil, body: nil, request: nil, response: nil, message: "Connection error.") - end - end - - class APITimeoutError < OpenAI::APIConnectionError 
- sig do - params( - url: URI::Generic, - status: NilClass, - body: NilClass, - request: NilClass, - response: NilClass, - message: T.nilable(String) - ) - .returns(T.attached_class) - end - def self.new(url:, status: nil, body: nil, request: nil, response: nil, message: "Request timed out.") - end - end - - class APIStatusError < OpenAI::APIError - sig do - params( - url: URI::Generic, - status: Integer, - body: T.nilable(Object), - request: NilClass, - response: NilClass, - message: T.nilable(String) - ) - .returns(T.attached_class) - end - def self.for(url:, status:, body:, request:, response:, message: nil) - end - - sig { returns(Integer) } - def status - end - - sig { returns(T.nilable(String)) } - def code - end - - sig { returns(T.nilable(String)) } - def param - end - - sig { returns(T.nilable(String)) } - def type - end - - sig do - params( - url: URI::Generic, - status: Integer, - body: T.nilable(Object), - request: NilClass, - response: NilClass, - message: T.nilable(String) - ) - .returns(T.attached_class) - end - def self.new(url:, status:, body:, request:, response:, message: nil) - end - end - - class BadRequestError < OpenAI::APIStatusError - HTTP_STATUS = 400 - end - - class AuthenticationError < OpenAI::APIStatusError - HTTP_STATUS = 401 - end - - class PermissionDeniedError < OpenAI::APIStatusError - HTTP_STATUS = 403 - end - - class NotFoundError < OpenAI::APIStatusError - HTTP_STATUS = 404 - end - - class ConflictError < OpenAI::APIStatusError - HTTP_STATUS = 409 - end - - class UnprocessableEntityError < OpenAI::APIStatusError - HTTP_STATUS = 422 - end - - class RateLimitError < OpenAI::APIStatusError - HTTP_STATUS = 429 - end - - class InternalServerError < OpenAI::APIStatusError - HTTP_STATUS = T.let((500..), T::Range[Integer]) - end -end diff --git a/rbi/lib/openai/extern.rbi b/rbi/lib/openai/extern.rbi deleted file mode 100644 index ca7768e3..00000000 --- a/rbi/lib/openai/extern.rbi +++ /dev/null @@ -1,7 +0,0 @@ -# typed: strong - -module OpenAI - module Extern - abstract! 
- end -end diff --git a/rbi/lib/openai/models/audio/speech_create_params.rbi b/rbi/lib/openai/models/audio/speech_create_params.rbi deleted file mode 100644 index db0763fb..00000000 --- a/rbi/lib/openai/models/audio/speech_create_params.rbi +++ /dev/null @@ -1,129 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Audio - class SpeechCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def input - end - - sig { params(_: String).returns(String) } - def input=(_) - end - - sig { returns(T.any(String, Symbol)) } - def model - end - - sig { params(_: T.any(String, Symbol)).returns(T.any(String, Symbol)) } - def model=(_) - end - - sig { returns(Symbol) } - def voice - end - - sig { params(_: Symbol).returns(Symbol) } - def voice=(_) - end - - sig { returns(T.nilable(Symbol)) } - def response_format - end - - sig { params(_: Symbol).returns(Symbol) } - def response_format=(_) - end - - sig { returns(T.nilable(Float)) } - def speed - end - - sig { params(_: Float).returns(Float) } - def speed=(_) - end - - sig do - params( - input: String, - model: T.any(String, Symbol), - voice: Symbol, - response_format: Symbol, - speed: Float, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(input:, model:, voice:, response_format: nil, speed: nil, request_options: {}) - end - - sig do - override - .returns( - { - input: String, - model: T.any(String, Symbol), - voice: Symbol, - response_format: Symbol, - speed: Float, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Model < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - - class Voice < OpenAI::Enum - abstract! - - ALLOY = :alloy - ASH = :ash - CORAL = :coral - ECHO = :echo - FABLE = :fable - ONYX = :onyx - NOVA = :nova - SAGE = :sage - SHIMMER = :shimmer - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class ResponseFormat < OpenAI::Enum - abstract! - - MP3 = :mp3 - OPUS = :opus - AAC = :aac - FLAC = :flac - WAV = :wav - PCM = :pcm - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/audio/speech_model.rbi b/rbi/lib/openai/models/audio/speech_model.rbi deleted file mode 100644 index 5caef0af..00000000 --- a/rbi/lib/openai/models/audio/speech_model.rbi +++ /dev/null @@ -1,20 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Audio - class SpeechModel < OpenAI::Enum - abstract! 
- - TTS_1 = :"tts-1" - TTS_1_HD = :"tts-1-hd" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/audio/transcription.rbi b/rbi/lib/openai/models/audio/transcription.rbi deleted file mode 100644 index c314036c..00000000 --- a/rbi/lib/openai/models/audio/transcription.rbi +++ /dev/null @@ -1,25 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Audio - class Transcription < OpenAI::BaseModel - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { params(text: String).returns(T.attached_class) } - def self.new(text:) - end - - sig { override.returns({text: String}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/audio/transcription_create_params.rbi b/rbi/lib/openai/models/audio/transcription_create_params.rbi deleted file mode 100644 index 324c2060..00000000 --- a/rbi/lib/openai/models/audio/transcription_create_params.rbi +++ /dev/null @@ -1,134 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Audio - class TranscriptionCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.any(IO, StringIO)) } - def file - end - - sig { params(_: T.any(IO, StringIO)).returns(T.any(IO, StringIO)) } - def file=(_) - end - - sig { returns(T.any(String, Symbol)) } - def model - end - - sig { params(_: T.any(String, Symbol)).returns(T.any(String, Symbol)) } - def model=(_) - end - - sig { returns(T.nilable(String)) } - def language - end - - sig { params(_: String).returns(String) } - def language=(_) - end - - sig { returns(T.nilable(String)) } - def prompt - end - - sig { params(_: String).returns(String) } - def prompt=(_) - end - - sig { returns(T.nilable(Symbol)) } - def response_format - end - - sig { params(_: Symbol).returns(Symbol) } - def response_format=(_) - end - - sig { returns(T.nilable(Float)) } - def temperature - end - - sig { params(_: Float).returns(Float) } - def temperature=(_) - end - - sig { returns(T.nilable(T::Array[Symbol])) } - def timestamp_granularities - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def timestamp_granularities=(_) - end - - sig do - params( - file: T.any(IO, StringIO), - model: T.any(String, Symbol), - language: String, - prompt: String, - response_format: Symbol, - temperature: Float, - timestamp_granularities: T::Array[Symbol], - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new( - file:, - model:, - language: nil, - prompt: nil, - response_format: nil, - temperature: nil, - timestamp_granularities: nil, - request_options: {} - ) - end - - sig do - override - .returns( - { - file: T.any(IO, StringIO), - model: T.any(String, Symbol), - language: String, - prompt: String, - response_format: Symbol, - temperature: Float, - timestamp_granularities: T::Array[Symbol], - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Model < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - - class TimestampGranularity < OpenAI::Enum - abstract! 
- - WORD = :word - SEGMENT = :segment - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/audio/transcription_create_response.rbi b/rbi/lib/openai/models/audio/transcription_create_response.rbi deleted file mode 100644 index ba16b2e7..00000000 --- a/rbi/lib/openai/models/audio/transcription_create_response.rbi +++ /dev/null @@ -1,22 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Audio - class TranscriptionCreateResponse < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[NilClass, OpenAI::Models::Audio::Transcription], [NilClass, OpenAI::Models::Audio::TranscriptionVerbose]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/audio/transcription_segment.rbi b/rbi/lib/openai/models/audio/transcription_segment.rbi deleted file mode 100644 index 2e1f1c65..00000000 --- a/rbi/lib/openai/models/audio/transcription_segment.rbi +++ /dev/null @@ -1,127 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Audio - class TranscriptionSegment < OpenAI::BaseModel - sig { returns(Integer) } - def id - end - - sig { params(_: Integer).returns(Integer) } - def id=(_) - end - - sig { returns(Float) } - def avg_logprob - end - - sig { params(_: Float).returns(Float) } - def avg_logprob=(_) - end - - sig { returns(Float) } - def compression_ratio - end - - sig { params(_: Float).returns(Float) } - def compression_ratio=(_) - end - - sig { returns(Float) } - def end_ - end - - sig { params(_: Float).returns(Float) } - def end_=(_) - end - - sig { returns(Float) } - def no_speech_prob - end - - sig { params(_: Float).returns(Float) } - def no_speech_prob=(_) - end - - sig { returns(Integer) } - def seek - end - - sig { params(_: Integer).returns(Integer) } - def seek=(_) - end - - sig { returns(Float) } - def start - end - - sig { params(_: Float).returns(Float) } - def start=(_) - end - - sig { returns(Float) } - def temperature - end - - sig { params(_: Float).returns(Float) } - def temperature=(_) - end - - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(T::Array[Integer]) } - def tokens - end - - sig { params(_: T::Array[Integer]).returns(T::Array[Integer]) } - def tokens=(_) - end - - sig do - params( - id: Integer, - avg_logprob: Float, - compression_ratio: Float, - end_: Float, - no_speech_prob: Float, - seek: Integer, - start: Float, - temperature: Float, - text: String, - tokens: T::Array[Integer] - ) - .returns(T.attached_class) - end - def self.new(id:, avg_logprob:, compression_ratio:, end_:, no_speech_prob:, seek:, start:, temperature:, text:, tokens:) - end - - sig do - override - .returns( - { - id: Integer, - avg_logprob: Float, - compression_ratio: Float, - end_: Float, - no_speech_prob: Float, - seek: Integer, - start: Float, - temperature: Float, - text: String, - tokens: T::Array[Integer] - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/audio/transcription_verbose.rbi b/rbi/lib/openai/models/audio/transcription_verbose.rbi deleted file mode 100644 index 7dbf05e8..00000000 --- a/rbi/lib/openai/models/audio/transcription_verbose.rbi +++ /dev/null @@ -1,83 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Audio - class TranscriptionVerbose < OpenAI::BaseModel - sig { returns(Float) } - def duration - end - - sig { params(_: Float).returns(Float) } 
- def duration=(_) - end - - sig { returns(String) } - def language - end - - sig { params(_: String).returns(String) } - def language=(_) - end - - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Audio::TranscriptionSegment])) } - def segments - end - - sig do - params(_: T::Array[OpenAI::Models::Audio::TranscriptionSegment]) - .returns(T::Array[OpenAI::Models::Audio::TranscriptionSegment]) - end - def segments=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Audio::TranscriptionWord])) } - def words - end - - sig do - params(_: T::Array[OpenAI::Models::Audio::TranscriptionWord]) - .returns(T::Array[OpenAI::Models::Audio::TranscriptionWord]) - end - def words=(_) - end - - sig do - params( - duration: Float, - language: String, - text: String, - segments: T::Array[OpenAI::Models::Audio::TranscriptionSegment], - words: T::Array[OpenAI::Models::Audio::TranscriptionWord] - ) - .returns(T.attached_class) - end - def self.new(duration:, language:, text:, segments: nil, words: nil) - end - - sig do - override - .returns( - { - duration: Float, - language: String, - text: String, - segments: T::Array[OpenAI::Models::Audio::TranscriptionSegment], - words: T::Array[OpenAI::Models::Audio::TranscriptionWord] - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/audio/transcription_word.rbi b/rbi/lib/openai/models/audio/transcription_word.rbi deleted file mode 100644 index 8a81058f..00000000 --- a/rbi/lib/openai/models/audio/transcription_word.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Audio - class TranscriptionWord < OpenAI::BaseModel - sig { returns(Float) } - def end_ - end - - sig { params(_: Float).returns(Float) } - def end_=(_) - end - - sig { returns(Float) } - def start - end - - sig { params(_: Float).returns(Float) } - def start=(_) - end - - sig { returns(String) } - def word - end - - sig { params(_: String).returns(String) } - def word=(_) - end - - sig { params(end_: Float, start: Float, word: String).returns(T.attached_class) } - def self.new(end_:, start:, word:) - end - - sig { override.returns({end_: Float, start: Float, word: String}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/audio/translation.rbi b/rbi/lib/openai/models/audio/translation.rbi deleted file mode 100644 index b9650fa4..00000000 --- a/rbi/lib/openai/models/audio/translation.rbi +++ /dev/null @@ -1,25 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Audio - class Translation < OpenAI::BaseModel - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { params(text: String).returns(T.attached_class) } - def self.new(text:) - end - - sig { override.returns({text: String}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/audio/translation_create_params.rbi b/rbi/lib/openai/models/audio/translation_create_params.rbi deleted file mode 100644 index c8175a8d..00000000 --- a/rbi/lib/openai/models/audio/translation_create_params.rbi +++ /dev/null @@ -1,92 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Audio - class TranslationCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.any(IO, StringIO)) } - def file - end - - sig { params(_: T.any(IO, 
StringIO)).returns(T.any(IO, StringIO)) } - def file=(_) - end - - sig { returns(T.any(String, Symbol)) } - def model - end - - sig { params(_: T.any(String, Symbol)).returns(T.any(String, Symbol)) } - def model=(_) - end - - sig { returns(T.nilable(String)) } - def prompt - end - - sig { params(_: String).returns(String) } - def prompt=(_) - end - - sig { returns(T.nilable(Symbol)) } - def response_format - end - - sig { params(_: Symbol).returns(Symbol) } - def response_format=(_) - end - - sig { returns(T.nilable(Float)) } - def temperature - end - - sig { params(_: Float).returns(Float) } - def temperature=(_) - end - - sig do - params( - file: T.any(IO, StringIO), - model: T.any(String, Symbol), - prompt: String, - response_format: Symbol, - temperature: Float, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(file:, model:, prompt: nil, response_format: nil, temperature: nil, request_options: {}) - end - - sig do - override - .returns( - { - file: T.any(IO, StringIO), - model: T.any(String, Symbol), - prompt: String, - response_format: Symbol, - temperature: Float, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Model < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/audio/translation_create_response.rbi b/rbi/lib/openai/models/audio/translation_create_response.rbi deleted file mode 100644 index 9a25186f..00000000 --- a/rbi/lib/openai/models/audio/translation_create_response.rbi +++ /dev/null @@ -1,22 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Audio - class TranslationCreateResponse < OpenAI::Union - abstract! 
- - class << self - sig do - override - .returns( - [[NilClass, OpenAI::Models::Audio::Translation], [NilClass, OpenAI::Models::Audio::TranslationVerbose]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/audio/translation_verbose.rbi b/rbi/lib/openai/models/audio/translation_verbose.rbi deleted file mode 100644 index 566f2db2..00000000 --- a/rbi/lib/openai/models/audio/translation_verbose.rbi +++ /dev/null @@ -1,65 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Audio - class TranslationVerbose < OpenAI::BaseModel - sig { returns(Float) } - def duration - end - - sig { params(_: Float).returns(Float) } - def duration=(_) - end - - sig { returns(String) } - def language - end - - sig { params(_: String).returns(String) } - def language=(_) - end - - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Audio::TranscriptionSegment])) } - def segments - end - - sig do - params(_: T::Array[OpenAI::Models::Audio::TranscriptionSegment]) - .returns(T::Array[OpenAI::Models::Audio::TranscriptionSegment]) - end - def segments=(_) - end - - sig do - params( - duration: Float, - language: String, - text: String, - segments: T::Array[OpenAI::Models::Audio::TranscriptionSegment] - ) - .returns(T.attached_class) - end - def self.new(duration:, language:, text:, segments: nil) - end - - sig do - override - .returns( - {duration: Float, language: String, text: String, segments: T::Array[OpenAI::Models::Audio::TranscriptionSegment]} - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/audio_model.rbi b/rbi/lib/openai/models/audio_model.rbi deleted file mode 100644 index 7f4186f7..00000000 --- a/rbi/lib/openai/models/audio_model.rbi +++ /dev/null @@ -1,17 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class AudioModel < OpenAI::Enum - abstract! - - WHISPER_1 = :"whisper-1" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end -end diff --git a/rbi/lib/openai/models/audio_response_format.rbi b/rbi/lib/openai/models/audio_response_format.rbi deleted file mode 100644 index 087af985..00000000 --- a/rbi/lib/openai/models/audio_response_format.rbi +++ /dev/null @@ -1,21 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class AudioResponseFormat < OpenAI::Enum - abstract! 
- - JSON = :json - TEXT = :text - SRT = :srt - VERBOSE_JSON = :verbose_json - VTT = :vtt - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end -end diff --git a/rbi/lib/openai/models/auto_file_chunking_strategy_param.rbi b/rbi/lib/openai/models/auto_file_chunking_strategy_param.rbi deleted file mode 100644 index 5d106690..00000000 --- a/rbi/lib/openai/models/auto_file_chunking_strategy_param.rbi +++ /dev/null @@ -1,23 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class AutoFileChunkingStrategyParam < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :auto) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/batch.rbi b/rbi/lib/openai/models/batch.rbi deleted file mode 100644 index cec4b9fb..00000000 --- a/rbi/lib/openai/models/batch.rbi +++ /dev/null @@ -1,291 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class Batch < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(String) } - def completion_window - end - - sig { params(_: String).returns(String) } - def completion_window=(_) - end - - sig { returns(Integer) } - def created_at - end - - sig { params(_: Integer).returns(Integer) } - def created_at=(_) - end - - sig { returns(String) } - def endpoint - end - - sig { params(_: String).returns(String) } - def endpoint=(_) - end - - sig { returns(String) } - def input_file_id - end - - sig { params(_: String).returns(String) } - def input_file_id=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(Symbol) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(T.nilable(Integer)) } - def cancelled_at - end - - sig { params(_: Integer).returns(Integer) } - def cancelled_at=(_) - end - - sig { returns(T.nilable(Integer)) } - def cancelling_at - end - - sig { params(_: Integer).returns(Integer) } - def cancelling_at=(_) - end - - sig { returns(T.nilable(Integer)) } - def completed_at - end - - sig { params(_: Integer).returns(Integer) } - def completed_at=(_) - end - - sig { returns(T.nilable(String)) } - def error_file_id - end - - sig { params(_: String).returns(String) } - def error_file_id=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Batch::Errors)) } - def errors - end - - sig { params(_: OpenAI::Models::Batch::Errors).returns(OpenAI::Models::Batch::Errors) } - def errors=(_) - end - - sig { returns(T.nilable(Integer)) } - def expired_at - end - - sig { params(_: Integer).returns(Integer) } - def expired_at=(_) - end - - sig { returns(T.nilable(Integer)) } - def expires_at - end - - sig { params(_: Integer).returns(Integer) } - def expires_at=(_) - end - - sig { returns(T.nilable(Integer)) } - def failed_at - end - - sig { params(_: Integer).returns(Integer) } - def failed_at=(_) - end - - sig { returns(T.nilable(Integer)) } - def finalizing_at - end - - sig { params(_: Integer).returns(Integer) } - def finalizing_at=(_) - end - - sig { returns(T.nilable(Integer)) } - def in_progress_at - end - - sig { params(_: Integer).returns(Integer) } - def in_progress_at=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - 
sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.nilable(String)) } - def output_file_id - end - - sig { params(_: String).returns(String) } - def output_file_id=(_) - end - - sig { returns(T.nilable(OpenAI::Models::BatchRequestCounts)) } - def request_counts - end - - sig { params(_: OpenAI::Models::BatchRequestCounts).returns(OpenAI::Models::BatchRequestCounts) } - def request_counts=(_) - end - - sig do - params( - id: String, - completion_window: String, - created_at: Integer, - endpoint: String, - input_file_id: String, - status: Symbol, - cancelled_at: Integer, - cancelling_at: Integer, - completed_at: Integer, - error_file_id: String, - errors: OpenAI::Models::Batch::Errors, - expired_at: Integer, - expires_at: Integer, - failed_at: Integer, - finalizing_at: Integer, - in_progress_at: Integer, - metadata: T.nilable(OpenAI::Models::Metadata), - output_file_id: String, - request_counts: OpenAI::Models::BatchRequestCounts, - object: Symbol - ) - .returns(T.attached_class) - end - def self.new( - id:, - completion_window:, - created_at:, - endpoint:, - input_file_id:, - status:, - cancelled_at: nil, - cancelling_at: nil, - completed_at: nil, - error_file_id: nil, - errors: nil, - expired_at: nil, - expires_at: nil, - failed_at: nil, - finalizing_at: nil, - in_progress_at: nil, - metadata: nil, - output_file_id: nil, - request_counts: nil, - object: :batch - ) - end - - sig do - override - .returns( - { - id: String, - completion_window: String, - created_at: Integer, - endpoint: String, - input_file_id: String, - object: Symbol, - status: Symbol, - cancelled_at: Integer, - cancelling_at: Integer, - completed_at: Integer, - error_file_id: String, - errors: OpenAI::Models::Batch::Errors, - expired_at: Integer, - expires_at: Integer, - failed_at: Integer, - finalizing_at: Integer, - in_progress_at: Integer, - metadata: T.nilable(OpenAI::Models::Metadata), - output_file_id: String, - request_counts: OpenAI::Models::BatchRequestCounts - } - ) - end - def to_hash - end - - class Status < OpenAI::Enum - abstract! 
- - VALIDATING = :validating - FAILED = :failed - IN_PROGRESS = :in_progress - FINALIZING = :finalizing - COMPLETED = :completed - EXPIRED = :expired - CANCELLING = :cancelling - CANCELLED = :cancelled - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Errors < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[OpenAI::Models::BatchError])) } - def data - end - - sig { params(_: T::Array[OpenAI::Models::BatchError]).returns(T::Array[OpenAI::Models::BatchError]) } - def data=(_) - end - - sig { returns(T.nilable(String)) } - def object - end - - sig { params(_: String).returns(String) } - def object=(_) - end - - sig { params(data: T::Array[OpenAI::Models::BatchError], object: String).returns(T.attached_class) } - def self.new(data: nil, object: nil) - end - - sig { override.returns({data: T::Array[OpenAI::Models::BatchError], object: String}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/batch_cancel_params.rbi b/rbi/lib/openai/models/batch_cancel_params.rbi deleted file mode 100644 index fba65e6e..00000000 --- a/rbi/lib/openai/models/batch_cancel_params.rbi +++ /dev/null @@ -1,21 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class BatchCancelParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/batch_create_params.rbi b/rbi/lib/openai/models/batch_create_params.rbi deleted file mode 100644 index ff5efeab..00000000 --- a/rbi/lib/openai/models/batch_create_params.rbi +++ /dev/null @@ -1,96 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class BatchCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(Symbol) } - def completion_window - end - - sig { params(_: Symbol).returns(Symbol) } - def completion_window=(_) - end - - sig { returns(Symbol) } - def endpoint - end - - sig { params(_: Symbol).returns(Symbol) } - def endpoint=(_) - end - - sig { returns(String) } - def input_file_id - end - - sig { params(_: String).returns(String) } - def input_file_id=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig do - params( - completion_window: Symbol, - endpoint: Symbol, - input_file_id: String, - metadata: T.nilable(OpenAI::Models::Metadata), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(completion_window:, endpoint:, input_file_id:, metadata: nil, request_options: {}) - end - - sig do - override - .returns( - { - completion_window: Symbol, - endpoint: Symbol, - input_file_id: String, - metadata: T.nilable(OpenAI::Models::Metadata), - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class CompletionWindow < OpenAI::Enum - abstract! - - NUMBER_24H = :"24h" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Endpoint < OpenAI::Enum - abstract! 
- - V1_CHAT_COMPLETIONS = :"/v1/chat/completions" - V1_EMBEDDINGS = :"/v1/embeddings" - V1_COMPLETIONS = :"/v1/completions" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/batch_error.rbi b/rbi/lib/openai/models/batch_error.rbi deleted file mode 100644 index b0742afb..00000000 --- a/rbi/lib/openai/models/batch_error.rbi +++ /dev/null @@ -1,52 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class BatchError < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def code - end - - sig { params(_: String).returns(String) } - def code=(_) - end - - sig { returns(T.nilable(Integer)) } - def line - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def line=(_) - end - - sig { returns(T.nilable(String)) } - def message - end - - sig { params(_: String).returns(String) } - def message=(_) - end - - sig { returns(T.nilable(String)) } - def param - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def param=(_) - end - - sig do - params(code: String, line: T.nilable(Integer), message: String, param: T.nilable(String)) - .returns(T.attached_class) - end - def self.new(code: nil, line: nil, message: nil, param: nil) - end - - sig do - override.returns({code: String, line: T.nilable(Integer), message: String, param: T.nilable(String)}) - end - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/batch_list_params.rbi b/rbi/lib/openai/models/batch_list_params.rbi deleted file mode 100644 index 5d04cdfd..00000000 --- a/rbi/lib/openai/models/batch_list_params.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class BatchListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(String)) } - def after - end - - sig { params(_: String).returns(String) } - def after=(_) - end - - sig { returns(T.nilable(Integer)) } - def limit - end - - sig { params(_: Integer).returns(Integer) } - def limit=(_) - end - - sig do - params( - after: String, - limit: Integer, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(after: nil, limit: nil, request_options: {}) - end - - sig { override.returns({after: String, limit: Integer, request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/batch_request_counts.rbi b/rbi/lib/openai/models/batch_request_counts.rbi deleted file mode 100644 index 59b86c64..00000000 --- a/rbi/lib/openai/models/batch_request_counts.rbi +++ /dev/null @@ -1,39 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class BatchRequestCounts < OpenAI::BaseModel - sig { returns(Integer) } - def completed - end - - sig { params(_: Integer).returns(Integer) } - def completed=(_) - end - - sig { returns(Integer) } - def failed - end - - sig { params(_: Integer).returns(Integer) } - def failed=(_) - end - - sig { returns(Integer) } - def total - end - - sig { params(_: Integer).returns(Integer) } - def total=(_) - end - - sig { params(completed: Integer, failed: Integer, total: Integer).returns(T.attached_class) } - def self.new(completed:, failed:, total:) - end - - sig { override.returns({completed: Integer, failed: Integer, total: Integer}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/batch_retrieve_params.rbi 
b/rbi/lib/openai/models/batch_retrieve_params.rbi deleted file mode 100644 index 56cbcb05..00000000 --- a/rbi/lib/openai/models/batch_retrieve_params.rbi +++ /dev/null @@ -1,21 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class BatchRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/beta/assistant.rbi b/rbi/lib/openai/models/beta/assistant.rbi deleted file mode 100644 index c573af22..00000000 --- a/rbi/lib/openai/models/beta/assistant.rbi +++ /dev/null @@ -1,343 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class Assistant < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(Integer) } - def created_at - end - - sig { params(_: Integer).returns(Integer) } - def created_at=(_) - end - - sig { returns(T.nilable(String)) } - def description - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def description=(_) - end - - sig { returns(T.nilable(String)) } - def instructions - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def instructions=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(String) } - def model - end - - sig { params(_: String).returns(String) } - def model=(_) - end - - sig { returns(T.nilable(String)) } - def name - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def name=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig do - returns( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - end - def tools - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - end - def tools=(_) - end - - sig do - returns( - T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - end - def response_format - end - - sig do - params( - _: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - .returns( - T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - end - def response_format=(_) - end - - sig { returns(T.nilable(Float)) } - def temperature - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def temperature=(_) - end - - sig { 
returns(T.nilable(OpenAI::Models::Beta::Assistant::ToolResources)) } - def tool_resources - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::Assistant::ToolResources)) - .returns(T.nilable(OpenAI::Models::Beta::Assistant::ToolResources)) - end - def tool_resources=(_) - end - - sig { returns(T.nilable(Float)) } - def top_p - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def top_p=(_) - end - - sig do - params( - id: String, - created_at: Integer, - description: T.nilable(String), - instructions: T.nilable(String), - metadata: T.nilable(OpenAI::Models::Metadata), - model: String, - name: T.nilable(String), - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ], - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - tool_resources: T.nilable(OpenAI::Models::Beta::Assistant::ToolResources), - top_p: T.nilable(Float), - object: Symbol - ) - .returns(T.attached_class) - end - def self.new( - id:, - created_at:, - description:, - instructions:, - metadata:, - model:, - name:, - tools:, - response_format: nil, - temperature: nil, - tool_resources: nil, - top_p: nil, - object: :assistant - ) - end - - sig do - override - .returns( - { - id: String, - created_at: Integer, - description: T.nilable(String), - instructions: T.nilable(String), - metadata: T.nilable(OpenAI::Models::Metadata), - model: String, - name: T.nilable(String), - object: Symbol, - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ], - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - tool_resources: T.nilable(OpenAI::Models::Beta::Assistant::ToolResources), - top_p: T.nilable(Float) - } - ) - end - def to_hash - end - - class ToolResources < OpenAI::BaseModel - sig { returns(T.nilable(OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter)) } - def code_interpreter - end - - sig do - params(_: OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter) - .returns(OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter) - end - def code_interpreter=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Assistant::ToolResources::FileSearch)) } - def file_search - end - - sig do - params(_: OpenAI::Models::Beta::Assistant::ToolResources::FileSearch) - .returns(OpenAI::Models::Beta::Assistant::ToolResources::FileSearch) - end - def file_search=(_) - end - - sig do - params( - code_interpreter: OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::Assistant::ToolResources::FileSearch - ) - .returns(T.attached_class) - end - def self.new(code_interpreter: nil, file_search: nil) - end - - sig do - override - .returns( - { - code_interpreter: OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::Assistant::ToolResources::FileSearch - } - ) - end - def to_hash - end - - class CodeInterpreter < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def file_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def 
file_ids=(_) - end - - sig { params(file_ids: T::Array[String]).returns(T.attached_class) } - def self.new(file_ids: nil) - end - - sig { override.returns({file_ids: T::Array[String]}) } - def to_hash - end - end - - class FileSearch < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def vector_store_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def vector_store_ids=(_) - end - - sig { params(vector_store_ids: T::Array[String]).returns(T.attached_class) } - def self.new(vector_store_ids: nil) - end - - sig { override.returns({vector_store_ids: T::Array[String]}) } - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/assistant_create_params.rbi b/rbi/lib/openai/models/beta/assistant_create_params.rbi deleted file mode 100644 index 9287af82..00000000 --- a/rbi/lib/openai/models/beta/assistant_create_params.rbi +++ /dev/null @@ -1,561 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class AssistantCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.any(String, Symbol)) } - def model - end - - sig { params(_: T.any(String, Symbol)).returns(T.any(String, Symbol)) } - def model=(_) - end - - sig { returns(T.nilable(String)) } - def description - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def description=(_) - end - - sig { returns(T.nilable(String)) } - def instructions - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def instructions=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.nilable(String)) } - def name - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def name=(_) - end - - sig { returns(T.nilable(Symbol)) } - def reasoning_effort - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def reasoning_effort=(_) - end - - sig do - returns( - T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - end - def response_format - end - - sig do - params( - _: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - .returns( - T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - end - def response_format=(_) - end - - sig { returns(T.nilable(Float)) } - def temperature - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def temperature=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::AssistantCreateParams::ToolResources)) } - def tool_resources - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::AssistantCreateParams::ToolResources)) - .returns(T.nilable(OpenAI::Models::Beta::AssistantCreateParams::ToolResources)) - end - def tool_resources=(_) - end - - sig do - returns( - T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - ) - end - def tools - end - - sig do - params( - _: T::Array[ - T.any( - 
OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - end - def tools=(_) - end - - sig { returns(T.nilable(Float)) } - def top_p - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def top_p=(_) - end - - sig do - params( - model: T.any(String, Symbol), - description: T.nilable(String), - instructions: T.nilable(String), - metadata: T.nilable(OpenAI::Models::Metadata), - name: T.nilable(String), - reasoning_effort: T.nilable(Symbol), - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - tool_resources: T.nilable(OpenAI::Models::Beta::AssistantCreateParams::ToolResources), - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ], - top_p: T.nilable(Float), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new( - model:, - description: nil, - instructions: nil, - metadata: nil, - name: nil, - reasoning_effort: nil, - response_format: nil, - temperature: nil, - tool_resources: nil, - tools: nil, - top_p: nil, - request_options: {} - ) - end - - sig do - override - .returns( - { - model: T.any(String, Symbol), - description: T.nilable(String), - instructions: T.nilable(String), - metadata: T.nilable(OpenAI::Models::Metadata), - name: T.nilable(String), - reasoning_effort: T.nilable(Symbol), - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - tool_resources: T.nilable(OpenAI::Models::Beta::AssistantCreateParams::ToolResources), - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ], - top_p: T.nilable(Float), - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Model < OpenAI::Union - abstract! 
- - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - - class ToolResources < OpenAI::BaseModel - sig { returns(T.nilable(OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter)) } - def code_interpreter - end - - sig do - params(_: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter) - .returns(OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter) - end - def code_interpreter=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch)) } - def file_search - end - - sig do - params(_: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch) - .returns(OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch) - end - def file_search=(_) - end - - sig do - params( - code_interpreter: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch - ) - .returns(T.attached_class) - end - def self.new(code_interpreter: nil, file_search: nil) - end - - sig do - override - .returns( - { - code_interpreter: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch - } - ) - end - def to_hash - end - - class CodeInterpreter < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def file_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def file_ids=(_) - end - - sig { params(file_ids: T::Array[String]).returns(T.attached_class) } - def self.new(file_ids: nil) - end - - sig { override.returns({file_ids: T::Array[String]}) } - def to_hash - end - end - - class FileSearch < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def vector_store_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def vector_store_ids=(_) - end - - sig do - returns( - T.nilable(T::Array[OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore]) - ) - end - def vector_stores - end - - sig do - params(_: T::Array[OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore]) - .returns(T::Array[OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore]) - end - def vector_stores=(_) - end - - sig do - params( - vector_store_ids: T::Array[String], - vector_stores: T::Array[OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore] - ) - .returns(T.attached_class) - end - def self.new(vector_store_ids: nil, vector_stores: nil) - end - - sig do - override - .returns( - { - vector_store_ids: T::Array[String], - vector_stores: T::Array[OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore] - } - ) - end - def to_hash - end - - class VectorStore < OpenAI::BaseModel - sig do - returns( - T.nilable( - T.any( - OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, - OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static - ) - ) - ) - end - def chunking_strategy - end - - sig do - params( - _: T.any( - OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, - 
OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static - ) - ) - .returns( - T.any( - OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, - OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static - ) - ) - end - def chunking_strategy=(_) - end - - sig { returns(T.nilable(T::Array[String])) } - def file_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def file_ids=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig do - params( - chunking_strategy: T.any( - OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, - OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static - ), - file_ids: T::Array[String], - metadata: T.nilable(OpenAI::Models::Metadata) - ) - .returns(T.attached_class) - end - def self.new(chunking_strategy: nil, file_ids: nil, metadata: nil) - end - - sig do - override - .returns( - { - chunking_strategy: T.any( - OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, - OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static - ), - file_ids: T::Array[String], - metadata: T.nilable(OpenAI::Models::Metadata) - } - ) - end - def to_hash - end - - class ChunkingStrategy < OpenAI::Union - abstract! - - class Auto < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :auto) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - - class Static < OpenAI::BaseModel - sig do - returns( - OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static - ) - end - def static - end - - sig do - params( - _: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static - ) - .returns( - OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static - ) - end - def static=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - static: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(static:, type: :static) - end - - sig do - override - .returns( - { - static: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, - type: Symbol - } - ) - end - def to_hash - end - - class Static < OpenAI::BaseModel - sig { returns(Integer) } - def chunk_overlap_tokens - end - - sig { params(_: Integer).returns(Integer) } - def chunk_overlap_tokens=(_) - end - - sig { returns(Integer) } - def max_chunk_size_tokens - end - - sig { params(_: Integer).returns(Integer) } - def max_chunk_size_tokens=(_) - end - - sig do - params( - chunk_overlap_tokens: Integer, - max_chunk_size_tokens: Integer - 
).returns(T.attached_class) - end - def self.new(chunk_overlap_tokens:, max_chunk_size_tokens:) - end - - sig { override.returns({chunk_overlap_tokens: Integer, max_chunk_size_tokens: Integer}) } - def to_hash - end - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto], [Symbol, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]] - ) - end - private def variants - end - end - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/assistant_delete_params.rbi b/rbi/lib/openai/models/beta/assistant_delete_params.rbi deleted file mode 100644 index f6e02e32..00000000 --- a/rbi/lib/openai/models/beta/assistant_delete_params.rbi +++ /dev/null @@ -1,23 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class AssistantDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/assistant_deleted.rbi b/rbi/lib/openai/models/beta/assistant_deleted.rbi deleted file mode 100644 index bf4cde08..00000000 --- a/rbi/lib/openai/models/beta/assistant_deleted.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class AssistantDeleted < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T::Boolean) } - def deleted - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def deleted=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { params(id: String, deleted: T::Boolean, object: Symbol).returns(T.attached_class) } - def self.new(id:, deleted:, object: :"assistant.deleted") - end - - sig { override.returns({id: String, deleted: T::Boolean, object: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/assistant_list_params.rbi b/rbi/lib/openai/models/beta/assistant_list_params.rbi deleted file mode 100644 index 8653e3c0..00000000 --- a/rbi/lib/openai/models/beta/assistant_list_params.rbi +++ /dev/null @@ -1,85 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class AssistantListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(String)) } - def after - end - - sig { params(_: String).returns(String) } - def after=(_) - end - - sig { returns(T.nilable(String)) } - def before - end - - sig { params(_: String).returns(String) } - def before=(_) - end - - sig { returns(T.nilable(Integer)) } - def limit - end - - sig { params(_: Integer).returns(Integer) } - def limit=(_) - end - - sig { returns(T.nilable(Symbol)) } - def order - end - - sig { params(_: Symbol).returns(Symbol) } - def order=(_) - end - - sig do - params( - after: String, - before: String, - limit: Integer, - order: Symbol, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(after: nil, 
before: nil, limit: nil, order: nil, request_options: {}) - end - - sig do - override - .returns( - { - after: String, - before: String, - limit: Integer, - order: Symbol, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Order < OpenAI::Enum - abstract! - - ASC = :asc - DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/assistant_response_format_option.rbi b/rbi/lib/openai/models/beta/assistant_response_format_option.rbi deleted file mode 100644 index 1e789450..00000000 --- a/rbi/lib/openai/models/beta/assistant_response_format_option.rbi +++ /dev/null @@ -1,22 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class AssistantResponseFormatOption < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[NilClass, Symbol], [NilClass, OpenAI::Models::ResponseFormatText], [NilClass, OpenAI::Models::ResponseFormatJSONObject], [NilClass, OpenAI::Models::ResponseFormatJSONSchema]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/assistant_retrieve_params.rbi b/rbi/lib/openai/models/beta/assistant_retrieve_params.rbi deleted file mode 100644 index 493dc3a9..00000000 --- a/rbi/lib/openai/models/beta/assistant_retrieve_params.rbi +++ /dev/null @@ -1,23 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class AssistantRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/assistant_stream_event.rbi b/rbi/lib/openai/models/beta/assistant_stream_event.rbi deleted file mode 100644 index 7ac9987b..00000000 --- a/rbi/lib/openai/models/beta/assistant_stream_event.rbi +++ /dev/null @@ -1,681 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class AssistantStreamEvent < OpenAI::Union - abstract! 
- - class ThreadCreated < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Thread) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Thread).returns(OpenAI::Models::Beta::Thread) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def enabled - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def enabled=(_) - end - - sig { params(data: OpenAI::Models::Beta::Thread, enabled: T::Boolean, event: Symbol).returns(T.attached_class) } - def self.new(data:, enabled: nil, event: :"thread.created") - end - - sig { override.returns({data: OpenAI::Models::Beta::Thread, event: Symbol, enabled: T::Boolean}) } - def to_hash - end - end - - class ThreadRunCreated < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.created") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunQueued < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.queued") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunInProgress < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.in_progress") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunRequiresAction < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.requires_action") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunCompleted < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def 
data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.completed") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunIncomplete < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.incomplete") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunFailed < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.failed") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunCancelling < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.cancelling") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunCancelled < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.cancelled") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunExpired < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.expired") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - 
end - end - - class ThreadRunStepCreated < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStep) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStep) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.step.created") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol}) } - def to_hash - end - end - - class ThreadRunStepInProgress < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStep) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStep) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.step.in_progress") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol}) } - def to_hash - end - end - - class ThreadRunStepDelta < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig do - params(data: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent, event: Symbol) - .returns(T.attached_class) - end - def self.new(data:, event: :"thread.run.step.delta") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent, event: Symbol}) } - def to_hash - end - end - - class ThreadRunStepCompleted < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStep) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStep) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.step.completed") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol}) } - def to_hash - end - end - - class ThreadRunStepFailed < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStep) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStep) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.step.failed") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol}) } - def to_hash - 
end - end - - class ThreadRunStepCancelled < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStep) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStep) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.step.cancelled") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol}) } - def to_hash - end - end - - class ThreadRunStepExpired < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStep) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStep) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.step.expired") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol}) } - def to_hash - end - end - - class ThreadMessageCreated < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Message) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Message).returns(OpenAI::Models::Beta::Threads::Message) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.message.created") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Message, event: Symbol}) } - def to_hash - end - end - - class ThreadMessageInProgress < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Message) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Message).returns(OpenAI::Models::Beta::Threads::Message) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.message.in_progress") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Message, event: Symbol}) } - def to_hash - end - end - - class ThreadMessageDelta < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::MessageDeltaEvent) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::MessageDeltaEvent) - .returns(OpenAI::Models::Beta::Threads::MessageDeltaEvent) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::MessageDeltaEvent, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.message.delta") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::MessageDeltaEvent, event: Symbol}) } - def to_hash - end - end - - class ThreadMessageCompleted < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Message) } 
- def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Message).returns(OpenAI::Models::Beta::Threads::Message) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.message.completed") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Message, event: Symbol}) } - def to_hash - end - end - - class ThreadMessageIncomplete < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Message) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Message).returns(OpenAI::Models::Beta::Threads::Message) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.message.incomplete") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Message, event: Symbol}) } - def to_hash - end - end - - class ErrorEvent < OpenAI::BaseModel - sig { returns(OpenAI::Models::ErrorObject) } - def data - end - - sig { params(_: OpenAI::Models::ErrorObject).returns(OpenAI::Models::ErrorObject) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::ErrorObject, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :error) - end - - sig { override.returns({data: OpenAI::Models::ErrorObject, event: Symbol}) } - def to_hash - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted], [Symbol, 
OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete], [Symbol, OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/assistant_tool.rbi b/rbi/lib/openai/models/beta/assistant_tool.rbi deleted file mode 100644 index 18612436..00000000 --- a/rbi/lib/openai/models/beta/assistant_tool.rbi +++ /dev/null @@ -1,22 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class AssistantTool < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::CodeInterpreterTool], [Symbol, OpenAI::Models::Beta::FileSearchTool], [Symbol, OpenAI::Models::Beta::FunctionTool]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/assistant_tool_choice.rbi b/rbi/lib/openai/models/beta/assistant_tool_choice.rbi deleted file mode 100644 index 7d82ebcf..00000000 --- a/rbi/lib/openai/models/beta/assistant_tool_choice.rbi +++ /dev/null @@ -1,52 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class AssistantToolChoice < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::AssistantToolChoiceFunction)) } - def function - end - - sig do - params(_: OpenAI::Models::Beta::AssistantToolChoiceFunction) - .returns(OpenAI::Models::Beta::AssistantToolChoiceFunction) - end - def function=(_) - end - - sig do - params(type: Symbol, function: OpenAI::Models::Beta::AssistantToolChoiceFunction).returns(T.attached_class) - end - def self.new(type:, function: nil) - end - - sig { override.returns({type: Symbol, function: OpenAI::Models::Beta::AssistantToolChoiceFunction}) } - def to_hash - end - - class Type < OpenAI::Enum - abstract! - - FUNCTION = :function - CODE_INTERPRETER = :code_interpreter - FILE_SEARCH = :file_search - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/assistant_tool_choice_function.rbi b/rbi/lib/openai/models/beta/assistant_tool_choice_function.rbi deleted file mode 100644 index be6c8d7f..00000000 --- a/rbi/lib/openai/models/beta/assistant_tool_choice_function.rbi +++ /dev/null @@ -1,25 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class AssistantToolChoiceFunction < OpenAI::BaseModel - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { params(name: String).returns(T.attached_class) } - def self.new(name:) - end - - sig { override.returns({name: String}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/assistant_tool_choice_option.rbi b/rbi/lib/openai/models/beta/assistant_tool_choice_option.rbi deleted file mode 100644 index 6ae486e1..00000000 --- a/rbi/lib/openai/models/beta/assistant_tool_choice_option.rbi +++ /dev/null @@ -1,31 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class AssistantToolChoiceOption < OpenAI::Union - abstract! - - class Auto < OpenAI::Enum - abstract! 
- - NONE = :none - AUTO = :auto - REQUIRED = :required - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, OpenAI::Models::Beta::AssistantToolChoice]]) } - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/assistant_update_params.rbi b/rbi/lib/openai/models/beta/assistant_update_params.rbi deleted file mode 100644 index 0c75a7fb..00000000 --- a/rbi/lib/openai/models/beta/assistant_update_params.rbi +++ /dev/null @@ -1,380 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class AssistantUpdateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(String)) } - def description - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def description=(_) - end - - sig { returns(T.nilable(String)) } - def instructions - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def instructions=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.nilable(T.any(String, Symbol))) } - def model - end - - sig { params(_: T.any(String, Symbol)).returns(T.any(String, Symbol)) } - def model=(_) - end - - sig { returns(T.nilable(String)) } - def name - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def name=(_) - end - - sig { returns(T.nilable(Symbol)) } - def reasoning_effort - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def reasoning_effort=(_) - end - - sig do - returns( - T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - end - def response_format - end - - sig do - params( - _: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - .returns( - T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - end - def response_format=(_) - end - - sig { returns(T.nilable(Float)) } - def temperature - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def temperature=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::AssistantUpdateParams::ToolResources)) } - def tool_resources - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::AssistantUpdateParams::ToolResources)) - .returns(T.nilable(OpenAI::Models::Beta::AssistantUpdateParams::ToolResources)) - end - def tool_resources=(_) - end - - sig do - returns( - T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - ) - end - def tools - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - end - def tools=(_) - end - - sig { 
returns(T.nilable(Float)) } - def top_p - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def top_p=(_) - end - - sig do - params( - description: T.nilable(String), - instructions: T.nilable(String), - metadata: T.nilable(OpenAI::Models::Metadata), - model: T.any(String, Symbol), - name: T.nilable(String), - reasoning_effort: T.nilable(Symbol), - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - tool_resources: T.nilable(OpenAI::Models::Beta::AssistantUpdateParams::ToolResources), - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ], - top_p: T.nilable(Float), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new( - description: nil, - instructions: nil, - metadata: nil, - model: nil, - name: nil, - reasoning_effort: nil, - response_format: nil, - temperature: nil, - tool_resources: nil, - tools: nil, - top_p: nil, - request_options: {} - ) - end - - sig do - override - .returns( - { - description: T.nilable(String), - instructions: T.nilable(String), - metadata: T.nilable(OpenAI::Models::Metadata), - model: T.any(String, Symbol), - name: T.nilable(String), - reasoning_effort: T.nilable(Symbol), - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - tool_resources: T.nilable(OpenAI::Models::Beta::AssistantUpdateParams::ToolResources), - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ], - top_p: T.nilable(Float), - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Model < OpenAI::Union - abstract! - - class AssistantSupportedModels < OpenAI::Enum - abstract! 
- - O3_MINI = :"o3-mini" - O3_MINI_2025_01_31 = :"o3-mini-2025-01-31" - O1 = :o1 - O1_2024_12_17 = :"o1-2024-12-17" - GPT_4O = :"gpt-4o" - GPT_4O_2024_11_20 = :"gpt-4o-2024-11-20" - GPT_4O_2024_08_06 = :"gpt-4o-2024-08-06" - GPT_4O_2024_05_13 = :"gpt-4o-2024-05-13" - GPT_4O_MINI = :"gpt-4o-mini" - GPT_4O_MINI_2024_07_18 = :"gpt-4o-mini-2024-07-18" - GPT_4_5_PREVIEW = :"gpt-4.5-preview" - GPT_4_5_PREVIEW_2025_02_27 = :"gpt-4.5-preview-2025-02-27" - GPT_4_TURBO = :"gpt-4-turbo" - GPT_4_TURBO_2024_04_09 = :"gpt-4-turbo-2024-04-09" - GPT_4_0125_PREVIEW = :"gpt-4-0125-preview" - GPT_4_TURBO_PREVIEW = :"gpt-4-turbo-preview" - GPT_4_1106_PREVIEW = :"gpt-4-1106-preview" - GPT_4_VISION_PREVIEW = :"gpt-4-vision-preview" - GPT_4 = :"gpt-4" - GPT_4_0314 = :"gpt-4-0314" - GPT_4_0613 = :"gpt-4-0613" - GPT_4_32K = :"gpt-4-32k" - GPT_4_32K_0314 = :"gpt-4-32k-0314" - GPT_4_32K_0613 = :"gpt-4-32k-0613" - GPT_3_5_TURBO = :"gpt-3.5-turbo" - GPT_3_5_TURBO_16K = :"gpt-3.5-turbo-16k" - GPT_3_5_TURBO_0613 = :"gpt-3.5-turbo-0613" - GPT_3_5_TURBO_1106 = :"gpt-3.5-turbo-1106" - GPT_3_5_TURBO_0125 = :"gpt-3.5-turbo-0125" - GPT_3_5_TURBO_16K_0613 = :"gpt-3.5-turbo-16k-0613" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - - class ToolResources < OpenAI::BaseModel - sig { returns(T.nilable(OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter)) } - def code_interpreter - end - - sig do - params(_: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter) - .returns(OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter) - end - def code_interpreter=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch)) } - def file_search - end - - sig do - params(_: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch) - .returns(OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch) - end - def file_search=(_) - end - - sig do - params( - code_interpreter: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch - ) - .returns(T.attached_class) - end - def self.new(code_interpreter: nil, file_search: nil) - end - - sig do - override - .returns( - { - code_interpreter: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch - } - ) - end - def to_hash - end - - class CodeInterpreter < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def file_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def file_ids=(_) - end - - sig { params(file_ids: T::Array[String]).returns(T.attached_class) } - def self.new(file_ids: nil) - end - - sig { override.returns({file_ids: T::Array[String]}) } - def to_hash - end - end - - class FileSearch < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def vector_store_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def vector_store_ids=(_) - end - - sig { params(vector_store_ids: T::Array[String]).returns(T.attached_class) } - def self.new(vector_store_ids: nil) - end - - sig { override.returns({vector_store_ids: T::Array[String]}) } - def to_hash - end - end - end - end - end - end -end 
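Note: the deleted assistant_update_params.rbi above typed the full update-assistant request surface. A hedged sketch of building those params; the model Symbol comes from the AssistantSupportedModels enum in the hunk, while the assistant name and file id are hypothetical placeholders:

    require "openai"

    params = OpenAI::Models::Beta::AssistantUpdateParams.new(
      model: :"gpt-4o",
      name: "Data helper",          # hypothetical name
      temperature: 0.2,
      tool_resources: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources.new(
        code_interpreter: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter.new(
          file_ids: ["file-abc123"] # hypothetical file id
        )
      )
    )

Per the Model union's variants above, `model` accepts either a free-form String or one of the enum Symbols.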
diff --git a/rbi/lib/openai/models/beta/code_interpreter_tool.rbi b/rbi/lib/openai/models/beta/code_interpreter_tool.rbi deleted file mode 100644 index a560387e..00000000 --- a/rbi/lib/openai/models/beta/code_interpreter_tool.rbi +++ /dev/null @@ -1,25 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class CodeInterpreterTool < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :code_interpreter) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/file_search_tool.rbi b/rbi/lib/openai/models/beta/file_search_tool.rbi deleted file mode 100644 index a7d4bbaf..00000000 --- a/rbi/lib/openai/models/beta/file_search_tool.rbi +++ /dev/null @@ -1,118 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class FileSearchTool < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::FileSearchTool::FileSearch)) } - def file_search - end - - sig do - params(_: OpenAI::Models::Beta::FileSearchTool::FileSearch) - .returns(OpenAI::Models::Beta::FileSearchTool::FileSearch) - end - def file_search=(_) - end - - sig do - params(file_search: OpenAI::Models::Beta::FileSearchTool::FileSearch, type: Symbol) - .returns(T.attached_class) - end - def self.new(file_search: nil, type: :file_search) - end - - sig { override.returns({type: Symbol, file_search: OpenAI::Models::Beta::FileSearchTool::FileSearch}) } - def to_hash - end - - class FileSearch < OpenAI::BaseModel - sig { returns(T.nilable(Integer)) } - def max_num_results - end - - sig { params(_: Integer).returns(Integer) } - def max_num_results=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions)) } - def ranking_options - end - - sig do - params(_: OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions) - .returns(OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions) - end - def ranking_options=(_) - end - - sig do - params( - max_num_results: Integer, - ranking_options: OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions - ) - .returns(T.attached_class) - end - def self.new(max_num_results: nil, ranking_options: nil) - end - - sig do - override - .returns( - {max_num_results: Integer, ranking_options: OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions} - ) - end - def to_hash - end - - class RankingOptions < OpenAI::BaseModel - sig { returns(Float) } - def score_threshold - end - - sig { params(_: Float).returns(Float) } - def score_threshold=(_) - end - - sig { returns(T.nilable(Symbol)) } - def ranker - end - - sig { params(_: Symbol).returns(Symbol) } - def ranker=(_) - end - - sig { params(score_threshold: Float, ranker: Symbol).returns(T.attached_class) } - def self.new(score_threshold:, ranker: nil) - end - - sig { override.returns({score_threshold: Float, ranker: Symbol}) } - def to_hash - end - - class Ranker < OpenAI::Enum - abstract! 
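Note: a hedged sketch of the two concrete tool models deleted above. The `type` defaults (`:code_interpreter`, `:file_search`) come from the signatures shown, and the ranking keywords follow the RankingOptions definition in this hunk (its Ranker values appear just below):

    require "openai"

    tools = [
      OpenAI::Models::Beta::CodeInterpreterTool.new,
      OpenAI::Models::Beta::FileSearchTool.new(
        file_search: OpenAI::Models::Beta::FileSearchTool::FileSearch.new(
          max_num_results: 20,
          ranking_options: OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions.new(
            score_threshold: 0.5,
            ranker: :auto
          )
        )
      )
    ]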
- - AUTO = :auto - DEFAULT_2024_08_21 = :default_2024_08_21 - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/function_tool.rbi b/rbi/lib/openai/models/beta/function_tool.rbi deleted file mode 100644 index 0c7a758f..00000000 --- a/rbi/lib/openai/models/beta/function_tool.rbi +++ /dev/null @@ -1,33 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class FunctionTool < OpenAI::BaseModel - sig { returns(OpenAI::Models::FunctionDefinition) } - def function - end - - sig { params(_: OpenAI::Models::FunctionDefinition).returns(OpenAI::Models::FunctionDefinition) } - def function=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(function: OpenAI::Models::FunctionDefinition, type: Symbol).returns(T.attached_class) } - def self.new(function:, type: :function) - end - - sig { override.returns({function: OpenAI::Models::FunctionDefinition, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/message_stream_event.rbi b/rbi/lib/openai/models/beta/message_stream_event.rbi deleted file mode 100644 index 135308ce..00000000 --- a/rbi/lib/openai/models/beta/message_stream_event.rbi +++ /dev/null @@ -1,155 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class MessageStreamEvent < OpenAI::Union - abstract! - - class ThreadMessageCreated < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Message) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Message).returns(OpenAI::Models::Beta::Threads::Message) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.message.created") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Message, event: Symbol}) } - def to_hash - end - end - - class ThreadMessageInProgress < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Message) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Message).returns(OpenAI::Models::Beta::Threads::Message) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.message.in_progress") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Message, event: Symbol}) } - def to_hash - end - end - - class ThreadMessageDelta < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::MessageDeltaEvent) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::MessageDeltaEvent) - .returns(OpenAI::Models::Beta::Threads::MessageDeltaEvent) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::MessageDeltaEvent, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.message.delta") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::MessageDeltaEvent, event: Symbol}) } - def to_hash - end - end - - class 
ThreadMessageCompleted < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Message) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Message).returns(OpenAI::Models::Beta::Threads::Message) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.message.completed") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Message, event: Symbol}) } - def to_hash - end - end - - class ThreadMessageIncomplete < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Message) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Message).returns(OpenAI::Models::Beta::Threads::Message) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.message.incomplete") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Message, event: Symbol}) } - def to_hash - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCreated], [Symbol, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageInProgress], [Symbol, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageDelta], [Symbol, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCompleted], [Symbol, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageIncomplete]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/run_step_stream_event.rbi b/rbi/lib/openai/models/beta/run_step_stream_event.rbi deleted file mode 100644 index 1adb063a..00000000 --- a/rbi/lib/openai/models/beta/run_step_stream_event.rbi +++ /dev/null @@ -1,228 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class RunStepStreamEvent < OpenAI::Union - abstract! 
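Note: every message stream event wrapper deleted above pairs a `data` payload with a fixed `event` Symbol default. A hedged sketch of dispatching on them; `event` is assumed to be one of the MessageStreamEvent variants:

    require "openai"

    def on_message_event(event)
      case event
      when OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageDelta
        event.data # a Threads::MessageDeltaEvent carrying the partial content
      when OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCompleted,
           OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageIncomplete
        event.data # the final Threads::Message
      end
    end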
- - class ThreadRunStepCreated < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStep) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStep) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.step.created") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol}) } - def to_hash - end - end - - class ThreadRunStepInProgress < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStep) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStep) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.step.in_progress") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol}) } - def to_hash - end - end - - class ThreadRunStepDelta < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig do - params(data: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent, event: Symbol) - .returns(T.attached_class) - end - def self.new(data:, event: :"thread.run.step.delta") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent, event: Symbol}) } - def to_hash - end - end - - class ThreadRunStepCompleted < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStep) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStep) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.step.completed") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol}) } - def to_hash - end - end - - class ThreadRunStepFailed < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStep) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStep) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.step.failed") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol}) } - def to_hash - end - end 
- - class ThreadRunStepCancelled < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStep) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStep) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.step.cancelled") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol}) } - def to_hash - end - end - - class ThreadRunStepExpired < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } - def data - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStep) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStep) - end - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.step.expired") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol}) } - def to_hash - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCreated], [Symbol, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepInProgress], [Symbol, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepDelta], [Symbol, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCompleted], [Symbol, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepFailed], [Symbol, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCancelled], [Symbol, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepExpired]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/run_stream_event.rbi b/rbi/lib/openai/models/beta/run_stream_event.rbi deleted file mode 100644 index 47d3fd71..00000000 --- a/rbi/lib/openai/models/beta/run_stream_event.rbi +++ /dev/null @@ -1,282 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class RunStreamEvent < OpenAI::Union - abstract! 
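Note: the run-step events deleted above follow the same shape; deltas carry a RunStepDeltaEvent, every other variant carries the full RunStep. A hedged handler sketch:

    require "openai"

    def on_run_step_event(event)
      case event
      when OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepDelta
        event.data # Threads::Runs::RunStepDeltaEvent
      when OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepFailed,
           OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCancelled,
           OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepExpired
        warn "run step ended abnormally: #{event.event}"
      end
    end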
- - class ThreadRunCreated < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.created") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunQueued < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.queued") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunInProgress < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.in_progress") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunRequiresAction < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.requires_action") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunCompleted < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.completed") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunIncomplete < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: 
OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.incomplete") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunFailed < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.failed") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunCancelling < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.cancelling") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunCancelled < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.cancelled") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class ThreadRunExpired < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Threads::Run).returns(OpenAI::Models::Beta::Threads::Run) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } - def self.new(data:, event: :"thread.run.expired") - end - - sig { override.returns({data: OpenAI::Models::Beta::Threads::Run, event: Symbol}) } - def to_hash - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCreated], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunQueued], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunInProgress], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunRequiresAction], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCompleted], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunIncomplete], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelling], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelled], [Symbol, OpenAI::Models::Beta::RunStreamEvent::ThreadRunExpired]] - ) - end - private def variants - end - end 
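Note: of the ten RunStreamEvent variants deleted above, five mark a run reaching a terminal state and five are lifecycle updates. A hedged sketch of telling them apart; `run_finished?` is a hypothetical helper name:

    require "openai"

    RUN_TERMINAL_EVENTS = [
      OpenAI::Models::Beta::RunStreamEvent::ThreadRunCompleted,
      OpenAI::Models::Beta::RunStreamEvent::ThreadRunIncomplete,
      OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed,
      OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelled,
      OpenAI::Models::Beta::RunStreamEvent::ThreadRunExpired
    ].freeze

    def run_finished?(event)
      RUN_TERMINAL_EVENTS.any? { |klass| event.is_a?(klass) }
    end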
- end - end - end -end diff --git a/rbi/lib/openai/models/beta/thread.rbi b/rbi/lib/openai/models/beta/thread.rbi deleted file mode 100644 index f7263e57..00000000 --- a/rbi/lib/openai/models/beta/thread.rbi +++ /dev/null @@ -1,162 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class Thread < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(Integer) } - def created_at - end - - sig { params(_: Integer).returns(Integer) } - def created_at=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Thread::ToolResources)) } - def tool_resources - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::Thread::ToolResources)) - .returns(T.nilable(OpenAI::Models::Beta::Thread::ToolResources)) - end - def tool_resources=(_) - end - - sig do - params( - id: String, - created_at: Integer, - metadata: T.nilable(OpenAI::Models::Metadata), - tool_resources: T.nilable(OpenAI::Models::Beta::Thread::ToolResources), - object: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, created_at:, metadata:, tool_resources:, object: :thread) - end - - sig do - override - .returns( - { - id: String, - created_at: Integer, - metadata: T.nilable(OpenAI::Models::Metadata), - object: Symbol, - tool_resources: T.nilable(OpenAI::Models::Beta::Thread::ToolResources) - } - ) - end - def to_hash - end - - class ToolResources < OpenAI::BaseModel - sig { returns(T.nilable(OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter)) } - def code_interpreter - end - - sig do - params(_: OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter) - .returns(OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter) - end - def code_interpreter=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Thread::ToolResources::FileSearch)) } - def file_search - end - - sig do - params(_: OpenAI::Models::Beta::Thread::ToolResources::FileSearch) - .returns(OpenAI::Models::Beta::Thread::ToolResources::FileSearch) - end - def file_search=(_) - end - - sig do - params( - code_interpreter: OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::Thread::ToolResources::FileSearch - ) - .returns(T.attached_class) - end - def self.new(code_interpreter: nil, file_search: nil) - end - - sig do - override - .returns( - { - code_interpreter: OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::Thread::ToolResources::FileSearch - } - ) - end - def to_hash - end - - class CodeInterpreter < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def file_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def file_ids=(_) - end - - sig { params(file_ids: T::Array[String]).returns(T.attached_class) } - def self.new(file_ids: nil) - end - - sig { override.returns({file_ids: T::Array[String]}) } - def to_hash - end - end - - class FileSearch < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def vector_store_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def vector_store_ids=(_) - end - - sig { 
params(vector_store_ids: T::Array[String]).returns(T.attached_class) } - def self.new(vector_store_ids: nil) - end - - sig { override.returns({vector_store_ids: T::Array[String]}) } - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/thread_create_and_run_params.rbi b/rbi/lib/openai/models/beta/thread_create_and_run_params.rbi deleted file mode 100644 index d0dd117f..00000000 --- a/rbi/lib/openai/models/beta/thread_create_and_run_params.rbi +++ /dev/null @@ -1,1104 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class ThreadCreateAndRunParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def assistant_id - end - - sig { params(_: String).returns(String) } - def assistant_id=(_) - end - - sig { returns(T.nilable(String)) } - def instructions - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def instructions=(_) - end - - sig { returns(T.nilable(Integer)) } - def max_completion_tokens - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def max_completion_tokens=(_) - end - - sig { returns(T.nilable(Integer)) } - def max_prompt_tokens - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def max_prompt_tokens=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.nilable(T.any(String, Symbol))) } - def model - end - - sig { params(_: T.nilable(T.any(String, Symbol))).returns(T.nilable(T.any(String, Symbol))) } - def model=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def parallel_tool_calls - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def parallel_tool_calls=(_) - end - - sig do - returns( - T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - end - def response_format - end - - sig do - params( - _: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - .returns( - T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - end - def response_format=(_) - end - - sig { returns(T.nilable(Float)) } - def temperature - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def temperature=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread)) } - def thread - end - - sig do - params(_: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread) - .returns(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread) - end - def thread=(_) - end - - sig { returns(T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice))) } - def tool_choice - end - - sig do - params(_: T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice))) - .returns(T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice))) - end - def tool_choice=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources)) } - def tool_resources - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources)) - 
.returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources)) - end - def tool_resources=(_) - end - - sig do - returns( - T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - ) - end - def tools - end - - sig do - params( - _: T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - ) - .returns( - T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - ) - end - def tools=(_) - end - - sig { returns(T.nilable(Float)) } - def top_p - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def top_p=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy)) } - def truncation_strategy - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy)) - .returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy)) - end - def truncation_strategy=(_) - end - - sig do - params( - assistant_id: String, - instructions: T.nilable(String), - max_completion_tokens: T.nilable(Integer), - max_prompt_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - model: T.nilable(T.any(String, Symbol)), - parallel_tool_calls: T::Boolean, - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - thread: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread, - tool_choice: T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice)), - tool_resources: T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources), - tools: T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ), - top_p: T.nilable(Float), - truncation_strategy: T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new( - assistant_id:, - instructions: nil, - max_completion_tokens: nil, - max_prompt_tokens: nil, - metadata: nil, - model: nil, - parallel_tool_calls: nil, - response_format: nil, - temperature: nil, - thread: nil, - tool_choice: nil, - tool_resources: nil, - tools: nil, - top_p: nil, - truncation_strategy: nil, - request_options: {} - ) - end - - sig do - override - .returns( - { - assistant_id: String, - instructions: T.nilable(String), - max_completion_tokens: T.nilable(Integer), - max_prompt_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - model: T.nilable(T.any(String, Symbol)), - parallel_tool_calls: T::Boolean, - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - thread: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread, - tool_choice: T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice)), - tool_resources: T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources), 
- tools: T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ), - top_p: T.nilable(Float), - truncation_strategy: T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy), - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Model < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - - class Thread < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message])) } - def messages - end - - sig do - params(_: T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message]) - .returns(T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message]) - end - def messages=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources)) } - def tool_resources - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources)) - .returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources)) - end - def tool_resources=(_) - end - - sig do - params( - messages: T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message], - metadata: T.nilable(OpenAI::Models::Metadata), - tool_resources: T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources) - ) - .returns(T.attached_class) - end - def self.new(messages: nil, metadata: nil, tool_resources: nil) - end - - sig do - override - .returns( - { - messages: T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message], - metadata: T.nilable(OpenAI::Models::Metadata), - tool_resources: T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources) - } - ) - end - def to_hash - end - - class Message < OpenAI::BaseModel - sig do - returns( - T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ) - ) - end - def content - end - - sig do - params( - _: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ) - ) - .returns( - T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ) - ) - end - def content=(_) - end - - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment])) } - def attachments - end - - sig do - params( - _: T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment]) - ) - .returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment])) - end - def attachments=(_) - end - - sig { 
returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig do - params( - content: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ), - role: Symbol, - attachments: T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment]), - metadata: T.nilable(OpenAI::Models::Metadata) - ) - .returns(T.attached_class) - end - def self.new(content:, role:, attachments: nil, metadata: nil) - end - - sig do - override - .returns( - { - content: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ), - role: Symbol, - attachments: T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment]), - metadata: T.nilable(OpenAI::Models::Metadata) - } - ) - end - def to_hash - end - - class Content < OpenAI::Union - abstract! - - MessageContentPartParamArray = T.type_alias do - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - end - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [ - NilClass, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ] - ] - ) - end - private def variants - end - end - end - - class Role < OpenAI::Enum - abstract! - - USER = :user - ASSISTANT = :assistant - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Attachment < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig do - returns( - T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch - ) - ] - ) - ) - end - def tools - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch - ) - ] - ) - end - def tools=(_) - end - - sig do - params( - file_id: String, - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch - ) - ] - ) - .returns(T.attached_class) - end - def self.new(file_id: nil, tools: nil) - end - - sig do - override - .returns( - { - file_id: String, - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch - ) - ] - } - ) - end - def to_hash - end - - class Tool < OpenAI::Union - abstract! 
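Note: a hedged sketch of seeding a thread message for create-and-run, per the Message and Attachment signatures above; `content` accepts a plain String per the Content union, and "file-abc123" is a hypothetical placeholder:

    require "openai"

    message = OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message.new(
      role: :user,
      content: "Summarize the attached report.",
      attachments: [
        OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment.new(
          file_id: "file-abc123",
          tools: [OpenAI::Models::Beta::CodeInterpreterTool.new]
        )
      ]
    )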
- - class FileSearch < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :file_search) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::CodeInterpreterTool], [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch]] - ) - end - private def variants - end - end - end - end - end - - class ToolResources < OpenAI::BaseModel - sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter)) } - def code_interpreter - end - - sig do - params(_: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter) - .returns(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter) - end - def code_interpreter=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch)) } - def file_search - end - - sig do - params(_: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch) - .returns(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch) - end - def file_search=(_) - end - - sig do - params( - code_interpreter: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch - ) - .returns(T.attached_class) - end - def self.new(code_interpreter: nil, file_search: nil) - end - - sig do - override - .returns( - { - code_interpreter: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch - } - ) - end - def to_hash - end - - class CodeInterpreter < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def file_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def file_ids=(_) - end - - sig { params(file_ids: T::Array[String]).returns(T.attached_class) } - def self.new(file_ids: nil) - end - - sig { override.returns({file_ids: T::Array[String]}) } - def to_hash - end - end - - class FileSearch < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def vector_store_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def vector_store_ids=(_) - end - - sig do - returns( - T.nilable( - T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] - ) - ) - end - def vector_stores - end - - sig do - params( - _: T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] - ) - .returns( - T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] - ) - end - def vector_stores=(_) - end - - sig do - params( - vector_store_ids: T::Array[String], - vector_stores: T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] - ) - .returns(T.attached_class) - end - def self.new(vector_store_ids: nil, vector_stores: nil) - end - - sig do - override - .returns( - { - vector_store_ids: T::Array[String], - vector_stores: 
T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] - } - ) - end - def to_hash - end - - class VectorStore < OpenAI::BaseModel - sig do - returns( - T.nilable( - T.any( - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static - ) - ) - ) - end - def chunking_strategy - end - - sig do - params( - _: T.any( - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static - ) - ) - .returns( - T.any( - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static - ) - ) - end - def chunking_strategy=(_) - end - - sig { returns(T.nilable(T::Array[String])) } - def file_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def file_ids=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig do - params( - chunking_strategy: T.any( - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static - ), - file_ids: T::Array[String], - metadata: T.nilable(OpenAI::Models::Metadata) - ) - .returns(T.attached_class) - end - def self.new(chunking_strategy: nil, file_ids: nil, metadata: nil) - end - - sig do - override - .returns( - { - chunking_strategy: T.any( - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static - ), - file_ids: T::Array[String], - metadata: T.nilable(OpenAI::Models::Metadata) - } - ) - end - def to_hash - end - - class ChunkingStrategy < OpenAI::Union - abstract! 
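Note: a hedged sketch of an inline vector store for the thread's file_search resources, per the VectorStore signature above; the chunking strategy (whose variants follow) is left to the server default, and the file id is a hypothetical placeholder:

    require "openai"

    store = OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore.new(
      file_ids: ["file-abc123"]
    )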
- - class Auto < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :auto) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - - class Static < OpenAI::BaseModel - sig do - returns( - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static - ) - end - def static - end - - sig do - params( - _: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static - ) - .returns( - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static - ) - end - def static=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - static: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(static:, type: :static) - end - - sig do - override - .returns( - { - static: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, - type: Symbol - } - ) - end - def to_hash - end - - class Static < OpenAI::BaseModel - sig { returns(Integer) } - def chunk_overlap_tokens - end - - sig { params(_: Integer).returns(Integer) } - def chunk_overlap_tokens=(_) - end - - sig { returns(Integer) } - def max_chunk_size_tokens - end - - sig { params(_: Integer).returns(Integer) } - def max_chunk_size_tokens=(_) - end - - sig do - params( - chunk_overlap_tokens: Integer, - max_chunk_size_tokens: Integer - ).returns(T.attached_class) - end - def self.new(chunk_overlap_tokens:, max_chunk_size_tokens:) - end - - sig do - override.returns({chunk_overlap_tokens: Integer, max_chunk_size_tokens: Integer}) - end - def to_hash - end - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto], [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]] - ) - end - private def variants - end - end - end - end - end - end - end - - class ToolResources < OpenAI::BaseModel - sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter)) } - def code_interpreter - end - - sig do - params(_: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter) - .returns(OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter) - end - def code_interpreter=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch)) } - def file_search - end - - sig do - params(_: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch) - .returns(OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch) - end - def file_search=(_) - end - - sig do - params( - code_interpreter: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch - ) - .returns(T.attached_class) - end - def self.new(code_interpreter: nil, 
file_search: nil) - end - - sig do - override - .returns( - { - code_interpreter: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch - } - ) - end - def to_hash - end - - class CodeInterpreter < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def file_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def file_ids=(_) - end - - sig { params(file_ids: T::Array[String]).returns(T.attached_class) } - def self.new(file_ids: nil) - end - - sig { override.returns({file_ids: T::Array[String]}) } - def to_hash - end - end - - class FileSearch < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def vector_store_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def vector_store_ids=(_) - end - - sig { params(vector_store_ids: T::Array[String]).returns(T.attached_class) } - def self.new(vector_store_ids: nil) - end - - sig { override.returns({vector_store_ids: T::Array[String]}) } - def to_hash - end - end - end - - class Tool < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[NilClass, OpenAI::Models::Beta::CodeInterpreterTool], [NilClass, OpenAI::Models::Beta::FileSearchTool], [NilClass, OpenAI::Models::Beta::FunctionTool]] - ) - end - private def variants - end - end - end - - class TruncationStrategy < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(Integer)) } - def last_messages - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def last_messages=(_) - end - - sig { params(type: Symbol, last_messages: T.nilable(Integer)).returns(T.attached_class) } - def self.new(type:, last_messages: nil) - end - - sig { override.returns({type: Symbol, last_messages: T.nilable(Integer)}) } - def to_hash - end - - class Type < OpenAI::Enum - abstract! 
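Note: a hedged sketch, per the TruncationStrategy signature above, of capping the run's context to the ten most recent messages (the Type enum values follow below):

    require "openai"

    truncation = OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy.new(
      type: :last_messages,
      last_messages: 10
    )
    truncation.to_hash # => {type: :last_messages, last_messages: 10}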
- - AUTO = :auto - LAST_MESSAGES = :last_messages - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/thread_create_params.rbi b/rbi/lib/openai/models/beta/thread_create_params.rbi deleted file mode 100644 index 627808ef..00000000 --- a/rbi/lib/openai/models/beta/thread_create_params.rbi +++ /dev/null @@ -1,652 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class ThreadCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message])) } - def messages - end - - sig do - params(_: T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message]) - .returns(T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message]) - end - def messages=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateParams::ToolResources)) } - def tool_resources - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::ThreadCreateParams::ToolResources)) - .returns(T.nilable(OpenAI::Models::Beta::ThreadCreateParams::ToolResources)) - end - def tool_resources=(_) - end - - sig do - params( - messages: T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message], - metadata: T.nilable(OpenAI::Models::Metadata), - tool_resources: T.nilable(OpenAI::Models::Beta::ThreadCreateParams::ToolResources), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(messages: nil, metadata: nil, tool_resources: nil, request_options: {}) - end - - sig do - override - .returns( - { - messages: T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message], - metadata: T.nilable(OpenAI::Models::Metadata), - tool_resources: T.nilable(OpenAI::Models::Beta::ThreadCreateParams::ToolResources), - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Message < OpenAI::BaseModel - sig do - returns( - T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ) - ) - end - def content - end - - sig do - params( - _: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ) - ) - .returns( - T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ) - ) - end - def content=(_) - end - - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment])) } - def attachments - end - - sig do - params(_: T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment])) - .returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment])) - end - def attachments=(_) - end - - sig { 
returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig do - params( - content: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ), - role: Symbol, - attachments: T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment]), - metadata: T.nilable(OpenAI::Models::Metadata) - ) - .returns(T.attached_class) - end - def self.new(content:, role:, attachments: nil, metadata: nil) - end - - sig do - override - .returns( - { - content: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ), - role: Symbol, - attachments: T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment]), - metadata: T.nilable(OpenAI::Models::Metadata) - } - ) - end - def to_hash - end - - class Content < OpenAI::Union - abstract! - - MessageContentPartParamArray = T.type_alias do - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - end - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [ - NilClass, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ] - ] - ) - end - private def variants - end - end - end - - class Role < OpenAI::Enum - abstract! - - USER = :user - ASSISTANT = :assistant - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Attachment < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig do - returns( - T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch - ) - ] - ) - ) - end - def tools - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch - ) - ] - ) - end - def tools=(_) - end - - sig do - params( - file_id: String, - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch - ) - ] - ) - .returns(T.attached_class) - end - def self.new(file_id: nil, tools: nil) - end - - sig do - override - .returns( - { - file_id: String, - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch - ) - ] - } - ) - end - def to_hash - end - - class Tool < OpenAI::Union - abstract! 
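# Illustrative sketch, not part of this diff: the Tool union here accepts the
# shared CodeInterpreterTool or the FileSearch stub defined next. Per the
# deleted sigs, an attachment paired a file ID (placeholder below) with tools:
attachment = OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment.new(
  file_id: "file-abc123",
  tools: [OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch.new]
)
message = OpenAI::Models::Beta::ThreadCreateParams::Message.new(
  content: "Summarize the attached file.",
  role: :user,
  attachments: [attachment]
)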
- - class FileSearch < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :file_search) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::CodeInterpreterTool], [Symbol, OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch]] - ) - end - private def variants - end - end - end - end - end - - class ToolResources < OpenAI::BaseModel - sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter)) } - def code_interpreter - end - - sig do - params(_: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter) - .returns(OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter) - end - def code_interpreter=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch)) } - def file_search - end - - sig do - params(_: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch) - .returns(OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch) - end - def file_search=(_) - end - - sig do - params( - code_interpreter: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch - ) - .returns(T.attached_class) - end - def self.new(code_interpreter: nil, file_search: nil) - end - - sig do - override - .returns( - { - code_interpreter: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch - } - ) - end - def to_hash - end - - class CodeInterpreter < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def file_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def file_ids=(_) - end - - sig { params(file_ids: T::Array[String]).returns(T.attached_class) } - def self.new(file_ids: nil) - end - - sig { override.returns({file_ids: T::Array[String]}) } - def to_hash - end - end - - class FileSearch < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def vector_store_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def vector_store_ids=(_) - end - - sig do - returns( - T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore]) - ) - end - def vector_stores - end - - sig do - params(_: T::Array[OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore]) - .returns(T::Array[OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore]) - end - def vector_stores=(_) - end - - sig do - params( - vector_store_ids: T::Array[String], - vector_stores: T::Array[OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore] - ) - .returns(T.attached_class) - end - def self.new(vector_store_ids: nil, vector_stores: nil) - end - - sig do - override - .returns( - { - vector_store_ids: T::Array[String], - vector_stores: T::Array[OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore] - } - ) - end - def to_hash - end - - class VectorStore < OpenAI::BaseModel - sig do - returns( - T.nilable( - T.any( - 
OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, - OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static - ) - ) - ) - end - def chunking_strategy - end - - sig do - params( - _: T.any( - OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, - OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static - ) - ) - .returns( - T.any( - OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, - OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static - ) - ) - end - def chunking_strategy=(_) - end - - sig { returns(T.nilable(T::Array[String])) } - def file_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def file_ids=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig do - params( - chunking_strategy: T.any( - OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, - OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static - ), - file_ids: T::Array[String], - metadata: T.nilable(OpenAI::Models::Metadata) - ) - .returns(T.attached_class) - end - def self.new(chunking_strategy: nil, file_ids: nil, metadata: nil) - end - - sig do - override - .returns( - { - chunking_strategy: T.any( - OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, - OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static - ), - file_ids: T::Array[String], - metadata: T.nilable(OpenAI::Models::Metadata) - } - ) - end - def to_hash - end - - class ChunkingStrategy < OpenAI::Union - abstract! 
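# Illustrative sketch, not part of this diff: the variants below are Auto and
# Static. A static strategy wrapped its token counts in the nested
# Static::Static model; the token numbers are arbitrary example values and the
# file ID is a placeholder:
chunking = OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static.new(
  static: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static.new(
    chunk_overlap_tokens: 400,
    max_chunk_size_tokens: 800
  )
)
vector_store = OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore.new(
  chunking_strategy: chunking,
  file_ids: ["file-abc123"]
)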
- - class Auto < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :auto) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - - class Static < OpenAI::BaseModel - sig do - returns( - OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static - ) - end - def static - end - - sig do - params( - _: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static - ) - .returns( - OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static - ) - end - def static=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - static: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(static:, type: :static) - end - - sig do - override - .returns( - { - static: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, - type: Symbol - } - ) - end - def to_hash - end - - class Static < OpenAI::BaseModel - sig { returns(Integer) } - def chunk_overlap_tokens - end - - sig { params(_: Integer).returns(Integer) } - def chunk_overlap_tokens=(_) - end - - sig { returns(Integer) } - def max_chunk_size_tokens - end - - sig { params(_: Integer).returns(Integer) } - def max_chunk_size_tokens=(_) - end - - sig do - params( - chunk_overlap_tokens: Integer, - max_chunk_size_tokens: Integer - ).returns(T.attached_class) - end - def self.new(chunk_overlap_tokens:, max_chunk_size_tokens:) - end - - sig { override.returns({chunk_overlap_tokens: Integer, max_chunk_size_tokens: Integer}) } - def to_hash - end - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto], [Symbol, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]] - ) - end - private def variants - end - end - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/thread_delete_params.rbi b/rbi/lib/openai/models/beta/thread_delete_params.rbi deleted file mode 100644 index a47b1e53..00000000 --- a/rbi/lib/openai/models/beta/thread_delete_params.rbi +++ /dev/null @@ -1,23 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class ThreadDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/thread_deleted.rbi b/rbi/lib/openai/models/beta/thread_deleted.rbi deleted file mode 100644 index 1ca71e29..00000000 --- a/rbi/lib/openai/models/beta/thread_deleted.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class ThreadDeleted < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { 
params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T::Boolean) } - def deleted - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def deleted=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { params(id: String, deleted: T::Boolean, object: Symbol).returns(T.attached_class) } - def self.new(id:, deleted:, object: :"thread.deleted") - end - - sig { override.returns({id: String, deleted: T::Boolean, object: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/thread_retrieve_params.rbi b/rbi/lib/openai/models/beta/thread_retrieve_params.rbi deleted file mode 100644 index 07f1e477..00000000 --- a/rbi/lib/openai/models/beta/thread_retrieve_params.rbi +++ /dev/null @@ -1,23 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class ThreadRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/thread_stream_event.rbi b/rbi/lib/openai/models/beta/thread_stream_event.rbi deleted file mode 100644 index df43a6d2..00000000 --- a/rbi/lib/openai/models/beta/thread_stream_event.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class ThreadStreamEvent < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Thread) } - def data - end - - sig { params(_: OpenAI::Models::Beta::Thread).returns(OpenAI::Models::Beta::Thread) } - def data=(_) - end - - sig { returns(Symbol) } - def event - end - - sig { params(_: Symbol).returns(Symbol) } - def event=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def enabled - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def enabled=(_) - end - - sig { params(data: OpenAI::Models::Beta::Thread, enabled: T::Boolean, event: Symbol).returns(T.attached_class) } - def self.new(data:, enabled: nil, event: :"thread.created") - end - - sig { override.returns({data: OpenAI::Models::Beta::Thread, event: Symbol, enabled: T::Boolean}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/thread_update_params.rbi b/rbi/lib/openai/models/beta/thread_update_params.rbi deleted file mode 100644 index f95411f9..00000000 --- a/rbi/lib/openai/models/beta/thread_update_params.rbi +++ /dev/null @@ -1,137 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - class ThreadUpdateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::ThreadUpdateParams::ToolResources)) } - def tool_resources - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::ThreadUpdateParams::ToolResources)) - .returns(T.nilable(OpenAI::Models::Beta::ThreadUpdateParams::ToolResources)) - end - def tool_resources=(_) - end - - sig do - params( - metadata: T.nilable(OpenAI::Models::Metadata), - tool_resources: 
T.nilable(OpenAI::Models::Beta::ThreadUpdateParams::ToolResources), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(metadata: nil, tool_resources: nil, request_options: {}) - end - - sig do - override - .returns( - { - metadata: T.nilable(OpenAI::Models::Metadata), - tool_resources: T.nilable(OpenAI::Models::Beta::ThreadUpdateParams::ToolResources), - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class ToolResources < OpenAI::BaseModel - sig { returns(T.nilable(OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter)) } - def code_interpreter - end - - sig do - params(_: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter) - .returns(OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter) - end - def code_interpreter=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch)) } - def file_search - end - - sig do - params(_: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch) - .returns(OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch) - end - def file_search=(_) - end - - sig do - params( - code_interpreter: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch - ) - .returns(T.attached_class) - end - def self.new(code_interpreter: nil, file_search: nil) - end - - sig do - override - .returns( - { - code_interpreter: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch - } - ) - end - def to_hash - end - - class CodeInterpreter < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def file_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def file_ids=(_) - end - - sig { params(file_ids: T::Array[String]).returns(T.attached_class) } - def self.new(file_ids: nil) - end - - sig { override.returns({file_ids: T::Array[String]}) } - def to_hash - end - end - - class FileSearch < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[String])) } - def vector_store_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def vector_store_ids=(_) - end - - sig { params(vector_store_ids: T::Array[String]).returns(T.attached_class) } - def self.new(vector_store_ids: nil) - end - - sig { override.returns({vector_store_ids: T::Array[String]}) } - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/annotation.rbi b/rbi/lib/openai/models/beta/threads/annotation.rbi deleted file mode 100644 index 6510f8df..00000000 --- a/rbi/lib/openai/models/beta/threads/annotation.rbi +++ /dev/null @@ -1,24 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class Annotation < OpenAI::Union - abstract! 
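# Illustrative sketch, not part of this diff: per the variant list below, an
# annotation is either a FileCitationAnnotation or a FilePathAnnotation, so a
# hypothetical consumer branched on the concrete class to pull the file ID:
def cited_file_id(annotation)
  case annotation
  when OpenAI::Models::Beta::Threads::FileCitationAnnotation
    annotation.file_citation.file_id
  when OpenAI::Models::Beta::Threads::FilePathAnnotation
    annotation.file_path.file_id
  end
end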
- - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::FileCitationAnnotation], [Symbol, OpenAI::Models::Beta::Threads::FilePathAnnotation]] - ) - end - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/annotation_delta.rbi b/rbi/lib/openai/models/beta/threads/annotation_delta.rbi deleted file mode 100644 index 81699c4b..00000000 --- a/rbi/lib/openai/models/beta/threads/annotation_delta.rbi +++ /dev/null @@ -1,24 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class AnnotationDelta < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation], [Symbol, OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation]] - ) - end - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/file_citation_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_citation_annotation.rbi deleted file mode 100644 index 631db0a5..00000000 --- a/rbi/lib/openai/models/beta/threads/file_citation_annotation.rbi +++ /dev/null @@ -1,100 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class FileCitationAnnotation < OpenAI::BaseModel - sig { returns(Integer) } - def end_index - end - - sig { params(_: Integer).returns(Integer) } - def end_index=(_) - end - - sig { returns(OpenAI::Models::Beta::Threads::FileCitationAnnotation::FileCitation) } - def file_citation - end - - sig do - params(_: OpenAI::Models::Beta::Threads::FileCitationAnnotation::FileCitation) - .returns(OpenAI::Models::Beta::Threads::FileCitationAnnotation::FileCitation) - end - def file_citation=(_) - end - - sig { returns(Integer) } - def start_index - end - - sig { params(_: Integer).returns(Integer) } - def start_index=(_) - end - - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - end_index: Integer, - file_citation: OpenAI::Models::Beta::Threads::FileCitationAnnotation::FileCitation, - start_index: Integer, - text: String, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(end_index:, file_citation:, start_index:, text:, type: :file_citation) - end - - sig do - override - .returns( - { - end_index: Integer, - file_citation: OpenAI::Models::Beta::Threads::FileCitationAnnotation::FileCitation, - start_index: Integer, - text: String, - type: Symbol - } - ) - end - def to_hash - end - - class FileCitation < OpenAI::BaseModel - sig { returns(String) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { params(file_id: String).returns(T.attached_class) } - def self.new(file_id:) - end - - sig { override.returns({file_id: String}) } - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/file_citation_delta_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_citation_delta_annotation.rbi deleted file mode 100644 index f2e75732..00000000 --- a/rbi/lib/openai/models/beta/threads/file_citation_delta_annotation.rbi +++ /dev/null @@ -1,118 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class FileCitationDeltaAnnotation < OpenAI::BaseModel - sig { returns(Integer) } - def index - end - - sig { 
params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(Integer)) } - def end_index - end - - sig { params(_: Integer).returns(Integer) } - def end_index=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation)) } - def file_citation - end - - sig do - params(_: OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation) - .returns(OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation) - end - def file_citation=(_) - end - - sig { returns(T.nilable(Integer)) } - def start_index - end - - sig { params(_: Integer).returns(Integer) } - def start_index=(_) - end - - sig { returns(T.nilable(String)) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig do - params( - index: Integer, - end_index: Integer, - file_citation: OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation, - start_index: Integer, - text: String, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(index:, end_index: nil, file_citation: nil, start_index: nil, text: nil, type: :file_citation) - end - - sig do - override - .returns( - { - index: Integer, - type: Symbol, - end_index: Integer, - file_citation: OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation, - start_index: Integer, - text: String - } - ) - end - def to_hash - end - - class FileCitation < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { returns(T.nilable(String)) } - def quote - end - - sig { params(_: String).returns(String) } - def quote=(_) - end - - sig { params(file_id: String, quote: String).returns(T.attached_class) } - def self.new(file_id: nil, quote: nil) - end - - sig { override.returns({file_id: String, quote: String}) } - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/file_path_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_path_annotation.rbi deleted file mode 100644 index 0feca1b6..00000000 --- a/rbi/lib/openai/models/beta/threads/file_path_annotation.rbi +++ /dev/null @@ -1,100 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class FilePathAnnotation < OpenAI::BaseModel - sig { returns(Integer) } - def end_index - end - - sig { params(_: Integer).returns(Integer) } - def end_index=(_) - end - - sig { returns(OpenAI::Models::Beta::Threads::FilePathAnnotation::FilePath) } - def file_path - end - - sig do - params(_: OpenAI::Models::Beta::Threads::FilePathAnnotation::FilePath) - .returns(OpenAI::Models::Beta::Threads::FilePathAnnotation::FilePath) - end - def file_path=(_) - end - - sig { returns(Integer) } - def start_index - end - - sig { params(_: Integer).returns(Integer) } - def start_index=(_) - end - - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - end_index: Integer, - file_path: OpenAI::Models::Beta::Threads::FilePathAnnotation::FilePath, - start_index: Integer, - text: String, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(end_index:, file_path:, start_index:, text:, type: :file_path) - end - - 
sig do - override - .returns( - { - end_index: Integer, - file_path: OpenAI::Models::Beta::Threads::FilePathAnnotation::FilePath, - start_index: Integer, - text: String, - type: Symbol - } - ) - end - def to_hash - end - - class FilePath < OpenAI::BaseModel - sig { returns(String) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { params(file_id: String).returns(T.attached_class) } - def self.new(file_id:) - end - - sig { override.returns({file_id: String}) } - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/file_path_delta_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_path_delta_annotation.rbi deleted file mode 100644 index f12cbc1f..00000000 --- a/rbi/lib/openai/models/beta/threads/file_path_delta_annotation.rbi +++ /dev/null @@ -1,110 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class FilePathDeltaAnnotation < OpenAI::BaseModel - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(Integer)) } - def end_index - end - - sig { params(_: Integer).returns(Integer) } - def end_index=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath)) } - def file_path - end - - sig do - params(_: OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath) - .returns(OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath) - end - def file_path=(_) - end - - sig { returns(T.nilable(Integer)) } - def start_index - end - - sig { params(_: Integer).returns(Integer) } - def start_index=(_) - end - - sig { returns(T.nilable(String)) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig do - params( - index: Integer, - end_index: Integer, - file_path: OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath, - start_index: Integer, - text: String, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(index:, end_index: nil, file_path: nil, start_index: nil, text: nil, type: :file_path) - end - - sig do - override - .returns( - { - index: Integer, - type: Symbol, - end_index: Integer, - file_path: OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath, - start_index: Integer, - text: String - } - ) - end - def to_hash - end - - class FilePath < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { params(file_id: String).returns(T.attached_class) } - def self.new(file_id: nil) - end - - sig { override.returns({file_id: String}) } - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/image_file.rbi b/rbi/lib/openai/models/beta/threads/image_file.rbi deleted file mode 100644 index 5f75f9fc..00000000 --- a/rbi/lib/openai/models/beta/threads/image_file.rbi +++ /dev/null @@ -1,49 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class ImageFile < OpenAI::BaseModel - sig { returns(String) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { returns(T.nilable(Symbol)) } - def detail - end - - sig { params(_: Symbol).returns(Symbol) } - def detail=(_) - end - - sig { params(file_id: String, 
detail: Symbol).returns(T.attached_class) } - def self.new(file_id:, detail: nil) - end - - sig { override.returns({file_id: String, detail: Symbol}) } - def to_hash - end - - class Detail < OpenAI::Enum - abstract! - - AUTO = :auto - LOW = :low - HIGH = :high - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/image_file_content_block.rbi b/rbi/lib/openai/models/beta/threads/image_file_content_block.rbi deleted file mode 100644 index ebd1e9c5..00000000 --- a/rbi/lib/openai/models/beta/threads/image_file_content_block.rbi +++ /dev/null @@ -1,35 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class ImageFileContentBlock < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::ImageFile) } - def image_file - end - - sig { params(_: OpenAI::Models::Beta::Threads::ImageFile).returns(OpenAI::Models::Beta::Threads::ImageFile) } - def image_file=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(image_file: OpenAI::Models::Beta::Threads::ImageFile, type: Symbol).returns(T.attached_class) } - def self.new(image_file:, type: :image_file) - end - - sig { override.returns({image_file: OpenAI::Models::Beta::Threads::ImageFile, type: Symbol}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/image_file_delta.rbi b/rbi/lib/openai/models/beta/threads/image_file_delta.rbi deleted file mode 100644 index 2f6a3435..00000000 --- a/rbi/lib/openai/models/beta/threads/image_file_delta.rbi +++ /dev/null @@ -1,49 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class ImageFileDelta < OpenAI::BaseModel - sig { returns(T.nilable(Symbol)) } - def detail - end - - sig { params(_: Symbol).returns(Symbol) } - def detail=(_) - end - - sig { returns(T.nilable(String)) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { params(detail: Symbol, file_id: String).returns(T.attached_class) } - def self.new(detail: nil, file_id: nil) - end - - sig { override.returns({detail: Symbol, file_id: String}) } - def to_hash - end - - class Detail < OpenAI::Enum - abstract! 
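# Illustrative sketch, not part of this diff: the constants that follow are
# the accepted detail levels. Per the deleted sigs, an image-file block
# wrapped an ImageFile with an optional detail symbol; the file ID is a
# placeholder:
image_block = OpenAI::Models::Beta::Threads::ImageFileContentBlock.new(
  image_file: OpenAI::Models::Beta::Threads::ImageFile.new(file_id: "file-abc123", detail: :high)
)
image_block.type # => :image_file (defaulted by the deleted constructor above)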
- - AUTO = :auto - LOW = :low - HIGH = :high - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/image_file_delta_block.rbi b/rbi/lib/openai/models/beta/threads/image_file_delta_block.rbi deleted file mode 100644 index dd93fbb6..00000000 --- a/rbi/lib/openai/models/beta/threads/image_file_delta_block.rbi +++ /dev/null @@ -1,51 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class ImageFileDeltaBlock < OpenAI::BaseModel - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::ImageFileDelta)) } - def image_file - end - - sig do - params(_: OpenAI::Models::Beta::Threads::ImageFileDelta) - .returns(OpenAI::Models::Beta::Threads::ImageFileDelta) - end - def image_file=(_) - end - - sig do - params(index: Integer, image_file: OpenAI::Models::Beta::Threads::ImageFileDelta, type: Symbol) - .returns(T.attached_class) - end - def self.new(index:, image_file: nil, type: :image_file) - end - - sig do - override.returns({index: Integer, type: Symbol, image_file: OpenAI::Models::Beta::Threads::ImageFileDelta}) - end - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/image_url.rbi b/rbi/lib/openai/models/beta/threads/image_url.rbi deleted file mode 100644 index 512367f1..00000000 --- a/rbi/lib/openai/models/beta/threads/image_url.rbi +++ /dev/null @@ -1,49 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class ImageURL < OpenAI::BaseModel - sig { returns(String) } - def url - end - - sig { params(_: String).returns(String) } - def url=(_) - end - - sig { returns(T.nilable(Symbol)) } - def detail - end - - sig { params(_: Symbol).returns(Symbol) } - def detail=(_) - end - - sig { params(url: String, detail: Symbol).returns(T.attached_class) } - def self.new(url:, detail: nil) - end - - sig { override.returns({url: String, detail: Symbol}) } - def to_hash - end - - class Detail < OpenAI::Enum - abstract! 
-
-            AUTO = :auto
-            LOW = :low
-            HIGH = :high
-
-            class << self
-              sig { override.returns(T::Array[Symbol]) }
-              def values
-              end
-            end
-          end
-        end
-      end
-    end
-  end
-end
diff --git a/rbi/lib/openai/models/beta/threads/image_url_content_block.rbi b/rbi/lib/openai/models/beta/threads/image_url_content_block.rbi
deleted file mode 100644
index 13a1daf7..00000000
--- a/rbi/lib/openai/models/beta/threads/image_url_content_block.rbi
+++ /dev/null
@@ -1,35 +0,0 @@
-# typed: strong
-
-module OpenAI
-  module Models
-    module Beta
-      module Threads
-        class ImageURLContentBlock < OpenAI::BaseModel
-          sig { returns(OpenAI::Models::Beta::Threads::ImageURL) }
-          def image_url
-          end
-
-          sig { params(_: OpenAI::Models::Beta::Threads::ImageURL).returns(OpenAI::Models::Beta::Threads::ImageURL) }
-          def image_url=(_)
-          end
-
-          sig { returns(Symbol) }
-          def type
-          end
-
-          sig { params(_: Symbol).returns(Symbol) }
-          def type=(_)
-          end
-
-          sig { params(image_url: OpenAI::Models::Beta::Threads::ImageURL, type: Symbol).returns(T.attached_class) }
-          def self.new(image_url:, type: :image_url)
-          end
-
-          sig { override.returns({image_url: OpenAI::Models::Beta::Threads::ImageURL, type: Symbol}) }
-          def to_hash
-          end
-        end
-      end
-    end
-  end
-end
diff --git a/rbi/lib/openai/models/beta/threads/image_url_delta.rbi b/rbi/lib/openai/models/beta/threads/image_url_delta.rbi
deleted file mode 100644
index b5ffce82..00000000
--- a/rbi/lib/openai/models/beta/threads/image_url_delta.rbi
+++ /dev/null
@@ -1,49 +0,0 @@
-# typed: strong
-
-module OpenAI
-  module Models
-    module Beta
-      module Threads
-        class ImageURLDelta < OpenAI::BaseModel
-          sig { returns(T.nilable(Symbol)) }
-          def detail
-          end
-
-          sig { params(_: Symbol).returns(Symbol) }
-          def detail=(_)
-          end
-
-          sig { returns(T.nilable(String)) }
-          def url
-          end
-
-          sig { params(_: String).returns(String) }
-          def url=(_)
-          end
-
-          sig { params(detail: Symbol, url: String).returns(T.attached_class) }
-          def self.new(detail: nil, url: nil)
-          end
-
-          sig { override.returns({detail: Symbol, url: String}) }
-          def to_hash
-          end
-
-          class Detail < OpenAI::Enum
-            abstract!
- - AUTO = :auto - LOW = :low - HIGH = :high - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/image_url_delta_block.rbi b/rbi/lib/openai/models/beta/threads/image_url_delta_block.rbi deleted file mode 100644 index 38635411..00000000 --- a/rbi/lib/openai/models/beta/threads/image_url_delta_block.rbi +++ /dev/null @@ -1,49 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class ImageURLDeltaBlock < OpenAI::BaseModel - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::ImageURLDelta)) } - def image_url - end - - sig do - params(_: OpenAI::Models::Beta::Threads::ImageURLDelta) - .returns(OpenAI::Models::Beta::Threads::ImageURLDelta) - end - def image_url=(_) - end - - sig do - params(index: Integer, image_url: OpenAI::Models::Beta::Threads::ImageURLDelta, type: Symbol) - .returns(T.attached_class) - end - def self.new(index:, image_url: nil, type: :image_url) - end - - sig { override.returns({index: Integer, type: Symbol, image_url: OpenAI::Models::Beta::Threads::ImageURLDelta}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/message.rbi b/rbi/lib/openai/models/beta/threads/message.rbi deleted file mode 100644 index 3f40f84e..00000000 --- a/rbi/lib/openai/models/beta/threads/message.rbi +++ /dev/null @@ -1,408 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class Message < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T.nilable(String)) } - def assistant_id - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def assistant_id=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::Message::Attachment])) } - def attachments - end - - sig do - params(_: T.nilable(T::Array[OpenAI::Models::Beta::Threads::Message::Attachment])) - .returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::Message::Attachment])) - end - def attachments=(_) - end - - sig { returns(T.nilable(Integer)) } - def completed_at - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def completed_at=(_) - end - - sig do - returns( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlock, - OpenAI::Models::Beta::Threads::RefusalContentBlock - ) - ] - ) - end - def content - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlock, - OpenAI::Models::Beta::Threads::RefusalContentBlock - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlock, - OpenAI::Models::Beta::Threads::RefusalContentBlock - ) - ] - ) - end - def content=(_) - end - - sig { returns(Integer) } - def created_at - end - - sig { params(_: Integer).returns(Integer) } - def created_at=(_) - end - - sig { 
returns(T.nilable(Integer)) } - def incomplete_at - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def incomplete_at=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Message::IncompleteDetails)) } - def incomplete_details - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::Threads::Message::IncompleteDetails)) - .returns(T.nilable(OpenAI::Models::Beta::Threads::Message::IncompleteDetails)) - end - def incomplete_details=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { returns(T.nilable(String)) } - def run_id - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def run_id=(_) - end - - sig { returns(Symbol) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(String) } - def thread_id - end - - sig { params(_: String).returns(String) } - def thread_id=(_) - end - - sig do - params( - id: String, - assistant_id: T.nilable(String), - attachments: T.nilable(T::Array[OpenAI::Models::Beta::Threads::Message::Attachment]), - completed_at: T.nilable(Integer), - content: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlock, - OpenAI::Models::Beta::Threads::RefusalContentBlock - ) - ], - created_at: Integer, - incomplete_at: T.nilable(Integer), - incomplete_details: T.nilable(OpenAI::Models::Beta::Threads::Message::IncompleteDetails), - metadata: T.nilable(OpenAI::Models::Metadata), - role: Symbol, - run_id: T.nilable(String), - status: Symbol, - thread_id: String, - object: Symbol - ) - .returns(T.attached_class) - end - def self.new( - id:, - assistant_id:, - attachments:, - completed_at:, - content:, - created_at:, - incomplete_at:, - incomplete_details:, - metadata:, - role:, - run_id:, - status:, - thread_id:, - object: :"thread.message" - ) - end - - sig do - override - .returns( - { - id: String, - assistant_id: T.nilable(String), - attachments: T.nilable(T::Array[OpenAI::Models::Beta::Threads::Message::Attachment]), - completed_at: T.nilable(Integer), - content: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlock, - OpenAI::Models::Beta::Threads::RefusalContentBlock - ) - ], - created_at: Integer, - incomplete_at: T.nilable(Integer), - incomplete_details: T.nilable(OpenAI::Models::Beta::Threads::Message::IncompleteDetails), - metadata: T.nilable(OpenAI::Models::Metadata), - object: Symbol, - role: Symbol, - run_id: T.nilable(String), - status: Symbol, - thread_id: String - } - ) - end - def to_hash - end - - class Attachment < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig do - returns( - T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly - ) - ] - ) - ) - end - def tools - end - - sig do 
- params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly - ) - ] - ) - end - def tools=(_) - end - - sig do - params( - file_id: String, - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly - ) - ] - ) - .returns(T.attached_class) - end - def self.new(file_id: nil, tools: nil) - end - - sig do - override - .returns( - { - file_id: String, - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly - ) - ] - } - ) - end - def to_hash - end - - class Tool < OpenAI::Union - abstract! - - class AssistantToolsFileSearchTypeOnly < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :file_search) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - - class << self - sig do - override - .returns( - [[NilClass, OpenAI::Models::Beta::CodeInterpreterTool], [NilClass, OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly]] - ) - end - private def variants - end - end - end - end - - class IncompleteDetails < OpenAI::BaseModel - sig { returns(Symbol) } - def reason - end - - sig { params(_: Symbol).returns(Symbol) } - def reason=(_) - end - - sig { params(reason: Symbol).returns(T.attached_class) } - def self.new(reason:) - end - - sig { override.returns({reason: Symbol}) } - def to_hash - end - - class Reason < OpenAI::Enum - abstract! - - CONTENT_FILTER = :content_filter - MAX_TOKENS = :max_tokens - RUN_CANCELLED = :run_cancelled - RUN_EXPIRED = :run_expired - RUN_FAILED = :run_failed - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class Role < OpenAI::Enum - abstract! - - USER = :user - ASSISTANT = :assistant - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Status < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - INCOMPLETE = :incomplete - COMPLETED = :completed - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/message_content.rbi b/rbi/lib/openai/models/beta/threads/message_content.rbi deleted file mode 100644 index b2972189..00000000 --- a/rbi/lib/openai/models/beta/threads/message_content.rbi +++ /dev/null @@ -1,24 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class MessageContent < OpenAI::Union - abstract! 
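# Illustrative sketch, not part of this diff: a message's content array mixes
# the four block classes listed in the variant table below, so rendering code
# matched on each class. `block.text.value` is assumed from the wider SDK;
# TextContentBlock's own definition sits outside this hunk. `message` stands
# for any Beta::Threads::Message:
message.content.each do |block|
  case block
  when OpenAI::Models::Beta::Threads::TextContentBlock
    puts(block.text.value)
  when OpenAI::Models::Beta::Threads::ImageFileContentBlock
    puts("image file: #{block.image_file.file_id}")
  when OpenAI::Models::Beta::Threads::ImageURLContentBlock
    puts("image url: #{block.image_url.url}")
  when OpenAI::Models::Beta::Threads::RefusalContentBlock
    puts("refusal")
  end
end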
- - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::ImageFileContentBlock], [Symbol, OpenAI::Models::Beta::Threads::ImageURLContentBlock], [Symbol, OpenAI::Models::Beta::Threads::TextContentBlock], [Symbol, OpenAI::Models::Beta::Threads::RefusalContentBlock]] - ) - end - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/message_content_delta.rbi b/rbi/lib/openai/models/beta/threads/message_content_delta.rbi deleted file mode 100644 index fff1ee0b..00000000 --- a/rbi/lib/openai/models/beta/threads/message_content_delta.rbi +++ /dev/null @@ -1,24 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class MessageContentDelta < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::ImageFileDeltaBlock], [Symbol, OpenAI::Models::Beta::Threads::TextDeltaBlock], [Symbol, OpenAI::Models::Beta::Threads::RefusalDeltaBlock], [Symbol, OpenAI::Models::Beta::Threads::ImageURLDeltaBlock]] - ) - end - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/message_content_part_param.rbi b/rbi/lib/openai/models/beta/threads/message_content_part_param.rbi deleted file mode 100644 index 82903174..00000000 --- a/rbi/lib/openai/models/beta/threads/message_content_part_param.rbi +++ /dev/null @@ -1,24 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class MessageContentPartParam < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::ImageFileContentBlock], [Symbol, OpenAI::Models::Beta::Threads::ImageURLContentBlock], [Symbol, OpenAI::Models::Beta::Threads::TextContentBlockParam]] - ) - end - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/message_create_params.rbi b/rbi/lib/openai/models/beta/threads/message_create_params.rbi deleted file mode 100644 index 4a1db9ed..00000000 --- a/rbi/lib/openai/models/beta/threads/message_create_params.rbi +++ /dev/null @@ -1,294 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class MessageCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - returns( - T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ) - ) - end - def content - end - - sig do - params( - _: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ) - ) - .returns( - T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ) - ) - end - def content=(_) - end - - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment])) } - def attachments - end - - sig do - params(_: T.nilable(T::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment])) - 
.returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment])) - end - def attachments=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig do - params( - content: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ), - role: Symbol, - attachments: T.nilable(T::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment]), - metadata: T.nilable(OpenAI::Models::Metadata), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(content:, role:, attachments: nil, metadata: nil, request_options: {}) - end - - sig do - override - .returns( - { - content: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ), - role: Symbol, - attachments: T.nilable(T::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment]), - metadata: T.nilable(OpenAI::Models::Metadata), - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Content < OpenAI::Union - abstract! - - MessageContentPartParamArray = T.type_alias do - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - end - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [ - NilClass, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ] - ] - ) - end - private def variants - end - end - end - - class Role < OpenAI::Enum - abstract! 
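# Illustrative sketch, not part of this diff: the Role constants follow. Per
# the deleted sigs, creation params accepted a plain string or an array of
# content blocks for `content:`:
params = OpenAI::Models::Beta::Threads::MessageCreateParams.new(
  content: "What does the attached report conclude?",
  role: :user
)
params.to_hash[:role] # => :user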
- - USER = :user - ASSISTANT = :assistant - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Attachment < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig do - returns( - T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch - ) - ] - ) - ) - end - def tools - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch - ) - ] - ) - end - def tools=(_) - end - - sig do - params( - file_id: String, - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch - ) - ] - ) - .returns(T.attached_class) - end - def self.new(file_id: nil, tools: nil) - end - - sig do - override - .returns( - { - file_id: String, - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch - ) - ] - } - ) - end - def to_hash - end - - class Tool < OpenAI::Union - abstract! - - class FileSearch < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :file_search) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::CodeInterpreterTool], [Symbol, OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch]] - ) - end - private def variants - end - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/message_delete_params.rbi b/rbi/lib/openai/models/beta/threads/message_delete_params.rbi deleted file mode 100644 index 8ef23891..00000000 --- a/rbi/lib/openai/models/beta/threads/message_delete_params.rbi +++ /dev/null @@ -1,39 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class MessageDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def thread_id - end - - sig { params(_: String).returns(String) } - def thread_id=(_) - end - - sig do - params( - thread_id: String, - request_options: T.any( - OpenAI::RequestOptions, - T::Hash[Symbol, T.anything] - ) - ) - .returns(T.attached_class) - end - def self.new(thread_id:, request_options: {}) - end - - sig { override.returns({thread_id: String, request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/message_deleted.rbi b/rbi/lib/openai/models/beta/threads/message_deleted.rbi deleted file mode 100644 index 9684886b..00000000 --- a/rbi/lib/openai/models/beta/threads/message_deleted.rbi +++ /dev/null @@ -1,43 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class MessageDeleted < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: 
String).returns(String) } - def id=(_) - end - - sig { returns(T::Boolean) } - def deleted - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def deleted=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { params(id: String, deleted: T::Boolean, object: Symbol).returns(T.attached_class) } - def self.new(id:, deleted:, object: :"thread.message.deleted") - end - - sig { override.returns({id: String, deleted: T::Boolean, object: Symbol}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/message_delta.rbi b/rbi/lib/openai/models/beta/threads/message_delta.rbi deleted file mode 100644 index 4830ec92..00000000 --- a/rbi/lib/openai/models/beta/threads/message_delta.rbi +++ /dev/null @@ -1,110 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class MessageDelta < OpenAI::BaseModel - sig do - returns( - T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileDeltaBlock, - OpenAI::Models::Beta::Threads::TextDeltaBlock, - OpenAI::Models::Beta::Threads::RefusalDeltaBlock, - OpenAI::Models::Beta::Threads::ImageURLDeltaBlock - ) - ] - ) - ) - end - def content - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileDeltaBlock, - OpenAI::Models::Beta::Threads::TextDeltaBlock, - OpenAI::Models::Beta::Threads::RefusalDeltaBlock, - OpenAI::Models::Beta::Threads::ImageURLDeltaBlock - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileDeltaBlock, - OpenAI::Models::Beta::Threads::TextDeltaBlock, - OpenAI::Models::Beta::Threads::RefusalDeltaBlock, - OpenAI::Models::Beta::Threads::ImageURLDeltaBlock - ) - ] - ) - end - def content=(_) - end - - sig { returns(T.nilable(Symbol)) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig do - params( - content: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileDeltaBlock, - OpenAI::Models::Beta::Threads::TextDeltaBlock, - OpenAI::Models::Beta::Threads::RefusalDeltaBlock, - OpenAI::Models::Beta::Threads::ImageURLDeltaBlock - ) - ], - role: Symbol - ) - .returns(T.attached_class) - end - def self.new(content: nil, role: nil) - end - - sig do - override - .returns( - { - content: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileDeltaBlock, - OpenAI::Models::Beta::Threads::TextDeltaBlock, - OpenAI::Models::Beta::Threads::RefusalDeltaBlock, - OpenAI::Models::Beta::Threads::ImageURLDeltaBlock - ) - ], - role: Symbol - } - ) - end - def to_hash - end - - class Role < OpenAI::Enum - abstract! 
- - USER = :user - ASSISTANT = :assistant - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/message_delta_event.rbi b/rbi/lib/openai/models/beta/threads/message_delta_event.rbi deleted file mode 100644 index 7908606f..00000000 --- a/rbi/lib/openai/models/beta/threads/message_delta_event.rbi +++ /dev/null @@ -1,48 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class MessageDeltaEvent < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(OpenAI::Models::Beta::Threads::MessageDelta) } - def delta - end - - sig do - params(_: OpenAI::Models::Beta::Threads::MessageDelta).returns(OpenAI::Models::Beta::Threads::MessageDelta) - end - def delta=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig do - params(id: String, delta: OpenAI::Models::Beta::Threads::MessageDelta, object: Symbol) - .returns(T.attached_class) - end - def self.new(id:, delta:, object: :"thread.message.delta") - end - - sig { override.returns({id: String, delta: OpenAI::Models::Beta::Threads::MessageDelta, object: Symbol}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/message_list_params.rbi b/rbi/lib/openai/models/beta/threads/message_list_params.rbi deleted file mode 100644 index d2cb9550..00000000 --- a/rbi/lib/openai/models/beta/threads/message_list_params.rbi +++ /dev/null @@ -1,97 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class MessageListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(String)) } - def after - end - - sig { params(_: String).returns(String) } - def after=(_) - end - - sig { returns(T.nilable(String)) } - def before - end - - sig { params(_: String).returns(String) } - def before=(_) - end - - sig { returns(T.nilable(Integer)) } - def limit - end - - sig { params(_: Integer).returns(Integer) } - def limit=(_) - end - - sig { returns(T.nilable(Symbol)) } - def order - end - - sig { params(_: Symbol).returns(Symbol) } - def order=(_) - end - - sig { returns(T.nilable(String)) } - def run_id - end - - sig { params(_: String).returns(String) } - def run_id=(_) - end - - sig do - params( - after: String, - before: String, - limit: Integer, - order: Symbol, - run_id: String, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(after: nil, before: nil, limit: nil, order: nil, run_id: nil, request_options: {}) - end - - sig do - override - .returns( - { - after: String, - before: String, - limit: Integer, - order: Symbol, - run_id: String, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Order < OpenAI::Enum - abstract! 
- - ASC = :asc - DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/message_retrieve_params.rbi b/rbi/lib/openai/models/beta/threads/message_retrieve_params.rbi deleted file mode 100644 index 19dca144..00000000 --- a/rbi/lib/openai/models/beta/threads/message_retrieve_params.rbi +++ /dev/null @@ -1,39 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class MessageRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def thread_id - end - - sig { params(_: String).returns(String) } - def thread_id=(_) - end - - sig do - params( - thread_id: String, - request_options: T.any( - OpenAI::RequestOptions, - T::Hash[Symbol, T.anything] - ) - ) - .returns(T.attached_class) - end - def self.new(thread_id:, request_options: {}) - end - - sig { override.returns({thread_id: String, request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/message_update_params.rbi b/rbi/lib/openai/models/beta/threads/message_update_params.rbi deleted file mode 100644 index 4fe12d9d..00000000 --- a/rbi/lib/openai/models/beta/threads/message_update_params.rbi +++ /dev/null @@ -1,50 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class MessageUpdateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def thread_id - end - - sig { params(_: String).returns(String) } - def thread_id=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig do - params( - thread_id: String, - metadata: T.nilable(OpenAI::Models::Metadata), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(thread_id:, metadata: nil, request_options: {}) - end - - sig do - override - .returns( - {thread_id: String, metadata: T.nilable(OpenAI::Models::Metadata), request_options: OpenAI::RequestOptions} - ) - end - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/refusal_content_block.rbi b/rbi/lib/openai/models/beta/threads/refusal_content_block.rbi deleted file mode 100644 index 2451c015..00000000 --- a/rbi/lib/openai/models/beta/threads/refusal_content_block.rbi +++ /dev/null @@ -1,35 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class RefusalContentBlock < OpenAI::BaseModel - sig { returns(String) } - def refusal - end - - sig { params(_: String).returns(String) } - def refusal=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(refusal: String, type: Symbol).returns(T.attached_class) } - def self.new(refusal:, type: :refusal) - end - - sig { override.returns({refusal: String, type: Symbol}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/refusal_delta_block.rbi b/rbi/lib/openai/models/beta/threads/refusal_delta_block.rbi deleted file mode 100644 index c6f9732d..00000000 --- 
a/rbi/lib/openai/models/beta/threads/refusal_delta_block.rbi +++ /dev/null @@ -1,43 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class RefusalDeltaBlock < OpenAI::BaseModel - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(String)) } - def refusal - end - - sig { params(_: String).returns(String) } - def refusal=(_) - end - - sig { params(index: Integer, refusal: String, type: Symbol).returns(T.attached_class) } - def self.new(index:, refusal: nil, type: :refusal) - end - - sig { override.returns({index: Integer, type: Symbol, refusal: String}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/required_action_function_tool_call.rbi b/rbi/lib/openai/models/beta/threads/required_action_function_tool_call.rbi deleted file mode 100644 index 2627ee3f..00000000 --- a/rbi/lib/openai/models/beta/threads/required_action_function_tool_call.rbi +++ /dev/null @@ -1,84 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class RequiredActionFunctionToolCall < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function) } - def function - end - - sig do - params(_: OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function) - .returns(OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function) - end - def function=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - id: String, - function: OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, function:, type: :function) - end - - sig do - override - .returns( - {id: String, function: OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function, type: Symbol} - ) - end - def to_hash - end - - class Function < OpenAI::BaseModel - sig { returns(String) } - def arguments - end - - sig { params(_: String).returns(String) } - def arguments=(_) - end - - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { params(arguments: String, name: String).returns(T.attached_class) } - def self.new(arguments:, name:) - end - - sig { override.returns({arguments: String, name: String}) } - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/run.rbi b/rbi/lib/openai/models/beta/threads/run.rbi deleted file mode 100644 index 61ddea55..00000000 --- a/rbi/lib/openai/models/beta/threads/run.rbi +++ /dev/null @@ -1,648 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class Run < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(String) } - def assistant_id - end - - sig { params(_: String).returns(String) } - def assistant_id=(_) - end - - sig { returns(T.nilable(Integer)) } - def cancelled_at - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def cancelled_at=(_) - end - - sig { returns(T.nilable(Integer)) } 
- def completed_at - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def completed_at=(_) - end - - sig { returns(Integer) } - def created_at - end - - sig { params(_: Integer).returns(Integer) } - def created_at=(_) - end - - sig { returns(T.nilable(Integer)) } - def expires_at - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def expires_at=(_) - end - - sig { returns(T.nilable(Integer)) } - def failed_at - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def failed_at=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::IncompleteDetails)) } - def incomplete_details - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::Threads::Run::IncompleteDetails)) - .returns(T.nilable(OpenAI::Models::Beta::Threads::Run::IncompleteDetails)) - end - def incomplete_details=(_) - end - - sig { returns(String) } - def instructions - end - - sig { params(_: String).returns(String) } - def instructions=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::LastError)) } - def last_error - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::Threads::Run::LastError)) - .returns(T.nilable(OpenAI::Models::Beta::Threads::Run::LastError)) - end - def last_error=(_) - end - - sig { returns(T.nilable(Integer)) } - def max_completion_tokens - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def max_completion_tokens=(_) - end - - sig { returns(T.nilable(Integer)) } - def max_prompt_tokens - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def max_prompt_tokens=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(String) } - def model - end - - sig { params(_: String).returns(String) } - def model=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(T::Boolean) } - def parallel_tool_calls - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def parallel_tool_calls=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::RequiredAction)) } - def required_action - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::Threads::Run::RequiredAction)) - .returns(T.nilable(OpenAI::Models::Beta::Threads::Run::RequiredAction)) - end - def required_action=(_) - end - - sig do - returns( - T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - end - def response_format - end - - sig do - params( - _: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - .returns( - T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - end - def response_format=(_) - end - - sig { returns(T.nilable(Integer)) } - def started_at - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def started_at=(_) - end - - sig { returns(Symbol) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(String) } - def thread_id - end - - sig { params(_: 
String).returns(String) } - def thread_id=(_) - end - - sig { returns(T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice))) } - def tool_choice - end - - sig do - params(_: T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice))) - .returns(T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice))) - end - def tool_choice=(_) - end - - sig do - returns( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - end - def tools - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - end - def tools=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::TruncationStrategy)) } - def truncation_strategy - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::Threads::Run::TruncationStrategy)) - .returns(T.nilable(OpenAI::Models::Beta::Threads::Run::TruncationStrategy)) - end - def truncation_strategy=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::Usage)) } - def usage - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::Threads::Run::Usage)) - .returns(T.nilable(OpenAI::Models::Beta::Threads::Run::Usage)) - end - def usage=(_) - end - - sig { returns(T.nilable(Float)) } - def temperature - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def temperature=(_) - end - - sig { returns(T.nilable(Float)) } - def top_p - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def top_p=(_) - end - - sig do - params( - id: String, - assistant_id: String, - cancelled_at: T.nilable(Integer), - completed_at: T.nilable(Integer), - created_at: Integer, - expires_at: T.nilable(Integer), - failed_at: T.nilable(Integer), - incomplete_details: T.nilable(OpenAI::Models::Beta::Threads::Run::IncompleteDetails), - instructions: String, - last_error: T.nilable(OpenAI::Models::Beta::Threads::Run::LastError), - max_completion_tokens: T.nilable(Integer), - max_prompt_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - model: String, - parallel_tool_calls: T::Boolean, - required_action: T.nilable(OpenAI::Models::Beta::Threads::Run::RequiredAction), - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - started_at: T.nilable(Integer), - status: Symbol, - thread_id: String, - tool_choice: T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice)), - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ], - truncation_strategy: T.nilable(OpenAI::Models::Beta::Threads::Run::TruncationStrategy), - usage: T.nilable(OpenAI::Models::Beta::Threads::Run::Usage), - temperature: T.nilable(Float), - top_p: T.nilable(Float), - object: Symbol - ) - .returns(T.attached_class) - end - def self.new( - id:, - assistant_id:, - cancelled_at:, - completed_at:, - created_at:, - expires_at:, - failed_at:, - incomplete_details:, - instructions:, - last_error:, - max_completion_tokens:, - max_prompt_tokens:, - metadata:, - model:, - parallel_tool_calls:, - 
required_action:, - response_format:, - started_at:, - status:, - thread_id:, - tool_choice:, - tools:, - truncation_strategy:, - usage:, - temperature: nil, - top_p: nil, - object: :"thread.run" - ) - end - - sig do - override - .returns( - { - id: String, - assistant_id: String, - cancelled_at: T.nilable(Integer), - completed_at: T.nilable(Integer), - created_at: Integer, - expires_at: T.nilable(Integer), - failed_at: T.nilable(Integer), - incomplete_details: T.nilable(OpenAI::Models::Beta::Threads::Run::IncompleteDetails), - instructions: String, - last_error: T.nilable(OpenAI::Models::Beta::Threads::Run::LastError), - max_completion_tokens: T.nilable(Integer), - max_prompt_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - model: String, - object: Symbol, - parallel_tool_calls: T::Boolean, - required_action: T.nilable(OpenAI::Models::Beta::Threads::Run::RequiredAction), - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - started_at: T.nilable(Integer), - status: Symbol, - thread_id: String, - tool_choice: T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice)), - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ], - truncation_strategy: T.nilable(OpenAI::Models::Beta::Threads::Run::TruncationStrategy), - usage: T.nilable(OpenAI::Models::Beta::Threads::Run::Usage), - temperature: T.nilable(Float), - top_p: T.nilable(Float) - } - ) - end - def to_hash - end - - class IncompleteDetails < OpenAI::BaseModel - sig { returns(T.nilable(Symbol)) } - def reason - end - - sig { params(_: Symbol).returns(Symbol) } - def reason=(_) - end - - sig { params(reason: Symbol).returns(T.attached_class) } - def self.new(reason: nil) - end - - sig { override.returns({reason: Symbol}) } - def to_hash - end - - class Reason < OpenAI::Enum - abstract! - - MAX_COMPLETION_TOKENS = :max_completion_tokens - MAX_PROMPT_TOKENS = :max_prompt_tokens - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class LastError < OpenAI::BaseModel - sig { returns(Symbol) } - def code - end - - sig { params(_: Symbol).returns(Symbol) } - def code=(_) - end - - sig { returns(String) } - def message - end - - sig { params(_: String).returns(String) } - def message=(_) - end - - sig { params(code: Symbol, message: String).returns(T.attached_class) } - def self.new(code:, message:) - end - - sig { override.returns({code: Symbol, message: String}) } - def to_hash - end - - class Code < OpenAI::Enum - abstract! 
- - SERVER_ERROR = :server_error - RATE_LIMIT_EXCEEDED = :rate_limit_exceeded - INVALID_PROMPT = :invalid_prompt - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class RequiredAction < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs) } - def submit_tool_outputs - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs) - .returns(OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs) - end - def submit_tool_outputs=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - submit_tool_outputs: OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(submit_tool_outputs:, type: :submit_tool_outputs) - end - - sig do - override - .returns( - {submit_tool_outputs: OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs, type: Symbol} - ) - end - def to_hash - end - - class SubmitToolOutputs < OpenAI::BaseModel - sig { returns(T::Array[OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall]) } - def tool_calls - end - - sig do - params(_: T::Array[OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall]) - .returns(T::Array[OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall]) - end - def tool_calls=(_) - end - - sig do - params(tool_calls: T::Array[OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall]) - .returns(T.attached_class) - end - def self.new(tool_calls:) - end - - sig { override.returns({tool_calls: T::Array[OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall]}) } - def to_hash - end - end - end - - class TruncationStrategy < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(Integer)) } - def last_messages - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def last_messages=(_) - end - - sig { params(type: Symbol, last_messages: T.nilable(Integer)).returns(T.attached_class) } - def self.new(type:, last_messages: nil) - end - - sig { override.returns({type: Symbol, last_messages: T.nilable(Integer)}) } - def to_hash - end - - class Type < OpenAI::Enum - abstract! 
- - AUTO = :auto - LAST_MESSAGES = :last_messages - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class Usage < OpenAI::BaseModel - sig { returns(Integer) } - def completion_tokens - end - - sig { params(_: Integer).returns(Integer) } - def completion_tokens=(_) - end - - sig { returns(Integer) } - def prompt_tokens - end - - sig { params(_: Integer).returns(Integer) } - def prompt_tokens=(_) - end - - sig { returns(Integer) } - def total_tokens - end - - sig { params(_: Integer).returns(Integer) } - def total_tokens=(_) - end - - sig do - params( - completion_tokens: Integer, - prompt_tokens: Integer, - total_tokens: Integer - ).returns(T.attached_class) - end - def self.new(completion_tokens:, prompt_tokens:, total_tokens:) - end - - sig do - override.returns({completion_tokens: Integer, prompt_tokens: Integer, total_tokens: Integer}) - end - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/run_cancel_params.rbi b/rbi/lib/openai/models/beta/threads/run_cancel_params.rbi deleted file mode 100644 index 119500b5..00000000 --- a/rbi/lib/openai/models/beta/threads/run_cancel_params.rbi +++ /dev/null @@ -1,39 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class RunCancelParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def thread_id - end - - sig { params(_: String).returns(String) } - def thread_id=(_) - end - - sig do - params( - thread_id: String, - request_options: T.any( - OpenAI::RequestOptions, - T::Hash[Symbol, T.anything] - ) - ) - .returns(T.attached_class) - end - def self.new(thread_id:, request_options: {}) - end - - sig { override.returns({thread_id: String, request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/run_create_params.rbi b/rbi/lib/openai/models/beta/threads/run_create_params.rbi deleted file mode 100644 index 3c923be3..00000000 --- a/rbi/lib/openai/models/beta/threads/run_create_params.rbi +++ /dev/null @@ -1,666 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class RunCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def assistant_id - end - - sig { params(_: String).returns(String) } - def assistant_id=(_) - end - - sig { returns(T.nilable(T::Array[Symbol])) } - def include - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def include=(_) - end - - sig { returns(T.nilable(String)) } - def additional_instructions - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def additional_instructions=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage])) } - def additional_messages - end - - sig do - params(_: T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage])) - .returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage])) - end - def additional_messages=(_) - end - - sig { returns(T.nilable(String)) } - def instructions - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def instructions=(_) - end - - sig { returns(T.nilable(Integer)) } - def max_completion_tokens - end - - sig { params(_: 
T.nilable(Integer)).returns(T.nilable(Integer)) } - def max_completion_tokens=(_) - end - - sig { returns(T.nilable(Integer)) } - def max_prompt_tokens - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def max_prompt_tokens=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.nilable(T.any(String, Symbol))) } - def model - end - - sig { params(_: T.nilable(T.any(String, Symbol))).returns(T.nilable(T.any(String, Symbol))) } - def model=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def parallel_tool_calls - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def parallel_tool_calls=(_) - end - - sig { returns(T.nilable(Symbol)) } - def reasoning_effort - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def reasoning_effort=(_) - end - - sig do - returns( - T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - end - def response_format - end - - sig do - params( - _: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - .returns( - T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ) - ) - end - def response_format=(_) - end - - sig { returns(T.nilable(Float)) } - def temperature - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def temperature=(_) - end - - sig { returns(T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice))) } - def tool_choice - end - - sig do - params(_: T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice))) - .returns(T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice))) - end - def tool_choice=(_) - end - - sig do - returns( - T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - ) - end - def tools - end - - sig do - params( - _: T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - ) - .returns( - T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ) - ) - end - def tools=(_) - end - - sig { returns(T.nilable(Float)) } - def top_p - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def top_p=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy)) } - def truncation_strategy - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy)) - .returns(T.nilable(OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy)) - end - def truncation_strategy=(_) - end - - sig do - params( - assistant_id: String, - include: T::Array[Symbol], - additional_instructions: T.nilable(String), - additional_messages: T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage]), - instructions: T.nilable(String), - max_completion_tokens: T.nilable(Integer), - 
max_prompt_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - model: T.nilable(T.any(String, Symbol)), - parallel_tool_calls: T::Boolean, - reasoning_effort: T.nilable(Symbol), - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - tool_choice: T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice)), - tools: T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ), - top_p: T.nilable(Float), - truncation_strategy: T.nilable(OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new( - assistant_id:, - include: nil, - additional_instructions: nil, - additional_messages: nil, - instructions: nil, - max_completion_tokens: nil, - max_prompt_tokens: nil, - metadata: nil, - model: nil, - parallel_tool_calls: nil, - reasoning_effort: nil, - response_format: nil, - temperature: nil, - tool_choice: nil, - tools: nil, - top_p: nil, - truncation_strategy: nil, - request_options: {} - ) - end - - sig do - override - .returns( - { - assistant_id: String, - include: T::Array[Symbol], - additional_instructions: T.nilable(String), - additional_messages: T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage]), - instructions: T.nilable(String), - max_completion_tokens: T.nilable(Integer), - max_prompt_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - model: T.nilable(T.any(String, Symbol)), - parallel_tool_calls: T::Boolean, - reasoning_effort: T.nilable(Symbol), - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - tool_choice: T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice)), - tools: T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ), - top_p: T.nilable(Float), - truncation_strategy: T.nilable(OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy), - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class AdditionalMessage < OpenAI::BaseModel - sig do - returns( - T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ) - ) - end - def content - end - - sig do - params( - _: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ) - ) - .returns( - T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ) - ) - end - def content=(_) - end - - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig do - returns( - 
T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment]) - ) - end - def attachments - end - - sig do - params( - _: T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment]) - ) - .returns( - T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment]) - ) - end - def attachments=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig do - params( - content: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ), - role: Symbol, - attachments: T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment]), - metadata: T.nilable(OpenAI::Models::Metadata) - ) - .returns(T.attached_class) - end - def self.new(content:, role:, attachments: nil, metadata: nil) - end - - sig do - override - .returns( - { - content: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ), - role: Symbol, - attachments: T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment]), - metadata: T.nilable(OpenAI::Models::Metadata) - } - ) - end - def to_hash - end - - class Content < OpenAI::Union - abstract! - - MessageContentPartParamArray = T.type_alias do - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - end - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [ - NilClass, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ] - ] - ) - end - private def variants - end - end - end - - class Role < OpenAI::Enum - abstract! 
- - USER = :user - ASSISTANT = :assistant - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Attachment < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig do - returns( - T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch - ) - ] - ) - ) - end - def tools - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch - ) - ] - ) - end - def tools=(_) - end - - sig do - params( - file_id: String, - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch - ) - ] - ) - .returns(T.attached_class) - end - def self.new(file_id: nil, tools: nil) - end - - sig do - override - .returns( - { - file_id: String, - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch - ) - ] - } - ) - end - def to_hash - end - - class Tool < OpenAI::Union - abstract! - - class FileSearch < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :file_search) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::CodeInterpreterTool], [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch]] - ) - end - private def variants - end - end - end - end - end - - class Model < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - - class TruncationStrategy < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(Integer)) } - def last_messages - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def last_messages=(_) - end - - sig { params(type: Symbol, last_messages: T.nilable(Integer)).returns(T.attached_class) } - def self.new(type:, last_messages: nil) - end - - sig { override.returns({type: Symbol, last_messages: T.nilable(Integer)}) } - def to_hash - end - - class Type < OpenAI::Enum - abstract! 
- - AUTO = :auto - LAST_MESSAGES = :last_messages - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/run_list_params.rbi b/rbi/lib/openai/models/beta/threads/run_list_params.rbi deleted file mode 100644 index cffb671d..00000000 --- a/rbi/lib/openai/models/beta/threads/run_list_params.rbi +++ /dev/null @@ -1,87 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class RunListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(String)) } - def after - end - - sig { params(_: String).returns(String) } - def after=(_) - end - - sig { returns(T.nilable(String)) } - def before - end - - sig { params(_: String).returns(String) } - def before=(_) - end - - sig { returns(T.nilable(Integer)) } - def limit - end - - sig { params(_: Integer).returns(Integer) } - def limit=(_) - end - - sig { returns(T.nilable(Symbol)) } - def order - end - - sig { params(_: Symbol).returns(Symbol) } - def order=(_) - end - - sig do - params( - after: String, - before: String, - limit: Integer, - order: Symbol, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(after: nil, before: nil, limit: nil, order: nil, request_options: {}) - end - - sig do - override - .returns( - { - after: String, - before: String, - limit: Integer, - order: Symbol, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Order < OpenAI::Enum - abstract! - - ASC = :asc - DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/run_retrieve_params.rbi b/rbi/lib/openai/models/beta/threads/run_retrieve_params.rbi deleted file mode 100644 index fa5675d0..00000000 --- a/rbi/lib/openai/models/beta/threads/run_retrieve_params.rbi +++ /dev/null @@ -1,39 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class RunRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def thread_id - end - - sig { params(_: String).returns(String) } - def thread_id=(_) - end - - sig do - params( - thread_id: String, - request_options: T.any( - OpenAI::RequestOptions, - T::Hash[Symbol, T.anything] - ) - ) - .returns(T.attached_class) - end - def self.new(thread_id:, request_options: {}) - end - - sig { override.returns({thread_id: String, request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/run_status.rbi b/rbi/lib/openai/models/beta/threads/run_status.rbi deleted file mode 100644 index e890d711..00000000 --- a/rbi/lib/openai/models/beta/threads/run_status.rbi +++ /dev/null @@ -1,29 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class RunStatus < OpenAI::Enum - abstract! 
- - QUEUED = :queued - IN_PROGRESS = :in_progress - REQUIRES_ACTION = :requires_action - CANCELLING = :cancelling - CANCELLED = :cancelled - FAILED = :failed - COMPLETED = :completed - INCOMPLETE = :incomplete - EXPIRED = :expired - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rbi b/rbi/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rbi deleted file mode 100644 index 42c12d0a..00000000 --- a/rbi/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rbi +++ /dev/null @@ -1,83 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class RunSubmitToolOutputsParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def thread_id - end - - sig { params(_: String).returns(String) } - def thread_id=(_) - end - - sig { returns(T::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput]) } - def tool_outputs - end - - sig do - params(_: T::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput]) - .returns(T::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput]) - end - def tool_outputs=(_) - end - - sig do - params( - thread_id: String, - tool_outputs: T::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput], - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(thread_id:, tool_outputs:, request_options: {}) - end - - sig do - override - .returns( - { - thread_id: String, - tool_outputs: T::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput], - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class ToolOutput < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def output - end - - sig { params(_: String).returns(String) } - def output=(_) - end - - sig { returns(T.nilable(String)) } - def tool_call_id - end - - sig { params(_: String).returns(String) } - def tool_call_id=(_) - end - - sig { params(output: String, tool_call_id: String).returns(T.attached_class) } - def self.new(output: nil, tool_call_id: nil) - end - - sig { override.returns({output: String, tool_call_id: String}) } - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/run_update_params.rbi b/rbi/lib/openai/models/beta/threads/run_update_params.rbi deleted file mode 100644 index 0ede0dab..00000000 --- a/rbi/lib/openai/models/beta/threads/run_update_params.rbi +++ /dev/null @@ -1,50 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class RunUpdateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def thread_id - end - - sig { params(_: String).returns(String) } - def thread_id=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig do - params( - thread_id: String, - metadata: T.nilable(OpenAI::Models::Metadata), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(thread_id:, metadata: nil, request_options: 
{}) - end - - sig do - override - .returns( - {thread_id: String, metadata: T.nilable(OpenAI::Models::Metadata), request_options: OpenAI::RequestOptions} - ) - end - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_logs.rbi b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_logs.rbi deleted file mode 100644 index 53f5040c..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_logs.rbi +++ /dev/null @@ -1,45 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - module Runs - class CodeInterpreterLogs < OpenAI::BaseModel - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(String)) } - def logs - end - - sig { params(_: String).returns(String) } - def logs=(_) - end - - sig { params(index: Integer, logs: String, type: Symbol).returns(T.attached_class) } - def self.new(index:, logs: nil, type: :logs) - end - - sig { override.returns({index: Integer, type: Symbol, logs: String}) } - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rbi b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rbi deleted file mode 100644 index e3a5d1f7..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rbi +++ /dev/null @@ -1,78 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - module Runs - class CodeInterpreterOutputImage < OpenAI::BaseModel - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image)) } - def image - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image) - .returns(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image) - end - def image=(_) - end - - sig do - params( - index: Integer, - image: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(index:, image: nil, type: :image) - end - - sig do - override - .returns( - {index: Integer, type: Symbol, image: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image} - ) - end - def to_hash - end - - class Image < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { params(file_id: String).returns(T.attached_class) } - def self.new(file_id: nil) - end - - sig { override.returns({file_id: String}) } - def to_hash - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi deleted file mode 100644 index fb36dea2..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi +++ /dev/null @@ -1,250 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - module Runs - class CodeInterpreterToolCall < OpenAI::BaseModel - sig { 
returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter) } - def code_interpreter - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter) - .returns(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter) - end - def code_interpreter=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - id: String, - code_interpreter: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, code_interpreter:, type: :code_interpreter) - end - - sig do - override - .returns( - { - id: String, - code_interpreter: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter, - type: Symbol - } - ) - end - def to_hash - end - - class CodeInterpreter < OpenAI::BaseModel - sig { returns(String) } - def input - end - - sig { params(_: String).returns(String) } - def input=(_) - end - - sig do - returns( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs, - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image - ) - ] - ) - end - def outputs - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs, - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs, - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image - ) - ] - ) - end - def outputs=(_) - end - - sig do - params( - input: String, - outputs: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs, - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image - ) - ] - ) - .returns(T.attached_class) - end - def self.new(input:, outputs:) - end - - sig do - override - .returns( - { - input: String, - outputs: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs, - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image - ) - ] - } - ) - end - def to_hash - end - - class Output < OpenAI::Union - abstract! 
- - class Logs < OpenAI::BaseModel - sig { returns(String) } - def logs - end - - sig { params(_: String).returns(String) } - def logs=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(logs: String, type: Symbol).returns(T.attached_class) } - def self.new(logs:, type: :logs) - end - - sig { override.returns({logs: String, type: Symbol}) } - def to_hash - end - end - - class Image < OpenAI::BaseModel - sig do - returns( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image - ) - end - def image - end - - sig do - params( - _: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image - ) - .returns( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image - ) - end - def image=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - image: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(image:, type: :image) - end - - sig do - override - .returns( - { - image: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image, - type: Symbol - } - ) - end - def to_hash - end - - class Image < OpenAI::BaseModel - sig { returns(String) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { params(file_id: String).returns(T.attached_class) } - def self.new(file_id:) - end - - sig { override.returns({file_id: String}) } - def to_hash - end - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs], [Symbol, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image]] - ) - end - private def variants - end - end - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi deleted file mode 100644 index 763d9012..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi +++ /dev/null @@ -1,167 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - module Runs - class CodeInterpreterToolCallDelta < OpenAI::BaseModel - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(String)) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter)) } - def code_interpreter - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter) - .returns(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter) - end - def code_interpreter=(_) - end - - sig do - params( - index: Integer, - id: String, - code_interpreter: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter, - type: Symbol - ) - .returns(T.attached_class) 
- end - def self.new(index:, id: nil, code_interpreter: nil, type: :code_interpreter) - end - - sig do - override - .returns( - { - index: Integer, - type: Symbol, - id: String, - code_interpreter: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter - } - ) - end - def to_hash - end - - class CodeInterpreter < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def input - end - - sig { params(_: String).returns(String) } - def input=(_) - end - - sig do - returns( - T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs, - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage - ) - ] - ) - ) - end - def outputs - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs, - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs, - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage - ) - ] - ) - end - def outputs=(_) - end - - sig do - params( - input: String, - outputs: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs, - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage - ) - ] - ) - .returns(T.attached_class) - end - def self.new(input: nil, outputs: nil) - end - - sig do - override - .returns( - { - input: String, - outputs: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs, - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage - ) - ] - } - ) - end - def to_hash - end - - class Output < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs], [Symbol, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage]] - ) - end - private def variants - end - end - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call.rbi b/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call.rbi deleted file mode 100644 index 9aaccc68..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call.rbi +++ /dev/null @@ -1,250 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - module Runs - class FileSearchToolCall < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch) } - def file_search - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch) - .returns(OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch) - end - def file_search=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - id: String, - file_search: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, file_search:, type: :file_search) - end - - sig do - override - .returns( - {id: String, file_search: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch, type: Symbol} - ) - end - def to_hash - end - - class FileSearch < OpenAI::BaseModel - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions)) } - def ranking_options - end - - sig do - 
params(_: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions) - .returns(OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions) - end - def ranking_options=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result])) } - def results - end - - sig do - params(_: T::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result]) - .returns(T::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result]) - end - def results=(_) - end - - sig do - params( - ranking_options: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions, - results: T::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result] - ) - .returns(T.attached_class) - end - def self.new(ranking_options: nil, results: nil) - end - - sig do - override - .returns( - { - ranking_options: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions, - results: T::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result] - } - ) - end - def to_hash - end - - class RankingOptions < OpenAI::BaseModel - sig { returns(Symbol) } - def ranker - end - - sig { params(_: Symbol).returns(Symbol) } - def ranker=(_) - end - - sig { returns(Float) } - def score_threshold - end - - sig { params(_: Float).returns(Float) } - def score_threshold=(_) - end - - sig { params(ranker: Symbol, score_threshold: Float).returns(T.attached_class) } - def self.new(ranker:, score_threshold:) - end - - sig { override.returns({ranker: Symbol, score_threshold: Float}) } - def to_hash - end - - class Ranker < OpenAI::Enum - abstract! - - AUTO = :auto - DEFAULT_2024_08_21 = :default_2024_08_21 - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class Result < OpenAI::BaseModel - sig { returns(String) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { returns(String) } - def file_name - end - - sig { params(_: String).returns(String) } - def file_name=(_) - end - - sig { returns(Float) } - def score - end - - sig { params(_: Float).returns(Float) } - def score=(_) - end - - sig do - returns( - T.nilable(T::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content]) - ) - end - def content - end - - sig do - params(_: T::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content]) - .returns(T::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content]) - end - def content=(_) - end - - sig do - params( - file_id: String, - file_name: String, - score: Float, - content: T::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content] - ) - .returns(T.attached_class) - end - def self.new(file_id:, file_name:, score:, content: nil) - end - - sig do - override - .returns( - { - file_id: String, - file_name: String, - score: Float, - content: T::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content] - } - ) - end - def to_hash - end - - class Content < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(T.nilable(Symbol)) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(text: String, 
type: Symbol).returns(T.attached_class) } - def self.new(text: nil, type: nil) - end - - sig { override.returns({text: String, type: Symbol}) } - def to_hash - end - - class Type < OpenAI::Enum - abstract! - - TEXT = :text - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi deleted file mode 100644 index 82d07489..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi +++ /dev/null @@ -1,60 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - module Runs - class FileSearchToolCallDelta < OpenAI::BaseModel - sig { returns(T.anything) } - def file_search - end - - sig { params(_: T.anything).returns(T.anything) } - def file_search=(_) - end - - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(String)) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig do - params( - file_search: T.anything, - index: Integer, - id: String, - type: Symbol - ).returns(T.attached_class) - end - def self.new(file_search:, index:, id: nil, type: :file_search) - end - - sig { override.returns({file_search: T.anything, index: Integer, type: Symbol, id: String}) } - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/function_tool_call.rbi b/rbi/lib/openai/models/beta/threads/runs/function_tool_call.rbi deleted file mode 100644 index 4f13f258..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/function_tool_call.rbi +++ /dev/null @@ -1,96 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - module Runs - class FunctionToolCall < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function) } - def function - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function) - .returns(OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function) - end - def function=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - id: String, - function: OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, function:, type: :function) - end - - sig do - override - .returns( - {id: String, function: OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function, type: Symbol} - ) - end - def to_hash - end - - class Function < OpenAI::BaseModel - sig { returns(String) } - def arguments - end - - sig { params(_: String).returns(String) } - def arguments=(_) - end - - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { returns(T.nilable(String)) } - def output - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def output=(_) - end - - sig do - params(arguments: String, name: String, output: T.nilable(String)).returns(T.attached_class) - end - def 
self.new(arguments:, name:, output:) - end - - sig { override.returns({arguments: String, name: String, output: T.nilable(String)}) } - def to_hash - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/function_tool_call_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/function_tool_call_delta.rbi deleted file mode 100644 index d5acf0b1..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/function_tool_call_delta.rbi +++ /dev/null @@ -1,110 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - module Runs - class FunctionToolCallDelta < OpenAI::BaseModel - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(String)) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function)) } - def function - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function) - .returns(OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function) - end - def function=(_) - end - - sig do - params( - index: Integer, - id: String, - function: OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(index:, id: nil, function: nil, type: :function) - end - - sig do - override - .returns( - { - index: Integer, - type: Symbol, - id: String, - function: OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function - } - ) - end - def to_hash - end - - class Function < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def arguments - end - - sig { params(_: String).returns(String) } - def arguments=(_) - end - - sig { returns(T.nilable(String)) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { returns(T.nilable(String)) } - def output - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def output=(_) - end - - sig do - params(arguments: String, name: String, output: T.nilable(String)).returns(T.attached_class) - end - def self.new(arguments: nil, name: nil, output: nil) - end - - sig { override.returns({arguments: String, name: String, output: T.nilable(String)}) } - def to_hash - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/message_creation_step_details.rbi b/rbi/lib/openai/models/beta/threads/runs/message_creation_step_details.rbi deleted file mode 100644 index 73222dd0..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/message_creation_step_details.rbi +++ /dev/null @@ -1,72 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - module Runs - class MessageCreationStepDetails < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation) } - def message_creation - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation) - .returns(OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation) - end - def message_creation=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - message_creation: 
OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(message_creation:, type: :message_creation) - end - - sig do - override - .returns( - { - message_creation: OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation, - type: Symbol - } - ) - end - def to_hash - end - - class MessageCreation < OpenAI::BaseModel - sig { returns(String) } - def message_id - end - - sig { params(_: String).returns(String) } - def message_id=(_) - end - - sig { params(message_id: String).returns(T.attached_class) } - def self.new(message_id:) - end - - sig { override.returns({message_id: String}) } - def to_hash - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/run_step.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step.rbi deleted file mode 100644 index 2f08a34f..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/run_step.rbi +++ /dev/null @@ -1,367 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - RunStep = T.type_alias { Runs::RunStep } - - module Runs - class RunStep < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(String) } - def assistant_id - end - - sig { params(_: String).returns(String) } - def assistant_id=(_) - end - - sig { returns(T.nilable(Integer)) } - def cancelled_at - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def cancelled_at=(_) - end - - sig { returns(T.nilable(Integer)) } - def completed_at - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def completed_at=(_) - end - - sig { returns(Integer) } - def created_at - end - - sig { params(_: Integer).returns(Integer) } - def created_at=(_) - end - - sig { returns(T.nilable(Integer)) } - def expired_at - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def expired_at=(_) - end - - sig { returns(T.nilable(Integer)) } - def failed_at - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def failed_at=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::LastError)) } - def last_error - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::LastError)) - .returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::LastError)) - end - def last_error=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(String) } - def run_id - end - - sig { params(_: String).returns(String) } - def run_id=(_) - end - - sig { returns(Symbol) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig do - returns( - T.any( - OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails, - OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails - ) - ) - end - def step_details - end - - sig do - params( - _: T.any( - OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails, - OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails - ) - ) - .returns( - T.any( - OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails, - 
OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails - ) - ) - end - def step_details=(_) - end - - sig { returns(String) } - def thread_id - end - - sig { params(_: String).returns(String) } - def thread_id=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::Usage)) } - def usage - end - - sig do - params(_: T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::Usage)) - .returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::Usage)) - end - def usage=(_) - end - - sig do - params( - id: String, - assistant_id: String, - cancelled_at: T.nilable(Integer), - completed_at: T.nilable(Integer), - created_at: Integer, - expired_at: T.nilable(Integer), - failed_at: T.nilable(Integer), - last_error: T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::LastError), - metadata: T.nilable(OpenAI::Models::Metadata), - run_id: String, - status: Symbol, - step_details: T.any( - OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails, - OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails - ), - thread_id: String, - type: Symbol, - usage: T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::Usage), - object: Symbol - ) - .returns(T.attached_class) - end - def self.new( - id:, - assistant_id:, - cancelled_at:, - completed_at:, - created_at:, - expired_at:, - failed_at:, - last_error:, - metadata:, - run_id:, - status:, - step_details:, - thread_id:, - type:, - usage:, - object: :"thread.run.step" - ) - end - - sig do - override - .returns( - { - id: String, - assistant_id: String, - cancelled_at: T.nilable(Integer), - completed_at: T.nilable(Integer), - created_at: Integer, - expired_at: T.nilable(Integer), - failed_at: T.nilable(Integer), - last_error: T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::LastError), - metadata: T.nilable(OpenAI::Models::Metadata), - object: Symbol, - run_id: String, - status: Symbol, - step_details: T.any( - OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails, - OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails - ), - thread_id: String, - type: Symbol, - usage: T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::Usage) - } - ) - end - def to_hash - end - - class LastError < OpenAI::BaseModel - sig { returns(Symbol) } - def code - end - - sig { params(_: Symbol).returns(Symbol) } - def code=(_) - end - - sig { returns(String) } - def message - end - - sig { params(_: String).returns(String) } - def message=(_) - end - - sig { params(code: Symbol, message: String).returns(T.attached_class) } - def self.new(code:, message:) - end - - sig { override.returns({code: Symbol, message: String}) } - def to_hash - end - - class Code < OpenAI::Enum - abstract! - - SERVER_ERROR = :server_error - RATE_LIMIT_EXCEEDED = :rate_limit_exceeded - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class Status < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - CANCELLED = :cancelled - FAILED = :failed - COMPLETED = :completed - EXPIRED = :expired - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class StepDetails < OpenAI::Union - abstract! 
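For context, the RunStep model being deleted above was consumed roughly as follows. This is a minimal sketch only: `client`, the exact `steps.retrieve` call shape (implied by StepRetrieveParams later in this diff), and all IDs are assumptions, not part of this change.

# Sketch only: `client` and the resource method path are assumptions.
step = client.beta.threads.runs.steps.retrieve(
  "step_abc123",
  thread_id: "thread_abc123",
  run_id: "run_abc123"
)

case step.status
when :completed
  puts "completed; #{step.usage&.total_tokens} tokens used"
when :failed
  warn "#{step.last_error&.code}: #{step.last_error&.message}"
end

# step_details is the tagged union declared just above.
if step.step_details.is_a?(OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails)
  step.step_details.tool_calls.each { |call| p call }
end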
- - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails], [Symbol, OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails]] - ) - end - private def variants - end - end - end - - class Type < OpenAI::Enum - abstract! - - MESSAGE_CREATION = :message_creation - TOOL_CALLS = :tool_calls - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Usage < OpenAI::BaseModel - sig { returns(Integer) } - def completion_tokens - end - - sig { params(_: Integer).returns(Integer) } - def completion_tokens=(_) - end - - sig { returns(Integer) } - def prompt_tokens - end - - sig { params(_: Integer).returns(Integer) } - def prompt_tokens=(_) - end - - sig { returns(Integer) } - def total_tokens - end - - sig { params(_: Integer).returns(Integer) } - def total_tokens=(_) - end - - sig do - params( - completion_tokens: Integer, - prompt_tokens: Integer, - total_tokens: Integer - ).returns(T.attached_class) - end - def self.new(completion_tokens:, prompt_tokens:, total_tokens:) - end - - sig do - override.returns({completion_tokens: Integer, prompt_tokens: Integer, total_tokens: Integer}) - end - def to_hash - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/run_step_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step_delta.rbi deleted file mode 100644 index 60805244..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/run_step_delta.rbi +++ /dev/null @@ -1,86 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - RunStepDelta = T.type_alias { Runs::RunStepDelta } - - module Runs - class RunStepDelta < OpenAI::BaseModel - sig do - returns( - T.nilable( - T.any( - OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, - OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject - ) - ) - ) - end - def step_details - end - - sig do - params( - _: T.any( - OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, - OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject - ) - ) - .returns( - T.any( - OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, - OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject - ) - ) - end - def step_details=(_) - end - - sig do - params( - step_details: T.any( - OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, - OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject - ) - ) - .returns(T.attached_class) - end - def self.new(step_details: nil) - end - - sig do - override - .returns( - { - step_details: T.any( - OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, - OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject - ) - } - ) - end - def to_hash - end - - class StepDetails < OpenAI::Union - abstract! 
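The RunStepDelta union above pairs with RunStepDeltaEvent (the next file in this diff) during streaming. A hedged sketch of consuming such events follows; `event_stream` is a stand-in for however the events are obtained and is not an API defined in this diff.

# `event_stream` is assumed to yield RunStepDeltaEvent-shaped objects in order.
event_stream.each do |event|
  details = event.delta.step_details
  case details
  when OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject
    Array(details.tool_calls).each do |tool_call|
      # Function arguments arrive incrementally across successive deltas.
      print tool_call.function&.arguments if tool_call.respond_to?(:function)
    end
  when OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta
    puts "message step: #{details.message_creation&.message_id}"
  end
end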
- - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta], [Symbol, OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject]] - ) - end - private def variants - end - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/run_step_delta_event.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step_delta_event.rbi deleted file mode 100644 index 73076f3a..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/run_step_delta_event.rbi +++ /dev/null @@ -1,53 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - RunStepDeltaEvent = T.type_alias { Runs::RunStepDeltaEvent } - - module Runs - class RunStepDeltaEvent < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStepDelta) } - def delta - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStepDelta) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStepDelta) - end - def delta=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig do - params(id: String, delta: OpenAI::Models::Beta::Threads::Runs::RunStepDelta, object: Symbol) - .returns(T.attached_class) - end - def self.new(id:, delta:, object: :"thread.run.step.delta") - end - - sig { override.returns({id: String, delta: OpenAI::Models::Beta::Threads::Runs::RunStepDelta, object: Symbol}) } - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rbi deleted file mode 100644 index e933c731..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rbi +++ /dev/null @@ -1,74 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - RunStepDeltaMessageDelta = T.type_alias { Runs::RunStepDeltaMessageDelta } - - module Runs - class RunStepDeltaMessageDelta < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation)) } - def message_creation - end - - sig do - params(_: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation) - end - def message_creation=(_) - end - - sig do - params( - message_creation: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(message_creation: nil, type: :message_creation) - end - - sig do - override - .returns( - { - type: Symbol, - message_creation: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation - } - ) - end - def to_hash - end - - class MessageCreation < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def message_id - end - - sig { params(_: String).returns(String) } - def message_id=(_) - end - - sig { params(message_id: String).returns(T.attached_class) } - def self.new(message_id: nil) - end - - sig { override.returns({message_id: String}) } - def to_hash - end - end - end - end - end - end - end -end diff --git 
a/rbi/lib/openai/models/beta/threads/runs/run_step_include.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step_include.rbi deleted file mode 100644 index b2726421..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/run_step_include.rbi +++ /dev/null @@ -1,25 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - RunStepInclude = T.type_alias { Runs::RunStepInclude } - - module Runs - class RunStepInclude < OpenAI::Enum - abstract! - - STEP_DETAILS_TOOL_CALLS_FILE_SEARCH_RESULTS_CONTENT = :"step_details.tool_calls[*].file_search.results[*].content" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/step_list_params.rbi b/rbi/lib/openai/models/beta/threads/runs/step_list_params.rbi deleted file mode 100644 index 56b21c91..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/step_list_params.rbi +++ /dev/null @@ -1,109 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - module Runs - class StepListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def thread_id - end - - sig { params(_: String).returns(String) } - def thread_id=(_) - end - - sig { returns(T.nilable(String)) } - def after - end - - sig { params(_: String).returns(String) } - def after=(_) - end - - sig { returns(T.nilable(String)) } - def before - end - - sig { params(_: String).returns(String) } - def before=(_) - end - - sig { returns(T.nilable(T::Array[Symbol])) } - def include - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def include=(_) - end - - sig { returns(T.nilable(Integer)) } - def limit - end - - sig { params(_: Integer).returns(Integer) } - def limit=(_) - end - - sig { returns(T.nilable(Symbol)) } - def order - end - - sig { params(_: Symbol).returns(Symbol) } - def order=(_) - end - - sig do - params( - thread_id: String, - after: String, - before: String, - include: T::Array[Symbol], - limit: Integer, - order: Symbol, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(thread_id:, after: nil, before: nil, include: nil, limit: nil, order: nil, request_options: {}) - end - - sig do - override - .returns( - { - thread_id: String, - after: String, - before: String, - include: T::Array[Symbol], - limit: Integer, - order: Symbol, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Order < OpenAI::Enum - abstract! 
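StepListParams above maps onto a paginated list call. A minimal sketch, assuming the conventional resource method shape and pagination accessors; the run/thread IDs are placeholders, and the `include` entry is the single RunStepInclude symbol defined just before this file.

# Sketch only: the method shape and `page.data` accessor are assumptions.
page = client.beta.threads.runs.steps.list(
  "run_abc123",
  thread_id: "thread_abc123",
  order: :desc,
  limit: 20,
  include: [:"step_details.tool_calls[*].file_search.results[*].content"]
)
page.data.each { |step| puts "#{step.id} (#{step.status})" }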
- - ASC = :asc - DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/step_retrieve_params.rbi b/rbi/lib/openai/models/beta/threads/runs/step_retrieve_params.rbi deleted file mode 100644 index 9778a156..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/step_retrieve_params.rbi +++ /dev/null @@ -1,66 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - module Runs - class StepRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def thread_id - end - - sig { params(_: String).returns(String) } - def thread_id=(_) - end - - sig { returns(String) } - def run_id - end - - sig { params(_: String).returns(String) } - def run_id=(_) - end - - sig { returns(T.nilable(T::Array[Symbol])) } - def include - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def include=(_) - end - - sig do - params( - thread_id: String, - run_id: String, - include: T::Array[Symbol], - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(thread_id:, run_id:, include: nil, request_options: {}) - end - - sig do - override - .returns( - { - thread_id: String, - run_id: String, - include: T::Array[Symbol], - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/tool_call.rbi b/rbi/lib/openai/models/beta/threads/runs/tool_call.rbi deleted file mode 100644 index 5d68bb14..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/tool_call.rbi +++ /dev/null @@ -1,26 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - module Runs - class ToolCall < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall], [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall], [Symbol, OpenAI::Models::Beta::Threads::Runs::FunctionToolCall]] - ) - end - private def variants - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/tool_call_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/tool_call_delta.rbi deleted file mode 100644 index fc5420d6..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/tool_call_delta.rbi +++ /dev/null @@ -1,26 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - module Runs - class ToolCallDelta < OpenAI::Union - abstract! 
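These OpenAI::Union subclasses all follow one pattern: the private `variants` method pairs a discriminator symbol with a model class, and decoding dispatches on the payload's `type` field. A conceptual sketch using the ToolCall union above; the `resolve_tool_call` helper and hash are invented for illustration and are not SDK API.

# Hypothetical mirror of the [type symbol, model class] pairs returned by
# ToolCall#variants; not part of the SDK. Assumes the payload carries a
# symbol-keyed :type field.
TOOL_CALL_VARIANTS = {
  code_interpreter: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall,
  file_search: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall,
  function: OpenAI::Models::Beta::Threads::Runs::FunctionToolCall
}.freeze

def resolve_tool_call(payload)
  TOOL_CALL_VARIANTS.fetch(payload.fetch(:type).to_sym)
end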
- - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta], [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta], [Symbol, OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta]] - ) - end - private def variants - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/tool_call_delta_object.rbi b/rbi/lib/openai/models/beta/threads/runs/tool_call_delta_object.rbi deleted file mode 100644 index f9cb13ff..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/tool_call_delta_object.rbi +++ /dev/null @@ -1,94 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - module Runs - class ToolCallDeltaObject < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - returns( - T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta, - OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta, - OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta - ) - ] - ) - ) - end - def tool_calls - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta, - OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta, - OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta, - OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta, - OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta - ) - ] - ) - end - def tool_calls=(_) - end - - sig do - params( - tool_calls: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta, - OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta, - OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta - ) - ], - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(tool_calls: nil, type: :tool_calls) - end - - sig do - override - .returns( - { - type: Symbol, - tool_calls: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta, - OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta, - OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta - ) - ] - } - ) - end - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/runs/tool_calls_step_details.rbi b/rbi/lib/openai/models/beta/threads/runs/tool_calls_step_details.rbi deleted file mode 100644 index 4d2d6cf2..00000000 --- a/rbi/lib/openai/models/beta/threads/runs/tool_calls_step_details.rbi +++ /dev/null @@ -1,92 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - module Runs - class ToolCallsStepDetails < OpenAI::BaseModel - sig do - returns( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall, - OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall, - OpenAI::Models::Beta::Threads::Runs::FunctionToolCall - ) - ] - ) - end - def tool_calls - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall, - OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall, - OpenAI::Models::Beta::Threads::Runs::FunctionToolCall - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall, - OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall, 
- OpenAI::Models::Beta::Threads::Runs::FunctionToolCall - ) - ] - ) - end - def tool_calls=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - tool_calls: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall, - OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall, - OpenAI::Models::Beta::Threads::Runs::FunctionToolCall - ) - ], - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(tool_calls:, type: :tool_calls) - end - - sig do - override - .returns( - { - tool_calls: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall, - OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall, - OpenAI::Models::Beta::Threads::Runs::FunctionToolCall - ) - ], - type: Symbol - } - ) - end - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/text.rbi b/rbi/lib/openai/models/beta/threads/text.rbi deleted file mode 100644 index 97fe10b1..00000000 --- a/rbi/lib/openai/models/beta/threads/text.rbi +++ /dev/null @@ -1,85 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class Text < OpenAI::BaseModel - sig do - returns( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::FileCitationAnnotation, - OpenAI::Models::Beta::Threads::FilePathAnnotation - ) - ] - ) - end - def annotations - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::FileCitationAnnotation, - OpenAI::Models::Beta::Threads::FilePathAnnotation - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::FileCitationAnnotation, - OpenAI::Models::Beta::Threads::FilePathAnnotation - ) - ] - ) - end - def annotations=(_) - end - - sig { returns(String) } - def value - end - - sig { params(_: String).returns(String) } - def value=(_) - end - - sig do - params( - annotations: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::FileCitationAnnotation, - OpenAI::Models::Beta::Threads::FilePathAnnotation - ) - ], - value: String - ) - .returns(T.attached_class) - end - def self.new(annotations:, value:) - end - - sig do - override - .returns( - { - annotations: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::FileCitationAnnotation, - OpenAI::Models::Beta::Threads::FilePathAnnotation - ) - ], - value: String - } - ) - end - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/text_content_block.rbi b/rbi/lib/openai/models/beta/threads/text_content_block.rbi deleted file mode 100644 index d714859a..00000000 --- a/rbi/lib/openai/models/beta/threads/text_content_block.rbi +++ /dev/null @@ -1,35 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class TextContentBlock < OpenAI::BaseModel - sig { returns(OpenAI::Models::Beta::Threads::Text) } - def text - end - - sig { params(_: OpenAI::Models::Beta::Threads::Text).returns(OpenAI::Models::Beta::Threads::Text) } - def text=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(text: OpenAI::Models::Beta::Threads::Text, type: Symbol).returns(T.attached_class) } - def self.new(text:, type: :text) - end - - sig { override.returns({text: OpenAI::Models::Beta::Threads::Text, type: Symbol}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/text_content_block_param.rbi 
b/rbi/lib/openai/models/beta/threads/text_content_block_param.rbi deleted file mode 100644 index dcfd074b..00000000 --- a/rbi/lib/openai/models/beta/threads/text_content_block_param.rbi +++ /dev/null @@ -1,35 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class TextContentBlockParam < OpenAI::BaseModel - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(text: String, type: Symbol).returns(T.attached_class) } - def self.new(text:, type: :text) - end - - sig { override.returns({text: String, type: Symbol}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/text_delta.rbi b/rbi/lib/openai/models/beta/threads/text_delta.rbi deleted file mode 100644 index 3ffea679..00000000 --- a/rbi/lib/openai/models/beta/threads/text_delta.rbi +++ /dev/null @@ -1,87 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class TextDelta < OpenAI::BaseModel - sig do - returns( - T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation, - OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation - ) - ] - ) - ) - end - def annotations - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation, - OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation, - OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation - ) - ] - ) - end - def annotations=(_) - end - - sig { returns(T.nilable(String)) } - def value - end - - sig { params(_: String).returns(String) } - def value=(_) - end - - sig do - params( - annotations: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation, - OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation - ) - ], - value: String - ) - .returns(T.attached_class) - end - def self.new(annotations: nil, value: nil) - end - - sig do - override - .returns( - { - annotations: T::Array[ - T.any( - OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation, - OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation - ) - ], - value: String - } - ) - end - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/beta/threads/text_delta_block.rbi b/rbi/lib/openai/models/beta/threads/text_delta_block.rbi deleted file mode 100644 index a3e06fc4..00000000 --- a/rbi/lib/openai/models/beta/threads/text_delta_block.rbi +++ /dev/null @@ -1,46 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Beta - module Threads - class TextDeltaBlock < OpenAI::BaseModel - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Beta::Threads::TextDelta)) } - def text - end - - sig { params(_: OpenAI::Models::Beta::Threads::TextDelta).returns(OpenAI::Models::Beta::Threads::TextDelta) } - def text=(_) - end - - sig do - params(index: Integer, text: OpenAI::Models::Beta::Threads::TextDelta, type: Symbol) - .returns(T.attached_class) - end - def self.new(index:, text: nil, type: :text) - end - - sig { override.returns({index: Integer, type: Symbol, text: 
OpenAI::Models::Beta::Threads::TextDelta}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion.rbi b/rbi/lib/openai/models/chat/chat_completion.rbi deleted file mode 100644 index 85749208..00000000 --- a/rbi/lib/openai/models/chat/chat_completion.rbi +++ /dev/null @@ -1,261 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletion = T.type_alias { Chat::ChatCompletion } - - module Chat - class ChatCompletion < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T::Array[OpenAI::Models::Chat::ChatCompletion::Choice]) } - def choices - end - - sig do - params(_: T::Array[OpenAI::Models::Chat::ChatCompletion::Choice]) - .returns(T::Array[OpenAI::Models::Chat::ChatCompletion::Choice]) - end - def choices=(_) - end - - sig { returns(Integer) } - def created - end - - sig { params(_: Integer).returns(Integer) } - def created=(_) - end - - sig { returns(String) } - def model - end - - sig { params(_: String).returns(String) } - def model=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(T.nilable(Symbol)) } - def service_tier - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def service_tier=(_) - end - - sig { returns(T.nilable(String)) } - def system_fingerprint - end - - sig { params(_: String).returns(String) } - def system_fingerprint=(_) - end - - sig { returns(T.nilable(OpenAI::Models::CompletionUsage)) } - def usage - end - - sig { params(_: OpenAI::Models::CompletionUsage).returns(OpenAI::Models::CompletionUsage) } - def usage=(_) - end - - sig do - params( - id: String, - choices: T::Array[OpenAI::Models::Chat::ChatCompletion::Choice], - created: Integer, - model: String, - service_tier: T.nilable(Symbol), - system_fingerprint: String, - usage: OpenAI::Models::CompletionUsage, - object: Symbol - ) - .returns(T.attached_class) - end - def self.new( - id:, - choices:, - created:, - model:, - service_tier: nil, - system_fingerprint: nil, - usage: nil, - object: :"chat.completion" - ) - end - - sig do - override - .returns( - { - id: String, - choices: T::Array[OpenAI::Models::Chat::ChatCompletion::Choice], - created: Integer, - model: String, - object: Symbol, - service_tier: T.nilable(Symbol), - system_fingerprint: String, - usage: OpenAI::Models::CompletionUsage - } - ) - end - def to_hash - end - - class Choice < OpenAI::BaseModel - sig { returns(Symbol) } - def finish_reason - end - - sig { params(_: Symbol).returns(Symbol) } - def finish_reason=(_) - end - - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs)) } - def logprobs - end - - sig do - params(_: T.nilable(OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs)) - .returns(T.nilable(OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs)) - end - def logprobs=(_) - end - - sig { returns(OpenAI::Models::Chat::ChatCompletionMessage) } - def message - end - - sig do - params(_: OpenAI::Models::Chat::ChatCompletionMessage).returns(OpenAI::Models::Chat::ChatCompletionMessage) - end - def message=(_) - end - - sig do - params( - finish_reason: Symbol, - index: Integer, - logprobs: T.nilable(OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs), - message: OpenAI::Models::Chat::ChatCompletionMessage - ) - 
.returns(T.attached_class) - end - def self.new(finish_reason:, index:, logprobs:, message:) - end - - sig do - override - .returns( - { - finish_reason: Symbol, - index: Integer, - logprobs: T.nilable(OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs), - message: OpenAI::Models::Chat::ChatCompletionMessage - } - ) - end - def to_hash - end - - class FinishReason < OpenAI::Enum - abstract! - - STOP = :stop - LENGTH = :length - TOOL_CALLS = :tool_calls - CONTENT_FILTER = :content_filter - FUNCTION_CALL = :function_call - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Logprobs < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) } - def content - end - - sig do - params(_: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) - .returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) - end - def content=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) } - def refusal - end - - sig do - params(_: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) - .returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) - end - def refusal=(_) - end - - sig do - params( - content: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]), - refusal: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]) - ) - .returns(T.attached_class) - end - def self.new(content:, refusal:) - end - - sig do - override - .returns( - { - content: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]), - refusal: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]) - } - ) - end - def to_hash - end - end - end - - class ServiceTier < OpenAI::Enum - abstract! 
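For orientation, the ChatCompletion model deleted here is the return type of the standard create call. A minimal sketch assuming a configured `client`; the model name and prompt are placeholders.

completion = client.chat.completions.create(
  model: "gpt-4o",
  messages: [{role: :user, content: "Say hello"}]
)

choice = completion.choices.first
puts choice.message.content
puts "finish_reason=#{choice.finish_reason} total_tokens=#{completion.usage&.total_tokens}"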
- - SCALE = T.let(:scale, T.nilable(Symbol)) - DEFAULT = T.let(:default, T.nilable(Symbol)) - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_assistant_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_assistant_message_param.rbi deleted file mode 100644 index 10203986..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_assistant_message_param.rbi +++ /dev/null @@ -1,263 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionAssistantMessageParam = T.type_alias { Chat::ChatCompletionAssistantMessageParam } - - module Chat - class ChatCompletionAssistantMessageParam < OpenAI::BaseModel - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio)) } - def audio - end - - sig do - params(_: T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio)) - .returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio)) - end - def audio=(_) - end - - sig do - returns( - T.nilable( - T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartRefusal - ) - ] - ) - ) - ) - end - def content - end - - sig do - params( - _: T.nilable( - T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartRefusal - ) - ] - ) - ) - ) - .returns( - T.nilable( - T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartRefusal - ) - ] - ) - ) - ) - end - def content=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall)) } - def function_call - end - - sig do - params(_: T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall)) - .returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall)) - end - def function_call=(_) - end - - sig { returns(T.nilable(String)) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { returns(T.nilable(String)) } - def refusal - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def refusal=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall])) } - def tool_calls - end - - sig do - params(_: T::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall]) - .returns(T::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall]) - end - def tool_calls=(_) - end - - sig do - params( - audio: T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio), - content: T.nilable( - T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartRefusal - ) - ] - ) - ), - function_call: T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall), - name: String, - refusal: T.nilable(String), - tool_calls: T::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall], - role: Symbol - ) - .returns(T.attached_class) - end - def self.new(audio: nil, content: nil, function_call: nil, name: nil, refusal: nil, tool_calls: nil, role: :assistant) - end - - sig do - override - .returns( - 
{ - role: Symbol, - audio: T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio), - content: T.nilable( - T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartRefusal - ) - ] - ) - ), - function_call: T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall), - name: String, - refusal: T.nilable(String), - tool_calls: T::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall] - } - ) - end - def to_hash - end - - class Audio < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { params(id: String).returns(T.attached_class) } - def self.new(id:) - end - - sig { override.returns({id: String}) } - def to_hash - end - end - - class Content < OpenAI::Union - abstract! - - ArrayOfContentPartArray = T.type_alias do - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartRefusal - ) - ] - end - - class ArrayOfContentPart < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Chat::ChatCompletionContentPartText], [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartRefusal]] - ) - end - private def variants - end - end - end - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [ - NilClass, - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartRefusal - ) - ] - ] - ] - ) - end - private def variants - end - end - end - - class FunctionCall < OpenAI::BaseModel - sig { returns(String) } - def arguments - end - - sig { params(_: String).returns(String) } - def arguments=(_) - end - - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { params(arguments: String, name: String).returns(T.attached_class) } - def self.new(arguments:, name:) - end - - sig { override.returns({arguments: String, name: String}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_audio.rbi b/rbi/lib/openai/models/chat/chat_completion_audio.rbi deleted file mode 100644 index f8431d64..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_audio.rbi +++ /dev/null @@ -1,53 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionAudio = T.type_alias { Chat::ChatCompletionAudio } - - module Chat - class ChatCompletionAudio < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(String) } - def data - end - - sig { params(_: String).returns(String) } - def data=(_) - end - - sig { returns(Integer) } - def expires_at - end - - sig { params(_: Integer).returns(Integer) } - def expires_at=(_) - end - - sig { returns(String) } - def transcript - end - - sig { params(_: String).returns(String) } - def transcript=(_) - end - - sig do - params(id: String, data: String, expires_at: Integer, transcript: String).returns(T.attached_class) - end - def self.new(id:, data:, expires_at:, transcript:) - end - - sig { override.returns({id: String, data: String, expires_at: Integer, transcript: String}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_audio_param.rbi b/rbi/lib/openai/models/chat/chat_completion_audio_param.rbi deleted file mode 
100644 index 0cbbdba5..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_audio_param.rbi +++ /dev/null @@ -1,70 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionAudioParam = T.type_alias { Chat::ChatCompletionAudioParam } - - module Chat - class ChatCompletionAudioParam < OpenAI::BaseModel - sig { returns(Symbol) } - def format_ - end - - sig { params(_: Symbol).returns(Symbol) } - def format_=(_) - end - - sig { returns(Symbol) } - def voice - end - - sig { params(_: Symbol).returns(Symbol) } - def voice=(_) - end - - sig { params(format_: Symbol, voice: Symbol).returns(T.attached_class) } - def self.new(format_:, voice:) - end - - sig { override.returns({format_: Symbol, voice: Symbol}) } - def to_hash - end - - class Format < OpenAI::Enum - abstract! - - WAV = :wav - MP3 = :mp3 - FLAC = :flac - OPUS = :opus - PCM16 = :pcm16 - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Voice < OpenAI::Enum - abstract! - - ALLOY = :alloy - ASH = :ash - BALLAD = :ballad - CORAL = :coral - ECHO = :echo - SAGE = :sage - SHIMMER = :shimmer - VERSE = :verse - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_chunk.rbi b/rbi/lib/openai/models/chat/chat_completion_chunk.rbi deleted file mode 100644 index 12f6d8b5..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_chunk.rbi +++ /dev/null @@ -1,481 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionChunk = T.type_alias { Chat::ChatCompletionChunk } - - module Chat - class ChatCompletionChunk < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice]) } - def choices - end - - sig do - params(_: T::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice]) - .returns(T::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice]) - end - def choices=(_) - end - - sig { returns(Integer) } - def created - end - - sig { params(_: Integer).returns(Integer) } - def created=(_) - end - - sig { returns(String) } - def model - end - - sig { params(_: String).returns(String) } - def model=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(T.nilable(Symbol)) } - def service_tier - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def service_tier=(_) - end - - sig { returns(T.nilable(String)) } - def system_fingerprint - end - - sig { params(_: String).returns(String) } - def system_fingerprint=(_) - end - - sig { returns(T.nilable(OpenAI::Models::CompletionUsage)) } - def usage - end - - sig { params(_: T.nilable(OpenAI::Models::CompletionUsage)).returns(T.nilable(OpenAI::Models::CompletionUsage)) } - def usage=(_) - end - - sig do - params( - id: String, - choices: T::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice], - created: Integer, - model: String, - service_tier: T.nilable(Symbol), - system_fingerprint: String, - usage: T.nilable(OpenAI::Models::CompletionUsage), - object: Symbol - ) - .returns(T.attached_class) - end - def self.new( - id:, - choices:, - created:, - model:, - service_tier: nil, - system_fingerprint: nil, - usage: nil, - object: :"chat.completion.chunk" - ) - end - - sig do - override - .returns( - { - id: String, - choices: 
T::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice], - created: Integer, - model: String, - object: Symbol, - service_tier: T.nilable(Symbol), - system_fingerprint: String, - usage: T.nilable(OpenAI::Models::CompletionUsage) - } - ) - end - def to_hash - end - - class Choice < OpenAI::BaseModel - sig { returns(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta) } - def delta - end - - sig do - params(_: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta) - .returns(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta) - end - def delta=(_) - end - - sig { returns(T.nilable(Symbol)) } - def finish_reason - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def finish_reason=(_) - end - - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs)) } - def logprobs - end - - sig do - params(_: T.nilable(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs)) - .returns(T.nilable(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs)) - end - def logprobs=(_) - end - - sig do - params( - delta: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta, - finish_reason: T.nilable(Symbol), - index: Integer, - logprobs: T.nilable(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs) - ) - .returns(T.attached_class) - end - def self.new(delta:, finish_reason:, index:, logprobs: nil) - end - - sig do - override - .returns( - { - delta: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta, - finish_reason: T.nilable(Symbol), - index: Integer, - logprobs: T.nilable(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs) - } - ) - end - def to_hash - end - - class Delta < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def content - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def content=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall)) } - def function_call - end - - sig do - params(_: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall) - .returns(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall) - end - def function_call=(_) - end - - sig { returns(T.nilable(String)) } - def refusal - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def refusal=(_) - end - - sig { returns(T.nilable(Symbol)) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall])) } - def tool_calls - end - - sig do - params(_: T::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall]) - .returns(T::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall]) - end - def tool_calls=(_) - end - - sig do - params( - content: T.nilable(String), - function_call: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall, - refusal: T.nilable(String), - role: Symbol, - tool_calls: T::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall] - ) - .returns(T.attached_class) - end - def self.new(content: nil, function_call: nil, refusal: nil, role: nil, tool_calls: nil) - end - - sig do - override - .returns( - { - content: T.nilable(String), - function_call: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall, - refusal: T.nilable(String), - role: 
Symbol, - tool_calls: T::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall] - } - ) - end - def to_hash - end - - class FunctionCall < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def arguments - end - - sig { params(_: String).returns(String) } - def arguments=(_) - end - - sig { returns(T.nilable(String)) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { params(arguments: String, name: String).returns(T.attached_class) } - def self.new(arguments: nil, name: nil) - end - - sig { override.returns({arguments: String, name: String}) } - def to_hash - end - end - - class Role < OpenAI::Enum - abstract! - - DEVELOPER = :developer - SYSTEM = :system - USER = :user - ASSISTANT = :assistant - TOOL = :tool - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class ToolCall < OpenAI::BaseModel - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(T.nilable(String)) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function)) } - def function - end - - sig do - params(_: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function) - .returns(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function) - end - def function=(_) - end - - sig { returns(T.nilable(Symbol)) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - index: Integer, - id: String, - function: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(index:, id: nil, function: nil, type: nil) - end - - sig do - override - .returns( - { - index: Integer, - id: String, - function: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function, - type: Symbol - } - ) - end - def to_hash - end - - class Function < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def arguments - end - - sig { params(_: String).returns(String) } - def arguments=(_) - end - - sig { returns(T.nilable(String)) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { params(arguments: String, name: String).returns(T.attached_class) } - def self.new(arguments: nil, name: nil) - end - - sig { override.returns({arguments: String, name: String}) } - def to_hash - end - end - - class Type < OpenAI::Enum - abstract! - - FUNCTION = :function - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - - class FinishReason < OpenAI::Enum - abstract! 
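The chunk, delta, and tool-call models above exist for incremental assembly: each streamed `ChatCompletionChunk` carries fragments of content and of tool-call arguments keyed by `index`, with a nilable `finish_reason` arriving on the final chunk. A minimal sketch of folding a stream back together, assuming `chunks` is any Enumerable of `ChatCompletionChunk` instances (how the stream is obtained is outside this diff):

```ruby
content = +""
calls = Hash.new { |h, k| h[k] = {id: nil, name: +"", arguments: +""} }

chunks.each do |chunk|
  chunk.choices.each do |choice|
    delta = choice.delta
    content << delta.content if delta.content

    (delta.tool_calls || []).each do |tc|
      acc = calls[tc.index]              # fragments for one call share an index
      acc[:id] ||= tc.id                 # id arrives only on the first fragment
      if (fn = tc.function)
        acc[:name] << fn.name.to_s
        acc[:arguments] << fn.arguments.to_s
      end
    end

    warn "finish_reason: #{choice.finish_reason}" if choice.finish_reason
  end
end
```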
- - STOP = T.let(:stop, T.nilable(Symbol)) - LENGTH = T.let(:length, T.nilable(Symbol)) - TOOL_CALLS = T.let(:tool_calls, T.nilable(Symbol)) - CONTENT_FILTER = T.let(:content_filter, T.nilable(Symbol)) - FUNCTION_CALL = T.let(:function_call, T.nilable(Symbol)) - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Logprobs < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) } - def content - end - - sig do - params(_: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) - .returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) - end - def content=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) } - def refusal - end - - sig do - params(_: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) - .returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) - end - def refusal=(_) - end - - sig do - params( - content: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]), - refusal: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]) - ) - .returns(T.attached_class) - end - def self.new(content:, refusal:) - end - - sig do - override - .returns( - { - content: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]), - refusal: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]) - } - ) - end - def to_hash - end - end - end - - class ServiceTier < OpenAI::Enum - abstract! - - SCALE = T.let(:scale, T.nilable(Symbol)) - DEFAULT = T.let(:default, T.nilable(Symbol)) - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part.rbi deleted file mode 100644 index 02a241f0..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_content_part.rbi +++ /dev/null @@ -1,90 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionContentPart = T.type_alias { Chat::ChatCompletionContentPart } - - module Chat - class ChatCompletionContentPart < OpenAI::Union - abstract! 
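`ChatCompletionContentPart` itself is a discriminated union: the `variants` signature below pairs each wire `type` with its model class, so a coerced part can be branched on directly with Ruby pattern matching. A sketch, assuming `part` is one element of an already-parsed content array:

```ruby
description =
  case part
  in OpenAI::Models::Chat::ChatCompletionContentPartText then part.text
  in OpenAI::Models::Chat::ChatCompletionContentPartImage then "image: #{part.image_url.url}"
  in OpenAI::Models::Chat::ChatCompletionContentPartInputAudio then "audio (#{part.input_audio.format_})"
  in OpenAI::Models::Chat::ChatCompletionContentPart::File then "file: #{part.file.file_id}"
  end
# note: an unknown part class would raise NoMatchingPatternError here
```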
- - class File < OpenAI::BaseModel - sig { returns(OpenAI::Models::Chat::ChatCompletionContentPart::File::File) } - def file - end - - sig do - params(_: OpenAI::Models::Chat::ChatCompletionContentPart::File::File) - .returns(OpenAI::Models::Chat::ChatCompletionContentPart::File::File) - end - def file=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params(file: OpenAI::Models::Chat::ChatCompletionContentPart::File::File, type: Symbol) - .returns(T.attached_class) - end - def self.new(file:, type: :file) - end - - sig { override.returns({file: OpenAI::Models::Chat::ChatCompletionContentPart::File::File, type: Symbol}) } - def to_hash - end - - class File < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def file_data - end - - sig { params(_: String).returns(String) } - def file_data=(_) - end - - sig { returns(T.nilable(String)) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { returns(T.nilable(String)) } - def file_name - end - - sig { params(_: String).returns(String) } - def file_name=(_) - end - - sig { params(file_data: String, file_id: String, file_name: String).returns(T.attached_class) } - def self.new(file_data: nil, file_id: nil, file_name: nil) - end - - sig { override.returns({file_data: String, file_id: String, file_name: String}) } - def to_hash - end - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Chat::ChatCompletionContentPartText], [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartImage], [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio], [Symbol, OpenAI::Models::Chat::ChatCompletionContentPart::File]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part_image.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part_image.rbi deleted file mode 100644 index e74c6c80..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_content_part_image.rbi +++ /dev/null @@ -1,83 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionContentPartImage = T.type_alias { Chat::ChatCompletionContentPartImage } - - module Chat - class ChatCompletionContentPartImage < OpenAI::BaseModel - sig { returns(OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL) } - def image_url - end - - sig do - params(_: OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL) - .returns(OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL) - end - def image_url=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params(image_url: OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL, type: Symbol) - .returns(T.attached_class) - end - def self.new(image_url:, type: :image_url) - end - - sig do - override.returns({image_url: OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL, type: Symbol}) - end - def to_hash - end - - class ImageURL < OpenAI::BaseModel - sig { returns(String) } - def url - end - - sig { params(_: String).returns(String) } - def url=(_) - end - - sig { returns(T.nilable(Symbol)) } - def detail - end - - sig { params(_: Symbol).returns(Symbol) } - def detail=(_) - end - - sig { params(url: String, detail: Symbol).returns(T.attached_class) } - def self.new(url:, detail: nil) - end - - sig { override.returns({url: String, detail: Symbol}) } - def 
to_hash - end - - class Detail < OpenAI::Enum - abstract! - - AUTO = :auto - LOW = :low - HIGH = :high - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part_input_audio.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part_input_audio.rbi deleted file mode 100644 index bff9cc3b..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_content_part_input_audio.rbi +++ /dev/null @@ -1,85 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionContentPartInputAudio = T.type_alias { Chat::ChatCompletionContentPartInputAudio } - - module Chat - class ChatCompletionContentPartInputAudio < OpenAI::BaseModel - sig { returns(OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio) } - def input_audio - end - - sig do - params(_: OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio) - .returns(OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio) - end - def input_audio=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params(input_audio: OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio, type: Symbol) - .returns(T.attached_class) - end - def self.new(input_audio:, type: :input_audio) - end - - sig do - override - .returns( - {input_audio: OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio, type: Symbol} - ) - end - def to_hash - end - - class InputAudio < OpenAI::BaseModel - sig { returns(String) } - def data - end - - sig { params(_: String).returns(String) } - def data=(_) - end - - sig { returns(Symbol) } - def format_ - end - - sig { params(_: Symbol).returns(Symbol) } - def format_=(_) - end - - sig { params(data: String, format_: Symbol).returns(T.attached_class) } - def self.new(data:, format_:) - end - - sig { override.returns({data: String, format_: Symbol}) } - def to_hash - end - - class Format < OpenAI::Enum - abstract! 
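`Detail` above and the input-audio `Format` below are plain symbol enums; both content parts nest a single payload object with a fixed `type` default. A sketch of constructing each, assuming the audio payload is the usual base64-encoded string (the RBI only constrains it to `String`):

```ruby
require "base64"

image_part = OpenAI::Models::Chat::ChatCompletionContentPartImage.new(
  image_url: OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL.new(
    url: "https://example.com/cat.png",
    detail: :low                                             # Detail: :auto, :low, :high
  )
)

audio_part = OpenAI::Models::Chat::ChatCompletionContentPartInputAudio.new(
  input_audio: OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio.new(
    data: Base64.strict_encode64(File.binread("clip.wav")),  # base64 payload assumed
    format_: :wav
  )
)
```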
- - WAV = :wav - MP3 = :mp3 - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part_refusal.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part_refusal.rbi deleted file mode 100644 index 263c9c0b..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_content_part_refusal.rbi +++ /dev/null @@ -1,35 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionContentPartRefusal = T.type_alias { Chat::ChatCompletionContentPartRefusal } - - module Chat - class ChatCompletionContentPartRefusal < OpenAI::BaseModel - sig { returns(String) } - def refusal - end - - sig { params(_: String).returns(String) } - def refusal=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(refusal: String, type: Symbol).returns(T.attached_class) } - def self.new(refusal:, type: :refusal) - end - - sig { override.returns({refusal: String, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part_text.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part_text.rbi deleted file mode 100644 index 84a24c5d..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_content_part_text.rbi +++ /dev/null @@ -1,35 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionContentPartText = T.type_alias { Chat::ChatCompletionContentPartText } - - module Chat - class ChatCompletionContentPartText < OpenAI::BaseModel - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(text: String, type: Symbol).returns(T.attached_class) } - def self.new(text:, type: :text) - end - - sig { override.returns({text: String, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_deleted.rbi b/rbi/lib/openai/models/chat/chat_completion_deleted.rbi deleted file mode 100644 index 763165dc..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_deleted.rbi +++ /dev/null @@ -1,43 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionDeleted = T.type_alias { Chat::ChatCompletionDeleted } - - module Chat - class ChatCompletionDeleted < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T::Boolean) } - def deleted - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def deleted=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { params(id: String, deleted: T::Boolean, object: Symbol).returns(T.attached_class) } - def self.new(id:, deleted:, object: :"chat.completion.deleted") - end - - sig { override.returns({id: String, deleted: T::Boolean, object: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_developer_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_developer_message_param.rbi deleted file mode 100644 index b7f2d9e7..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_developer_message_param.rbi +++ /dev/null @@ -1,77 +0,0 @@ -# typed: strong - -module OpenAI - module Models - 
ChatCompletionDeveloperMessageParam = T.type_alias { Chat::ChatCompletionDeveloperMessageParam } - - module Chat - class ChatCompletionDeveloperMessageParam < OpenAI::BaseModel - sig { returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) } - def content - end - - sig do - params(_: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) - .returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) - end - def content=(_) - end - - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { returns(T.nilable(String)) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig do - params( - content: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]), - name: String, - role: Symbol - ) - .returns(T.attached_class) - end - def self.new(content:, name: nil, role: :developer) - end - - sig do - override - .returns( - { - content: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]), - role: Symbol, - name: String - } - ) - end - def to_hash - end - - class Content < OpenAI::Union - abstract! - - ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } - - class << self - sig do - override - .returns([[NilClass, String], [NilClass, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]) - end - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_function_call_option.rbi b/rbi/lib/openai/models/chat/chat_completion_function_call_option.rbi deleted file mode 100644 index abf316d2..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_function_call_option.rbi +++ /dev/null @@ -1,27 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionFunctionCallOption = T.type_alias { Chat::ChatCompletionFunctionCallOption } - - module Chat - class ChatCompletionFunctionCallOption < OpenAI::BaseModel - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { params(name: String).returns(T.attached_class) } - def self.new(name:) - end - - sig { override.returns({name: String}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_function_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_function_message_param.rbi deleted file mode 100644 index 1cf6ef40..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_function_message_param.rbi +++ /dev/null @@ -1,43 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionFunctionMessageParam = T.type_alias { Chat::ChatCompletionFunctionMessageParam } - - module Chat - class ChatCompletionFunctionMessageParam < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def content - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def content=(_) - end - - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { params(content: T.nilable(String), name: String, role: Symbol).returns(T.attached_class) } - def self.new(content:, name:, role: :function) - end - - sig { override.returns({content: T.nilable(String), name: String, role: Symbol}) } - def to_hash - end - end - 
end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_message.rbi b/rbi/lib/openai/models/chat/chat_completion_message.rbi deleted file mode 100644 index 91a6344d..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_message.rbi +++ /dev/null @@ -1,223 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionMessage = T.type_alias { Chat::ChatCompletionMessage } - - module Chat - class ChatCompletionMessage < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def content - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def content=(_) - end - - sig { returns(T.nilable(String)) } - def refusal - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def refusal=(_) - end - - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionMessage::Annotation])) } - def annotations - end - - sig do - params(_: T::Array[OpenAI::Models::Chat::ChatCompletionMessage::Annotation]) - .returns(T::Array[OpenAI::Models::Chat::ChatCompletionMessage::Annotation]) - end - def annotations=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAudio)) } - def audio - end - - sig do - params(_: T.nilable(OpenAI::Models::Chat::ChatCompletionAudio)) - .returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAudio)) - end - def audio=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall)) } - def function_call - end - - sig do - params(_: OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall) - .returns(OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall) - end - def function_call=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall])) } - def tool_calls - end - - sig do - params(_: T::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall]) - .returns(T::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall]) - end - def tool_calls=(_) - end - - sig do - params( - content: T.nilable(String), - refusal: T.nilable(String), - annotations: T::Array[OpenAI::Models::Chat::ChatCompletionMessage::Annotation], - audio: T.nilable(OpenAI::Models::Chat::ChatCompletionAudio), - function_call: OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall, - tool_calls: T::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall], - role: Symbol - ) - .returns(T.attached_class) - end - def self.new(content:, refusal:, annotations: nil, audio: nil, function_call: nil, tool_calls: nil, role: :assistant) - end - - sig do - override - .returns( - { - content: T.nilable(String), - refusal: T.nilable(String), - role: Symbol, - annotations: T::Array[OpenAI::Models::Chat::ChatCompletionMessage::Annotation], - audio: T.nilable(OpenAI::Models::Chat::ChatCompletionAudio), - function_call: OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall, - tool_calls: T::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall] - } - ) - end - def to_hash - end - - class Annotation < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation) } - def url_citation - end - - sig do - params(_: OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation) - .returns(OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation) - end - def url_citation=(_) 
- end - - sig do - params(url_citation: OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation, type: Symbol) - .returns(T.attached_class) - end - def self.new(url_citation:, type: :url_citation) - end - - sig do - override - .returns( - {type: Symbol, url_citation: OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation} - ) - end - def to_hash - end - - class URLCitation < OpenAI::BaseModel - sig { returns(Integer) } - def end_index - end - - sig { params(_: Integer).returns(Integer) } - def end_index=(_) - end - - sig { returns(Integer) } - def start_index - end - - sig { params(_: Integer).returns(Integer) } - def start_index=(_) - end - - sig { returns(String) } - def title - end - - sig { params(_: String).returns(String) } - def title=(_) - end - - sig { returns(String) } - def url - end - - sig { params(_: String).returns(String) } - def url=(_) - end - - sig do - params( - end_index: Integer, - start_index: Integer, - title: String, - url: String - ).returns(T.attached_class) - end - def self.new(end_index:, start_index:, title:, url:) - end - - sig { override.returns({end_index: Integer, start_index: Integer, title: String, url: String}) } - def to_hash - end - end - end - - class FunctionCall < OpenAI::BaseModel - sig { returns(String) } - def arguments - end - - sig { params(_: String).returns(String) } - def arguments=(_) - end - - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { params(arguments: String, name: String).returns(T.attached_class) } - def self.new(arguments:, name:) - end - - sig { override.returns({arguments: String, name: String}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_message_param.rbi deleted file mode 100644 index 471f5f89..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_message_param.rbi +++ /dev/null @@ -1,24 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionMessageParam = T.type_alias { Chat::ChatCompletionMessageParam } - - module Chat - class ChatCompletionMessageParam < OpenAI::Union - abstract! 
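`ChatCompletionMessageParam` is the union over every message role; its variants are listed just below. A conversation is simply an array mixing those params. A sketch using the role-specific `.new` defaults from the surrounding files:

```ruby
messages = [
  OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam.new(
    content: "You are a terse assistant."   # a String, or an array of text parts
  ),
  OpenAI::Models::Chat::ChatCompletionSystemMessageParam.new(
    content: "Prefer metric units."
  ),
  OpenAI::Models::Chat::ChatCompletionUserMessageParam.new(
    content: "How far is the Moon?"
  )
]
```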
- - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam], [Symbol, OpenAI::Models::Chat::ChatCompletionSystemMessageParam], [Symbol, OpenAI::Models::Chat::ChatCompletionUserMessageParam], [Symbol, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam], [Symbol, OpenAI::Models::Chat::ChatCompletionToolMessageParam], [Symbol, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_message_tool_call.rbi b/rbi/lib/openai/models/chat/chat_completion_message_tool_call.rbi deleted file mode 100644 index ca4c6ac0..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_message_tool_call.rbi +++ /dev/null @@ -1,80 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionMessageToolCall = T.type_alias { Chat::ChatCompletionMessageToolCall } - - module Chat - class ChatCompletionMessageToolCall < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function) } - def function - end - - sig do - params(_: OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function) - .returns(OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function) - end - def function=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params(id: String, function: OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function, type: Symbol) - .returns(T.attached_class) - end - def self.new(id:, function:, type: :function) - end - - sig do - override - .returns( - {id: String, function: OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function, type: Symbol} - ) - end - def to_hash - end - - class Function < OpenAI::BaseModel - sig { returns(String) } - def arguments - end - - sig { params(_: String).returns(String) } - def arguments=(_) - end - - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { params(arguments: String, name: String).returns(T.attached_class) } - def self.new(arguments:, name:) - end - - sig { override.returns({arguments: String, name: String}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_modality.rbi b/rbi/lib/openai/models/chat/chat_completion_modality.rbi deleted file mode 100644 index bb95e5c8..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_modality.rbi +++ /dev/null @@ -1,22 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionModality = T.type_alias { Chat::ChatCompletionModality } - - module Chat - class ChatCompletionModality < OpenAI::Enum - abstract! 
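`ChatCompletionModality`'s two symbols pair with `ChatCompletionAudioParam` (deleted earlier in this diff): requesting `:audio` output is only meaningful alongside an audio param. The trailing underscore in `format_` presumably sidesteps a clash with Ruby's built-in `format`. A sketch:

```ruby
audio = OpenAI::Models::Chat::ChatCompletionAudioParam.new(
  format_: :wav,   # Format: :wav, :mp3, :flac, :opus, :pcm16
  voice: :alloy    # Voice: :alloy, :ash, :ballad, :coral, :echo, :sage, :shimmer, :verse
)
# These travel with the request as `modalities: [:text, :audio], audio: audio`.
```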
- - TEXT = :text - AUDIO = :audio - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_named_tool_choice.rbi b/rbi/lib/openai/models/chat/chat_completion_named_tool_choice.rbi deleted file mode 100644 index 89db6837..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_named_tool_choice.rbi +++ /dev/null @@ -1,59 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionNamedToolChoice = T.type_alias { Chat::ChatCompletionNamedToolChoice } - - module Chat - class ChatCompletionNamedToolChoice < OpenAI::BaseModel - sig { returns(OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function) } - def function - end - - sig do - params(_: OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function) - .returns(OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function) - end - def function=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params(function: OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function, type: Symbol) - .returns(T.attached_class) - end - def self.new(function:, type: :function) - end - - sig { override.returns({function: OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function, type: Symbol}) } - def to_hash - end - - class Function < OpenAI::BaseModel - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { params(name: String).returns(T.attached_class) } - def self.new(name:) - end - - sig { override.returns({name: String}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_prediction_content.rbi b/rbi/lib/openai/models/chat/chat_completion_prediction_content.rbi deleted file mode 100644 index 279b9fbe..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_prediction_content.rbi +++ /dev/null @@ -1,64 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionPredictionContent = T.type_alias { Chat::ChatCompletionPredictionContent } - - module Chat - class ChatCompletionPredictionContent < OpenAI::BaseModel - sig { returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) } - def content - end - - sig do - params(_: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) - .returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) - end - def content=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - content: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]), - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(content:, type: :content) - end - - sig do - override - .returns( - {content: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]), type: Symbol} - ) - end - def to_hash - end - - class Content < OpenAI::Union - abstract! 
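Predicted output reuses the same string-or-text-parts `Content` union shape as the message params. A sketch:

```ruby
prediction = OpenAI::Models::Chat::ChatCompletionPredictionContent.new(
  content: "def add(a, b)\n  a + b\nend\n"   # the expected bulk of the reply
  # `type:` defaults to :content
)
```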
- - ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } - - class << self - sig do - override - .returns([[NilClass, String], [NilClass, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]) - end - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_reasoning_effort.rbi b/rbi/lib/openai/models/chat/chat_completion_reasoning_effort.rbi deleted file mode 100644 index 204ceb39..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_reasoning_effort.rbi +++ /dev/null @@ -1,11 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionReasoningEffort = T.type_alias { Chat::ChatCompletionReasoningEffort } - - module Chat - ChatCompletionReasoningEffort = T.type_alias { OpenAI::Models::ReasoningEffort } - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_role.rbi b/rbi/lib/openai/models/chat/chat_completion_role.rbi deleted file mode 100644 index 807d6735..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_role.rbi +++ /dev/null @@ -1,26 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionRole = T.type_alias { Chat::ChatCompletionRole } - - module Chat - class ChatCompletionRole < OpenAI::Enum - abstract! - - DEVELOPER = :developer - SYSTEM = :system - USER = :user - ASSISTANT = :assistant - TOOL = :tool - FUNCTION = :function - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_store_message.rbi b/rbi/lib/openai/models/chat/chat_completion_store_message.rbi deleted file mode 100644 index 0c487bdc..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_store_message.rbi +++ /dev/null @@ -1,27 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionStoreMessage = T.type_alias { Chat::ChatCompletionStoreMessage } - - module Chat - class ChatCompletionStoreMessage < OpenAI::Models::Chat::ChatCompletionMessage - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { params(id: String).returns(T.attached_class) } - def self.new(id:) - end - - sig { override.returns({id: String}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_stream_options.rbi b/rbi/lib/openai/models/chat/chat_completion_stream_options.rbi deleted file mode 100644 index 88104415..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_stream_options.rbi +++ /dev/null @@ -1,27 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionStreamOptions = T.type_alias { Chat::ChatCompletionStreamOptions } - - module Chat - class ChatCompletionStreamOptions < OpenAI::BaseModel - sig { returns(T.nilable(T::Boolean)) } - def include_usage - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def include_usage=(_) - end - - sig { params(include_usage: T::Boolean).returns(T.attached_class) } - def self.new(include_usage: nil) - end - - sig { override.returns({include_usage: T::Boolean}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_system_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_system_message_param.rbi deleted file mode 100644 index 4c4e8def..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_system_message_param.rbi +++ /dev/null @@ -1,77 +0,0 @@ -# typed: strong - -module OpenAI - 
module Models - ChatCompletionSystemMessageParam = T.type_alias { Chat::ChatCompletionSystemMessageParam } - - module Chat - class ChatCompletionSystemMessageParam < OpenAI::BaseModel - sig { returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) } - def content - end - - sig do - params(_: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) - .returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) - end - def content=(_) - end - - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { returns(T.nilable(String)) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig do - params( - content: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]), - name: String, - role: Symbol - ) - .returns(T.attached_class) - end - def self.new(content:, name: nil, role: :system) - end - - sig do - override - .returns( - { - content: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]), - role: Symbol, - name: String - } - ) - end - def to_hash - end - - class Content < OpenAI::Union - abstract! - - ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } - - class << self - sig do - override - .returns([[NilClass, String], [NilClass, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]) - end - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_token_logprob.rbi b/rbi/lib/openai/models/chat/chat_completion_token_logprob.rbi deleted file mode 100644 index c280c13e..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_token_logprob.rbi +++ /dev/null @@ -1,112 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionTokenLogprob = T.type_alias { Chat::ChatCompletionTokenLogprob } - - module Chat - class ChatCompletionTokenLogprob < OpenAI::BaseModel - sig { returns(String) } - def token - end - - sig { params(_: String).returns(String) } - def token=(_) - end - - sig { returns(T.nilable(T::Array[Integer])) } - def bytes - end - - sig { params(_: T.nilable(T::Array[Integer])).returns(T.nilable(T::Array[Integer])) } - def bytes=(_) - end - - sig { returns(Float) } - def logprob - end - - sig { params(_: Float).returns(Float) } - def logprob=(_) - end - - sig { returns(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob]) } - def top_logprobs - end - - sig do - params(_: T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob]) - .returns(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob]) - end - def top_logprobs=(_) - end - - sig do - params( - token: String, - bytes: T.nilable(T::Array[Integer]), - logprob: Float, - top_logprobs: T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob] - ) - .returns(T.attached_class) - end - def self.new(token:, bytes:, logprob:, top_logprobs:) - end - - sig do - override - .returns( - { - token: String, - bytes: T.nilable(T::Array[Integer]), - logprob: Float, - top_logprobs: T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob] - } - ) - end - def to_hash - end - - class TopLogprob < OpenAI::BaseModel - sig { returns(String) } - def token - end - - sig { params(_: String).returns(String) } - def token=(_) - end - - sig { returns(T.nilable(T::Array[Integer])) } - def bytes - end - - sig { params(_: 
T.nilable(T::Array[Integer])).returns(T.nilable(T::Array[Integer])) } - def bytes=(_) - end - - sig { returns(Float) } - def logprob - end - - sig { params(_: Float).returns(Float) } - def logprob=(_) - end - - sig do - params( - token: String, - bytes: T.nilable(T::Array[Integer]), - logprob: Float - ).returns(T.attached_class) - end - def self.new(token:, bytes:, logprob:) - end - - sig { override.returns({token: String, bytes: T.nilable(T::Array[Integer]), logprob: Float}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_tool.rbi b/rbi/lib/openai/models/chat/chat_completion_tool.rbi deleted file mode 100644 index 363528f2..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_tool.rbi +++ /dev/null @@ -1,35 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionTool = T.type_alias { Chat::ChatCompletionTool } - - module Chat - class ChatCompletionTool < OpenAI::BaseModel - sig { returns(OpenAI::Models::FunctionDefinition) } - def function - end - - sig { params(_: OpenAI::Models::FunctionDefinition).returns(OpenAI::Models::FunctionDefinition) } - def function=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(function: OpenAI::Models::FunctionDefinition, type: Symbol).returns(T.attached_class) } - def self.new(function:, type: :function) - end - - sig { override.returns({function: OpenAI::Models::FunctionDefinition, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_tool_choice_option.rbi b/rbi/lib/openai/models/chat/chat_completion_tool_choice_option.rbi deleted file mode 100644 index 3d7a6aea..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_tool_choice_option.rbi +++ /dev/null @@ -1,33 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionToolChoiceOption = T.type_alias { Chat::ChatCompletionToolChoiceOption } - - module Chat - class ChatCompletionToolChoiceOption < OpenAI::Union - abstract! - - class Auto < OpenAI::Enum - abstract! 
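A tool is a thin wrapper around `OpenAI::Models::FunctionDefinition`, whose own RBI is not part of this hunk; the fields used below (`name`, `description`, `parameters`) are assumed to mirror `CompletionCreateParams::Function` later in this diff. A sketch with a hypothetical function:

```ruby
weather_tool = OpenAI::Models::Chat::ChatCompletionTool.new(
  function: OpenAI::Models::FunctionDefinition.new(
    name: "get_weather",                               # hypothetical example function
    description: "Look up current weather for a city",
    parameters: {
      type: "object",
      properties: {city: {type: "string"}},
      required: ["city"]
    }
  )
  # `type:` defaults to :function
)
```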
- - NONE = :none - AUTO = :auto - REQUIRED = :required - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, OpenAI::Models::Chat::ChatCompletionNamedToolChoice]]) } - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_tool_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_tool_message_param.rbi deleted file mode 100644 index e320aa6c..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_tool_message_param.rbi +++ /dev/null @@ -1,77 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionToolMessageParam = T.type_alias { Chat::ChatCompletionToolMessageParam } - - module Chat - class ChatCompletionToolMessageParam < OpenAI::BaseModel - sig { returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) } - def content - end - - sig do - params(_: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) - .returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) - end - def content=(_) - end - - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { returns(String) } - def tool_call_id - end - - sig { params(_: String).returns(String) } - def tool_call_id=(_) - end - - sig do - params( - content: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]), - tool_call_id: String, - role: Symbol - ) - .returns(T.attached_class) - end - def self.new(content:, tool_call_id:, role: :tool) - end - - sig do - override - .returns( - { - content: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]), - role: Symbol, - tool_call_id: String - } - ) - end - def to_hash - end - - class Content < OpenAI::Union - abstract! 
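Per the union just above, `tool_choice` is either a bare mode symbol or a `ChatCompletionNamedToolChoice`; and an executed tool call is answered with a tool message keyed by the call's id. A sketch (the id and function name are hypothetical):

```ruby
tool_choice = :auto   # or :none / :required, per the Auto enum above
# ...or force one specific function:
tool_choice = OpenAI::Models::Chat::ChatCompletionNamedToolChoice.new(
  function: OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function.new(name: "get_weather")
)

# Feed a tool result back into the conversation:
tool_reply = OpenAI::Models::Chat::ChatCompletionToolMessageParam.new(
  content: '{"temp_c": 18}',
  tool_call_id: "call_abc123"   # hypothetical: copied from the assistant's tool_calls
)
```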
- - ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } - - class << self - sig do - override - .returns([[NilClass, String], [NilClass, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]) - end - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/chat_completion_user_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_user_message_param.rbi deleted file mode 100644 index a41ad724..00000000 --- a/rbi/lib/openai/models/chat/chat_completion_user_message_param.rbi +++ /dev/null @@ -1,159 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ChatCompletionUserMessageParam = T.type_alias { Chat::ChatCompletionUserMessageParam } - - module Chat - class ChatCompletionUserMessageParam < OpenAI::BaseModel - sig do - returns( - T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartImage, - OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, - OpenAI::Models::Chat::ChatCompletionContentPart::File - ) - ] - ) - ) - end - def content - end - - sig do - params( - _: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartImage, - OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, - OpenAI::Models::Chat::ChatCompletionContentPart::File - ) - ] - ) - ) - .returns( - T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartImage, - OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, - OpenAI::Models::Chat::ChatCompletionContentPart::File - ) - ] - ) - ) - end - def content=(_) - end - - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { returns(T.nilable(String)) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig do - params( - content: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartImage, - OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, - OpenAI::Models::Chat::ChatCompletionContentPart::File - ) - ] - ), - name: String, - role: Symbol - ) - .returns(T.attached_class) - end - def self.new(content:, name: nil, role: :user) - end - - sig do - override - .returns( - { - content: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartImage, - OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, - OpenAI::Models::Chat::ChatCompletionContentPart::File - ) - ] - ), - role: Symbol, - name: String - } - ) - end - def to_hash - end - - class Content < OpenAI::Union - abstract! 
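The user message's `Content` union accepts a plain string or the four-way content-part array typed above. Both forms, as a sketch:

```ruby
plain = OpenAI::Models::Chat::ChatCompletionUserMessageParam.new(
  content: "Describe this image."
)

multimodal = OpenAI::Models::Chat::ChatCompletionUserMessageParam.new(
  content: [
    OpenAI::Models::Chat::ChatCompletionContentPartText.new(text: "Describe this image."),
    OpenAI::Models::Chat::ChatCompletionContentPartImage.new(
      image_url: OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL.new(
        url: "https://example.com/photo.jpg"
      )
    )
  ]
)
```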
- - ChatCompletionContentPartArray = T.type_alias do - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartImage, - OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, - OpenAI::Models::Chat::ChatCompletionContentPart::File - ) - ] - end - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [ - NilClass, - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionContentPartText, - OpenAI::Models::Chat::ChatCompletionContentPartImage, - OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, - OpenAI::Models::Chat::ChatCompletionContentPart::File - ) - ] - ] - ] - ) - end - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/completion_create_params.rbi b/rbi/lib/openai/models/chat/completion_create_params.rbi deleted file mode 100644 index 77aab350..00000000 --- a/rbi/lib/openai/models/chat/completion_create_params.rbi +++ /dev/null @@ -1,758 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Chat - class CompletionCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - returns( - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, - OpenAI::Models::Chat::ChatCompletionSystemMessageParam, - OpenAI::Models::Chat::ChatCompletionUserMessageParam, - OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, - OpenAI::Models::Chat::ChatCompletionToolMessageParam, - OpenAI::Models::Chat::ChatCompletionFunctionMessageParam - ) - ] - ) - end - def messages - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, - OpenAI::Models::Chat::ChatCompletionSystemMessageParam, - OpenAI::Models::Chat::ChatCompletionUserMessageParam, - OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, - OpenAI::Models::Chat::ChatCompletionToolMessageParam, - OpenAI::Models::Chat::ChatCompletionFunctionMessageParam - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, - OpenAI::Models::Chat::ChatCompletionSystemMessageParam, - OpenAI::Models::Chat::ChatCompletionUserMessageParam, - OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, - OpenAI::Models::Chat::ChatCompletionToolMessageParam, - OpenAI::Models::Chat::ChatCompletionFunctionMessageParam - ) - ] - ) - end - def messages=(_) - end - - sig { returns(T.any(String, Symbol)) } - def model - end - - sig { params(_: T.any(String, Symbol)).returns(T.any(String, Symbol)) } - def model=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAudioParam)) } - def audio - end - - sig do - params(_: T.nilable(OpenAI::Models::Chat::ChatCompletionAudioParam)) - .returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAudioParam)) - end - def audio=(_) - end - - sig { returns(T.nilable(Float)) } - def frequency_penalty - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def frequency_penalty=(_) - end - - sig { returns(T.nilable(T.any(Symbol, OpenAI::Models::Chat::ChatCompletionFunctionCallOption))) } - def function_call - end - - sig do - params(_: T.any(Symbol, OpenAI::Models::Chat::ChatCompletionFunctionCallOption)) - .returns(T.any(Symbol, OpenAI::Models::Chat::ChatCompletionFunctionCallOption)) - end - def function_call=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::CompletionCreateParams::Function])) } - def functions - end - - sig 
do - params(_: T::Array[OpenAI::Models::Chat::CompletionCreateParams::Function]) - .returns(T::Array[OpenAI::Models::Chat::CompletionCreateParams::Function]) - end - def functions=(_) - end - - sig { returns(T.nilable(T::Hash[Symbol, Integer])) } - def logit_bias - end - - sig { params(_: T.nilable(T::Hash[Symbol, Integer])).returns(T.nilable(T::Hash[Symbol, Integer])) } - def logit_bias=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def logprobs - end - - sig { params(_: T.nilable(T::Boolean)).returns(T.nilable(T::Boolean)) } - def logprobs=(_) - end - - sig { returns(T.nilable(Integer)) } - def max_completion_tokens - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def max_completion_tokens=(_) - end - - sig { returns(T.nilable(Integer)) } - def max_tokens - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def max_tokens=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.nilable(T::Array[Symbol])) } - def modalities - end - - sig { params(_: T.nilable(T::Array[Symbol])).returns(T.nilable(T::Array[Symbol])) } - def modalities=(_) - end - - sig { returns(T.nilable(Integer)) } - def n - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def n=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def parallel_tool_calls - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def parallel_tool_calls=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionPredictionContent)) } - def prediction - end - - sig do - params(_: T.nilable(OpenAI::Models::Chat::ChatCompletionPredictionContent)) - .returns(T.nilable(OpenAI::Models::Chat::ChatCompletionPredictionContent)) - end - def prediction=(_) - end - - sig { returns(T.nilable(Float)) } - def presence_penalty - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def presence_penalty=(_) - end - - sig { returns(T.nilable(Symbol)) } - def reasoning_effort - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def reasoning_effort=(_) - end - - sig do - returns( - T.nilable( - T.any( - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONSchema, - OpenAI::Models::ResponseFormatJSONObject - ) - ) - ) - end - def response_format - end - - sig do - params( - _: T.any( - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONSchema, - OpenAI::Models::ResponseFormatJSONObject - ) - ) - .returns( - T.any( - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONSchema, - OpenAI::Models::ResponseFormatJSONObject - ) - ) - end - def response_format=(_) - end - - sig { returns(T.nilable(Integer)) } - def seed - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def seed=(_) - end - - sig { returns(T.nilable(Symbol)) } - def service_tier - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def service_tier=(_) - end - - sig { returns(T.nilable(T.any(String, T::Array[String]))) } - def stop - end - - sig do - params( - _: T.nilable( - T.any( - String, - T::Array[String] - ) - ) - ).returns(T.nilable(T.any(String, T::Array[String]))) - end - def stop=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def store - end - - sig { params(_: T.nilable(T::Boolean)).returns(T.nilable(T::Boolean)) } - def store=(_) - end - - sig { 
returns(T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions)) } - def stream_options - end - - sig do - params(_: T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions)) - .returns(T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions)) - end - def stream_options=(_) - end - - sig { returns(T.nilable(Float)) } - def temperature - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def temperature=(_) - end - - sig { returns(T.nilable(T.any(Symbol, OpenAI::Models::Chat::ChatCompletionNamedToolChoice))) } - def tool_choice - end - - sig do - params(_: T.any(Symbol, OpenAI::Models::Chat::ChatCompletionNamedToolChoice)) - .returns(T.any(Symbol, OpenAI::Models::Chat::ChatCompletionNamedToolChoice)) - end - def tool_choice=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTool])) } - def tools - end - - sig do - params(_: T::Array[OpenAI::Models::Chat::ChatCompletionTool]) - .returns(T::Array[OpenAI::Models::Chat::ChatCompletionTool]) - end - def tools=(_) - end - - sig { returns(T.nilable(Integer)) } - def top_logprobs - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def top_logprobs=(_) - end - - sig { returns(T.nilable(Float)) } - def top_p - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def top_p=(_) - end - - sig { returns(T.nilable(String)) } - def user - end - - sig { params(_: String).returns(String) } - def user=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions)) } - def web_search_options - end - - sig do - params(_: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions) - .returns(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions) - end - def web_search_options=(_) - end - - sig do - params( - messages: T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, - OpenAI::Models::Chat::ChatCompletionSystemMessageParam, - OpenAI::Models::Chat::ChatCompletionUserMessageParam, - OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, - OpenAI::Models::Chat::ChatCompletionToolMessageParam, - OpenAI::Models::Chat::ChatCompletionFunctionMessageParam - ) - ], - model: T.any(String, Symbol), - audio: T.nilable(OpenAI::Models::Chat::ChatCompletionAudioParam), - frequency_penalty: T.nilable(Float), - function_call: T.any(Symbol, OpenAI::Models::Chat::ChatCompletionFunctionCallOption), - functions: T::Array[OpenAI::Models::Chat::CompletionCreateParams::Function], - logit_bias: T.nilable(T::Hash[Symbol, Integer]), - logprobs: T.nilable(T::Boolean), - max_completion_tokens: T.nilable(Integer), - max_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - modalities: T.nilable(T::Array[Symbol]), - n: T.nilable(Integer), - parallel_tool_calls: T::Boolean, - prediction: T.nilable(OpenAI::Models::Chat::ChatCompletionPredictionContent), - presence_penalty: T.nilable(Float), - reasoning_effort: T.nilable(Symbol), - response_format: T.any( - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONSchema, - OpenAI::Models::ResponseFormatJSONObject - ), - seed: T.nilable(Integer), - service_tier: T.nilable(Symbol), - stop: T.nilable(T.any(String, T::Array[String])), - store: T.nilable(T::Boolean), - stream_options: T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions), - temperature: T.nilable(Float), - tool_choice: T.any(Symbol, OpenAI::Models::Chat::ChatCompletionNamedToolChoice), - tools: 
T::Array[OpenAI::Models::Chat::ChatCompletionTool], - top_logprobs: T.nilable(Integer), - top_p: T.nilable(Float), - user: String, - web_search_options: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new( - messages:, - model:, - audio: nil, - frequency_penalty: nil, - function_call: nil, - functions: nil, - logit_bias: nil, - logprobs: nil, - max_completion_tokens: nil, - max_tokens: nil, - metadata: nil, - modalities: nil, - n: nil, - parallel_tool_calls: nil, - prediction: nil, - presence_penalty: nil, - reasoning_effort: nil, - response_format: nil, - seed: nil, - service_tier: nil, - stop: nil, - store: nil, - stream_options: nil, - temperature: nil, - tool_choice: nil, - tools: nil, - top_logprobs: nil, - top_p: nil, - user: nil, - web_search_options: nil, - request_options: {} - ) - end - - sig do - override - .returns( - { - messages: T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, - OpenAI::Models::Chat::ChatCompletionSystemMessageParam, - OpenAI::Models::Chat::ChatCompletionUserMessageParam, - OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, - OpenAI::Models::Chat::ChatCompletionToolMessageParam, - OpenAI::Models::Chat::ChatCompletionFunctionMessageParam - ) - ], - model: T.any(String, Symbol), - audio: T.nilable(OpenAI::Models::Chat::ChatCompletionAudioParam), - frequency_penalty: T.nilable(Float), - function_call: T.any(Symbol, OpenAI::Models::Chat::ChatCompletionFunctionCallOption), - functions: T::Array[OpenAI::Models::Chat::CompletionCreateParams::Function], - logit_bias: T.nilable(T::Hash[Symbol, Integer]), - logprobs: T.nilable(T::Boolean), - max_completion_tokens: T.nilable(Integer), - max_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - modalities: T.nilable(T::Array[Symbol]), - n: T.nilable(Integer), - parallel_tool_calls: T::Boolean, - prediction: T.nilable(OpenAI::Models::Chat::ChatCompletionPredictionContent), - presence_penalty: T.nilable(Float), - reasoning_effort: T.nilable(Symbol), - response_format: T.any( - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONSchema, - OpenAI::Models::ResponseFormatJSONObject - ), - seed: T.nilable(Integer), - service_tier: T.nilable(Symbol), - stop: T.nilable(T.any(String, T::Array[String])), - store: T.nilable(T::Boolean), - stream_options: T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions), - temperature: T.nilable(Float), - tool_choice: T.any(Symbol, OpenAI::Models::Chat::ChatCompletionNamedToolChoice), - tools: T::Array[OpenAI::Models::Chat::ChatCompletionTool], - top_logprobs: T.nilable(Integer), - top_p: T.nilable(Float), - user: String, - web_search_options: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Model < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - - class FunctionCall < OpenAI::Union - abstract! - - class FunctionCallMode < OpenAI::Enum - abstract! 
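- # Symbol form of the deprecated function_call setting: :none forces a plain - # assistant message (no function call), while :auto lets the model choose - # between replying and calling one of the supplied functions.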
- - NONE = :none - AUTO = :auto - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, OpenAI::Models::Chat::ChatCompletionFunctionCallOption]]) } - private def variants - end - end - end - - class Function < OpenAI::BaseModel - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { returns(T.nilable(String)) } - def description - end - - sig { params(_: String).returns(String) } - def description=(_) - end - - sig { returns(T.nilable(OpenAI::Models::FunctionParameters)) } - def parameters - end - - sig { params(_: OpenAI::Models::FunctionParameters).returns(OpenAI::Models::FunctionParameters) } - def parameters=(_) - end - - sig do - params(name: String, description: String, parameters: OpenAI::Models::FunctionParameters) - .returns(T.attached_class) - end - def self.new(name:, description: nil, parameters: nil) - end - - sig { override.returns({name: String, description: String, parameters: OpenAI::Models::FunctionParameters}) } - def to_hash - end - end - - class Modality < OpenAI::Enum - abstract! - - TEXT = :text - AUDIO = :audio - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class ResponseFormat < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[NilClass, OpenAI::Models::ResponseFormatText], [NilClass, OpenAI::Models::ResponseFormatJSONSchema], [NilClass, OpenAI::Models::ResponseFormatJSONObject]] - ) - end - private def variants - end - end - end - - class ServiceTier < OpenAI::Enum - abstract! - - AUTO = T.let(:auto, T.nilable(Symbol)) - DEFAULT = T.let(:default, T.nilable(Symbol)) - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Stop < OpenAI::Union - abstract! - - StringArray = T.type_alias { T::Array[String] } - - class << self - sig { override.returns([[NilClass, String], [NilClass, T::Array[String]]]) } - private def variants - end - end - end - - class WebSearchOptions < OpenAI::BaseModel - sig { returns(T.nilable(Symbol)) } - def search_context_size - end - - sig { params(_: Symbol).returns(Symbol) } - def search_context_size=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation)) } - def user_location - end - - sig do - params(_: T.nilable(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation)) - .returns(T.nilable(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation)) - end - def user_location=(_) - end - - sig do - params( - search_context_size: Symbol, - user_location: T.nilable(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation) - ) - .returns(T.attached_class) - end - def self.new(search_context_size: nil, user_location: nil) - end - - sig do - override - .returns( - { - search_context_size: Symbol, - user_location: T.nilable(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation) - } - ) - end - def to_hash - end - - class SearchContextSize < OpenAI::Enum - abstract! 
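- # Amount of search context the web search tool retrieves: one of :low, - # :medium (the documented default), or :high. A minimal sketch of passing - # these params, assuming a configured OpenAI::Client and a search-preview - # model (the chat.completions resource itself is not shown here): - # - # client.chat.completions.create( - # model: :"gpt-4o-search-preview", - # messages: [{role: :user, content: "What changed in Ruby 3.2?"}], - # web_search_options: {search_context_size: :low} - # )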
- - LOW = :low - MEDIUM = :medium - HIGH = :high - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class UserLocation < OpenAI::BaseModel - sig { returns(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate) } - def approximate - end - - sig do - params(_: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate) - .returns(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate) - end - def approximate=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - approximate: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(approximate:, type: :approximate) - end - - sig do - override - .returns( - { - approximate: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate, - type: Symbol - } - ) - end - def to_hash - end - - class Approximate < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def city - end - - sig { params(_: String).returns(String) } - def city=(_) - end - - sig { returns(T.nilable(String)) } - def country - end - - sig { params(_: String).returns(String) } - def country=(_) - end - - sig { returns(T.nilable(String)) } - def region - end - - sig { params(_: String).returns(String) } - def region=(_) - end - - sig { returns(T.nilable(String)) } - def timezone - end - - sig { params(_: String).returns(String) } - def timezone=(_) - end - - sig do - params( - city: String, - country: String, - region: String, - timezone: String - ).returns(T.attached_class) - end - def self.new(city: nil, country: nil, region: nil, timezone: nil) - end - - sig { override.returns({city: String, country: String, region: String, timezone: String}) } - def to_hash - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/completion_delete_params.rbi b/rbi/lib/openai/models/chat/completion_delete_params.rbi deleted file mode 100644 index 6682081d..00000000 --- a/rbi/lib/openai/models/chat/completion_delete_params.rbi +++ /dev/null @@ -1,23 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Chat - class CompletionDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/completion_list_params.rbi b/rbi/lib/openai/models/chat/completion_list_params.rbi deleted file mode 100644 index 224d64b7..00000000 --- a/rbi/lib/openai/models/chat/completion_list_params.rbi +++ /dev/null @@ -1,95 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Chat - class CompletionListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(String)) } - def after - end - - sig { params(_: String).returns(String) } - def after=(_) - end - - sig { returns(T.nilable(Integer)) } - def limit - end - - sig { params(_: Integer).returns(Integer) } - def limit=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def 
metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.nilable(String)) } - def model - end - - sig { params(_: String).returns(String) } - def model=(_) - end - - sig { returns(T.nilable(Symbol)) } - def order - end - - sig { params(_: Symbol).returns(Symbol) } - def order=(_) - end - - sig do - params( - after: String, - limit: Integer, - metadata: T.nilable(OpenAI::Models::Metadata), - model: String, - order: Symbol, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(after: nil, limit: nil, metadata: nil, model: nil, order: nil, request_options: {}) - end - - sig do - override - .returns( - { - after: String, - limit: Integer, - metadata: T.nilable(OpenAI::Models::Metadata), - model: String, - order: Symbol, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Order < OpenAI::Enum - abstract! - - ASC = :asc - DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/completion_retrieve_params.rbi b/rbi/lib/openai/models/chat/completion_retrieve_params.rbi deleted file mode 100644 index 1422fe6c..00000000 --- a/rbi/lib/openai/models/chat/completion_retrieve_params.rbi +++ /dev/null @@ -1,23 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Chat - class CompletionRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/completion_update_params.rbi b/rbi/lib/openai/models/chat/completion_update_params.rbi deleted file mode 100644 index 7c557df2..00000000 --- a/rbi/lib/openai/models/chat/completion_update_params.rbi +++ /dev/null @@ -1,34 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Chat - class CompletionUpdateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig do - params( - metadata: T.nilable(OpenAI::Models::Metadata), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(metadata:, request_options: {}) - end - - sig { override.returns({metadata: T.nilable(OpenAI::Models::Metadata), request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat/completions/message_list_params.rbi b/rbi/lib/openai/models/chat/completions/message_list_params.rbi deleted file mode 100644 index b5474b6c..00000000 --- a/rbi/lib/openai/models/chat/completions/message_list_params.rbi +++ /dev/null @@ -1,76 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Chat - module Completions - class MessageListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { 
returns(T.nilable(String)) } - def after - end - - sig { params(_: String).returns(String) } - def after=(_) - end - - sig { returns(T.nilable(Integer)) } - def limit - end - - sig { params(_: Integer).returns(Integer) } - def limit=(_) - end - - sig { returns(T.nilable(Symbol)) } - def order - end - - sig { params(_: Symbol).returns(Symbol) } - def order=(_) - end - - sig do - params( - after: String, - limit: Integer, - order: Symbol, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(after: nil, limit: nil, order: nil, request_options: {}) - end - - sig do - override.returns( - { - after: String, - limit: Integer, - order: Symbol, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Order < OpenAI::Enum - abstract! - - ASC = :asc - DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/chat_model.rbi b/rbi/lib/openai/models/chat_model.rbi deleted file mode 100644 index 5120ba73..00000000 --- a/rbi/lib/openai/models/chat_model.rbi +++ /dev/null @@ -1,60 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ChatModel < OpenAI::Enum - abstract! - - O3_MINI = :"o3-mini" - O3_MINI_2025_01_31 = :"o3-mini-2025-01-31" - O1 = :o1 - O1_2024_12_17 = :"o1-2024-12-17" - O1_PREVIEW = :"o1-preview" - O1_PREVIEW_2024_09_12 = :"o1-preview-2024-09-12" - O1_MINI = :"o1-mini" - O1_MINI_2024_09_12 = :"o1-mini-2024-09-12" - COMPUTER_USE_PREVIEW = :"computer-use-preview" - COMPUTER_USE_PREVIEW_2025_02_04 = :"computer-use-preview-2025-02-04" - COMPUTER_USE_PREVIEW_2025_03_11 = :"computer-use-preview-2025-03-11" - GPT_4_5_PREVIEW = :"gpt-4.5-preview" - GPT_4_5_PREVIEW_2025_02_27 = :"gpt-4.5-preview-2025-02-27" - GPT_4O = :"gpt-4o" - GPT_4O_2024_11_20 = :"gpt-4o-2024-11-20" - GPT_4O_2024_08_06 = :"gpt-4o-2024-08-06" - GPT_4O_2024_05_13 = :"gpt-4o-2024-05-13" - GPT_4O_AUDIO_PREVIEW = :"gpt-4o-audio-preview" - GPT_4O_AUDIO_PREVIEW_2024_10_01 = :"gpt-4o-audio-preview-2024-10-01" - GPT_4O_AUDIO_PREVIEW_2024_12_17 = :"gpt-4o-audio-preview-2024-12-17" - GPT_4O_MINI_AUDIO_PREVIEW = :"gpt-4o-mini-audio-preview" - GPT_4O_MINI_AUDIO_PREVIEW_2024_12_17 = :"gpt-4o-mini-audio-preview-2024-12-17" - CHATGPT_4O_LATEST = :"chatgpt-4o-latest" - GPT_4O_MINI = :"gpt-4o-mini" - GPT_4O_MINI_2024_07_18 = :"gpt-4o-mini-2024-07-18" - GPT_4_TURBO = :"gpt-4-turbo" - GPT_4_TURBO_2024_04_09 = :"gpt-4-turbo-2024-04-09" - GPT_4_0125_PREVIEW = :"gpt-4-0125-preview" - GPT_4_TURBO_PREVIEW = :"gpt-4-turbo-preview" - GPT_4_1106_PREVIEW = :"gpt-4-1106-preview" - GPT_4_VISION_PREVIEW = :"gpt-4-vision-preview" - GPT_4 = :"gpt-4" - GPT_4_0314 = :"gpt-4-0314" - GPT_4_0613 = :"gpt-4-0613" - GPT_4_32K = :"gpt-4-32k" - GPT_4_32K_0314 = :"gpt-4-32k-0314" - GPT_4_32K_0613 = :"gpt-4-32k-0613" - GPT_3_5_TURBO = :"gpt-3.5-turbo" - GPT_3_5_TURBO_16K = :"gpt-3.5-turbo-16k" - GPT_3_5_TURBO_0301 = :"gpt-3.5-turbo-0301" - GPT_3_5_TURBO_0613 = :"gpt-3.5-turbo-0613" - GPT_3_5_TURBO_1106 = :"gpt-3.5-turbo-1106" - GPT_3_5_TURBO_0125 = :"gpt-3.5-turbo-0125" - GPT_3_5_TURBO_16K_0613 = :"gpt-3.5-turbo-16k-0613" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end -end diff --git a/rbi/lib/openai/models/comparison_filter.rbi b/rbi/lib/openai/models/comparison_filter.rbi deleted file mode 100644 index a44961a9..00000000 --- a/rbi/lib/openai/models/comparison_filter.rbi +++ /dev/null @@ 
-1,68 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ComparisonFilter < OpenAI::BaseModel - sig { returns(String) } - def key - end - - sig { params(_: String).returns(String) } - def key=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.any(String, Float, T::Boolean)) } - def value - end - - sig { params(_: T.any(String, Float, T::Boolean)).returns(T.any(String, Float, T::Boolean)) } - def value=(_) - end - - sig do - params(key: String, type: Symbol, value: T.any(String, Float, T::Boolean)).returns(T.attached_class) - end - def self.new(key:, type:, value:) - end - - sig { override.returns({key: String, type: Symbol, value: T.any(String, Float, T::Boolean)}) } - def to_hash - end - - class Type < OpenAI::Enum - abstract! - - EQ = :eq - NE = :ne - GT = :gt - GTE = :gte - LT = :lt - LTE = :lte - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Value < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/completion.rbi b/rbi/lib/openai/models/completion.rbi deleted file mode 100644 index 53205098..00000000 --- a/rbi/lib/openai/models/completion.rbi +++ /dev/null @@ -1,95 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class Completion < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T::Array[OpenAI::Models::CompletionChoice]) } - def choices - end - - sig { params(_: T::Array[OpenAI::Models::CompletionChoice]).returns(T::Array[OpenAI::Models::CompletionChoice]) } - def choices=(_) - end - - sig { returns(Integer) } - def created - end - - sig { params(_: Integer).returns(Integer) } - def created=(_) - end - - sig { returns(String) } - def model - end - - sig { params(_: String).returns(String) } - def model=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(T.nilable(String)) } - def system_fingerprint - end - - sig { params(_: String).returns(String) } - def system_fingerprint=(_) - end - - sig { returns(T.nilable(OpenAI::Models::CompletionUsage)) } - def usage - end - - sig { params(_: OpenAI::Models::CompletionUsage).returns(OpenAI::Models::CompletionUsage) } - def usage=(_) - end - - sig do - params( - id: String, - choices: T::Array[OpenAI::Models::CompletionChoice], - created: Integer, - model: String, - system_fingerprint: String, - usage: OpenAI::Models::CompletionUsage, - object: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, choices:, created:, model:, system_fingerprint: nil, usage: nil, object: :text_completion) - end - - sig do - override - .returns( - { - id: String, - choices: T::Array[OpenAI::Models::CompletionChoice], - created: Integer, - model: String, - object: Symbol, - system_fingerprint: String, - usage: OpenAI::Models::CompletionUsage - } - ) - end - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/completion_choice.rbi b/rbi/lib/openai/models/completion_choice.rbi deleted file mode 100644 index 0e80d12b..00000000 --- a/rbi/lib/openai/models/completion_choice.rbi +++ /dev/null @@ -1,142 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class CompletionChoice < OpenAI::BaseModel - sig { 
returns(Symbol) } - def finish_reason - end - - sig { params(_: Symbol).returns(Symbol) } - def finish_reason=(_) - end - - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(T.nilable(OpenAI::Models::CompletionChoice::Logprobs)) } - def logprobs - end - - sig do - params(_: T.nilable(OpenAI::Models::CompletionChoice::Logprobs)) - .returns(T.nilable(OpenAI::Models::CompletionChoice::Logprobs)) - end - def logprobs=(_) - end - - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig do - params( - finish_reason: Symbol, - index: Integer, - logprobs: T.nilable(OpenAI::Models::CompletionChoice::Logprobs), - text: String - ) - .returns(T.attached_class) - end - def self.new(finish_reason:, index:, logprobs:, text:) - end - - sig do - override - .returns( - { - finish_reason: Symbol, - index: Integer, - logprobs: T.nilable(OpenAI::Models::CompletionChoice::Logprobs), - text: String - } - ) - end - def to_hash - end - - class FinishReason < OpenAI::Enum - abstract! - - STOP = :stop - LENGTH = :length - CONTENT_FILTER = :content_filter - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Logprobs < OpenAI::BaseModel - sig { returns(T.nilable(T::Array[Integer])) } - def text_offset - end - - sig { params(_: T::Array[Integer]).returns(T::Array[Integer]) } - def text_offset=(_) - end - - sig { returns(T.nilable(T::Array[Float])) } - def token_logprobs - end - - sig { params(_: T::Array[Float]).returns(T::Array[Float]) } - def token_logprobs=(_) - end - - sig { returns(T.nilable(T::Array[String])) } - def tokens - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def tokens=(_) - end - - sig { returns(T.nilable(T::Array[T::Hash[Symbol, Float]])) } - def top_logprobs - end - - sig { params(_: T::Array[T::Hash[Symbol, Float]]).returns(T::Array[T::Hash[Symbol, Float]]) } - def top_logprobs=(_) - end - - sig do - params( - text_offset: T::Array[Integer], - token_logprobs: T::Array[Float], - tokens: T::Array[String], - top_logprobs: T::Array[T::Hash[Symbol, Float]] - ) - .returns(T.attached_class) - end - def self.new(text_offset: nil, token_logprobs: nil, tokens: nil, top_logprobs: nil) - end - - sig do - override - .returns( - { - text_offset: T::Array[Integer], - token_logprobs: T::Array[Float], - tokens: T::Array[String], - top_logprobs: T::Array[T::Hash[Symbol, Float]] - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/completion_create_params.rbi b/rbi/lib/openai/models/completion_create_params.rbi deleted file mode 100644 index dc1a4be1..00000000 --- a/rbi/lib/openai/models/completion_create_params.rbi +++ /dev/null @@ -1,312 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class CompletionCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.any(String, Symbol)) } - def model - end - - sig { params(_: T.any(String, Symbol)).returns(T.any(String, Symbol)) } - def model=(_) - end - - sig do - returns(T.nilable(T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]]))) - end - def prompt - end - - sig do - params(_: T.nilable(T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]]))) - .returns(T.nilable(T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]]))) - end - def prompt=(_) - end - 
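- # best_of generates that many completions server-side and returns the one - # with the highest log probability per token; when used together with n it - # must be greater than n, and large values can consume tokens quickly.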
- sig { returns(T.nilable(Integer)) } - def best_of - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def best_of=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def echo - end - - sig { params(_: T.nilable(T::Boolean)).returns(T.nilable(T::Boolean)) } - def echo=(_) - end - - sig { returns(T.nilable(Float)) } - def frequency_penalty - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def frequency_penalty=(_) - end - - sig { returns(T.nilable(T::Hash[Symbol, Integer])) } - def logit_bias - end - - sig { params(_: T.nilable(T::Hash[Symbol, Integer])).returns(T.nilable(T::Hash[Symbol, Integer])) } - def logit_bias=(_) - end - - sig { returns(T.nilable(Integer)) } - def logprobs - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def logprobs=(_) - end - - sig { returns(T.nilable(Integer)) } - def max_tokens - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def max_tokens=(_) - end - - sig { returns(T.nilable(Integer)) } - def n - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def n=(_) - end - - sig { returns(T.nilable(Float)) } - def presence_penalty - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def presence_penalty=(_) - end - - sig { returns(T.nilable(Integer)) } - def seed - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def seed=(_) - end - - sig { returns(T.nilable(T.any(String, T::Array[String]))) } - def stop - end - - sig do - params( - _: T.nilable( - T.any( - String, - T::Array[String] - ) - ) - ).returns(T.nilable(T.any(String, T::Array[String]))) - end - def stop=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions)) } - def stream_options - end - - sig do - params(_: T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions)) - .returns(T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions)) - end - def stream_options=(_) - end - - sig { returns(T.nilable(String)) } - def suffix - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def suffix=(_) - end - - sig { returns(T.nilable(Float)) } - def temperature - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def temperature=(_) - end - - sig { returns(T.nilable(Float)) } - def top_p - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def top_p=(_) - end - - sig { returns(T.nilable(String)) } - def user - end - - sig { params(_: String).returns(String) } - def user=(_) - end - - sig do - params( - model: T.any(String, Symbol), - prompt: T.nilable( - T.any( - String, - T::Array[String], - T::Array[Integer], - T::Array[T::Array[Integer]] - ) - ), - best_of: T.nilable(Integer), - echo: T.nilable(T::Boolean), - frequency_penalty: T.nilable(Float), - logit_bias: T.nilable(T::Hash[Symbol, Integer]), - logprobs: T.nilable(Integer), - max_tokens: T.nilable(Integer), - n: T.nilable(Integer), - presence_penalty: T.nilable(Float), - seed: T.nilable(Integer), - stop: T.nilable(T.any(String, T::Array[String])), - stream_options: T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions), - suffix: T.nilable(String), - temperature: T.nilable(Float), - top_p: T.nilable(Float), - user: String, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new( - model:, - prompt:, - best_of: nil, - echo: nil, - frequency_penalty: nil, - logit_bias: nil, - logprobs: nil, - 
max_tokens: nil, - n: nil, - presence_penalty: nil, - seed: nil, - stop: nil, - stream_options: nil, - suffix: nil, - temperature: nil, - top_p: nil, - user: nil, - request_options: {} - ) - end - - sig do - override - .returns( - { - model: T.any(String, Symbol), - prompt: T.nilable( - T.any( - String, - T::Array[String], - T::Array[Integer], - T::Array[T::Array[Integer]] - ) - ), - best_of: T.nilable(Integer), - echo: T.nilable(T::Boolean), - frequency_penalty: T.nilable(Float), - logit_bias: T.nilable(T::Hash[Symbol, Integer]), - logprobs: T.nilable(Integer), - max_tokens: T.nilable(Integer), - n: T.nilable(Integer), - presence_penalty: T.nilable(Float), - seed: T.nilable(Integer), - stop: T.nilable(T.any(String, T::Array[String])), - stream_options: T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions), - suffix: T.nilable(String), - temperature: T.nilable(Float), - top_p: T.nilable(Float), - user: String, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Model < OpenAI::Union - abstract! - - class Preset < OpenAI::Enum - abstract! - - GPT_3_5_TURBO_INSTRUCT = :"gpt-3.5-turbo-instruct" - DAVINCI_002 = :"davinci-002" - BABBAGE_002 = :"babbage-002" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - - class Prompt < OpenAI::Union - abstract! - - StringArray = T.type_alias { T::Array[String] } - - IntegerArray = T.type_alias { T::Array[Integer] } - - ArrayOfToken2DArray = T.type_alias { T::Array[T::Array[Integer]] } - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [NilClass, T::Array[String]], - [NilClass, T::Array[Integer]], - [NilClass, T::Array[T::Array[Integer]]] - ] - ) - end - private def variants - end - end - end - - class Stop < OpenAI::Union - abstract! 
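- # Up to four sequences at which the API stops generating further tokens; - # the union accepts either a single String or an Array of Strings.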
- - StringArray = T.type_alias { T::Array[String] } - - class << self - sig { override.returns([[NilClass, String], [NilClass, T::Array[String]]]) } - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/completion_usage.rbi b/rbi/lib/openai/models/completion_usage.rbi deleted file mode 100644 index 36ebba0c..00000000 --- a/rbi/lib/openai/models/completion_usage.rbi +++ /dev/null @@ -1,173 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class CompletionUsage < OpenAI::BaseModel - sig { returns(Integer) } - def completion_tokens - end - - sig { params(_: Integer).returns(Integer) } - def completion_tokens=(_) - end - - sig { returns(Integer) } - def prompt_tokens - end - - sig { params(_: Integer).returns(Integer) } - def prompt_tokens=(_) - end - - sig { returns(Integer) } - def total_tokens - end - - sig { params(_: Integer).returns(Integer) } - def total_tokens=(_) - end - - sig { returns(T.nilable(OpenAI::Models::CompletionUsage::CompletionTokensDetails)) } - def completion_tokens_details - end - - sig do - params(_: OpenAI::Models::CompletionUsage::CompletionTokensDetails) - .returns(OpenAI::Models::CompletionUsage::CompletionTokensDetails) - end - def completion_tokens_details=(_) - end - - sig { returns(T.nilable(OpenAI::Models::CompletionUsage::PromptTokensDetails)) } - def prompt_tokens_details - end - - sig do - params(_: OpenAI::Models::CompletionUsage::PromptTokensDetails) - .returns(OpenAI::Models::CompletionUsage::PromptTokensDetails) - end - def prompt_tokens_details=(_) - end - - sig do - params( - completion_tokens: Integer, - prompt_tokens: Integer, - total_tokens: Integer, - completion_tokens_details: OpenAI::Models::CompletionUsage::CompletionTokensDetails, - prompt_tokens_details: OpenAI::Models::CompletionUsage::PromptTokensDetails - ) - .returns(T.attached_class) - end - def self.new( - completion_tokens:, - prompt_tokens:, - total_tokens:, - completion_tokens_details: nil, - prompt_tokens_details: nil - ) - end - - sig do - override - .returns( - { - completion_tokens: Integer, - prompt_tokens: Integer, - total_tokens: Integer, - completion_tokens_details: OpenAI::Models::CompletionUsage::CompletionTokensDetails, - prompt_tokens_details: OpenAI::Models::CompletionUsage::PromptTokensDetails - } - ) - end - def to_hash - end - - class CompletionTokensDetails < OpenAI::BaseModel - sig { returns(T.nilable(Integer)) } - def accepted_prediction_tokens - end - - sig { params(_: Integer).returns(Integer) } - def accepted_prediction_tokens=(_) - end - - sig { returns(T.nilable(Integer)) } - def audio_tokens - end - - sig { params(_: Integer).returns(Integer) } - def audio_tokens=(_) - end - - sig { returns(T.nilable(Integer)) } - def reasoning_tokens - end - - sig { params(_: Integer).returns(Integer) } - def reasoning_tokens=(_) - end - - sig { returns(T.nilable(Integer)) } - def rejected_prediction_tokens - end - - sig { params(_: Integer).returns(Integer) } - def rejected_prediction_tokens=(_) - end - - sig do - params( - accepted_prediction_tokens: Integer, - audio_tokens: Integer, - reasoning_tokens: Integer, - rejected_prediction_tokens: Integer - ) - .returns(T.attached_class) - end - def self.new(accepted_prediction_tokens: nil, audio_tokens: nil, reasoning_tokens: nil, rejected_prediction_tokens: nil) - end - - sig do - override - .returns( - { - accepted_prediction_tokens: Integer, - audio_tokens: Integer, - reasoning_tokens: Integer, - rejected_prediction_tokens: Integer - } - ) - end - def to_hash - end - end - - 
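- # Breakdown of the tokens counted as prompt tokens, i.e. audio input tokens - # and tokens served from the prompt cache.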
class PromptTokensDetails < OpenAI::BaseModel - sig { returns(T.nilable(Integer)) } - def audio_tokens - end - - sig { params(_: Integer).returns(Integer) } - def audio_tokens=(_) - end - - sig { returns(T.nilable(Integer)) } - def cached_tokens - end - - sig { params(_: Integer).returns(Integer) } - def cached_tokens=(_) - end - - sig { params(audio_tokens: Integer, cached_tokens: Integer).returns(T.attached_class) } - def self.new(audio_tokens: nil, cached_tokens: nil) - end - - sig { override.returns({audio_tokens: Integer, cached_tokens: Integer}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/compound_filter.rbi b/rbi/lib/openai/models/compound_filter.rbi deleted file mode 100644 index e8e64bd4..00000000 --- a/rbi/lib/openai/models/compound_filter.rbi +++ /dev/null @@ -1,60 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class CompoundFilter < OpenAI::BaseModel - sig { returns(T::Array[T.any(OpenAI::Models::ComparisonFilter, T.anything)]) } - def filters - end - - sig do - params(_: T::Array[T.any(OpenAI::Models::ComparisonFilter, T.anything)]) - .returns(T::Array[T.any(OpenAI::Models::ComparisonFilter, T.anything)]) - end - def filters=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params(filters: T::Array[T.any(OpenAI::Models::ComparisonFilter, T.anything)], type: Symbol) - .returns(T.attached_class) - end - def self.new(filters:, type:) - end - - sig { override.returns({filters: T::Array[T.any(OpenAI::Models::ComparisonFilter, T.anything)], type: Symbol}) } - def to_hash - end - - class Filter < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, OpenAI::Models::ComparisonFilter], [NilClass, T.anything]]) } - private def variants - end - end - end - - class Type < OpenAI::Enum - abstract! 
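- # How the sub-filters combine: :and matches only when every filter matches, - # :or when at least one does.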
- - AND = :and - OR = :or - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/create_embedding_response.rbi b/rbi/lib/openai/models/create_embedding_response.rbi deleted file mode 100644 index 6f823131..00000000 --- a/rbi/lib/openai/models/create_embedding_response.rbi +++ /dev/null @@ -1,94 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class CreateEmbeddingResponse < OpenAI::BaseModel - sig { returns(T::Array[OpenAI::Models::Embedding]) } - def data - end - - sig { params(_: T::Array[OpenAI::Models::Embedding]).returns(T::Array[OpenAI::Models::Embedding]) } - def data=(_) - end - - sig { returns(String) } - def model - end - - sig { params(_: String).returns(String) } - def model=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(OpenAI::Models::CreateEmbeddingResponse::Usage) } - def usage - end - - sig do - params(_: OpenAI::Models::CreateEmbeddingResponse::Usage) - .returns(OpenAI::Models::CreateEmbeddingResponse::Usage) - end - def usage=(_) - end - - sig do - params( - data: T::Array[OpenAI::Models::Embedding], - model: String, - usage: OpenAI::Models::CreateEmbeddingResponse::Usage, - object: Symbol - ) - .returns(T.attached_class) - end - def self.new(data:, model:, usage:, object: :list) - end - - sig do - override - .returns( - { - data: T::Array[OpenAI::Models::Embedding], - model: String, - object: Symbol, - usage: OpenAI::Models::CreateEmbeddingResponse::Usage - } - ) - end - def to_hash - end - - class Usage < OpenAI::BaseModel - sig { returns(Integer) } - def prompt_tokens - end - - sig { params(_: Integer).returns(Integer) } - def prompt_tokens=(_) - end - - sig { returns(Integer) } - def total_tokens - end - - sig { params(_: Integer).returns(Integer) } - def total_tokens=(_) - end - - sig { params(prompt_tokens: Integer, total_tokens: Integer).returns(T.attached_class) } - def self.new(prompt_tokens:, total_tokens:) - end - - sig { override.returns({prompt_tokens: Integer, total_tokens: Integer}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/embedding.rbi b/rbi/lib/openai/models/embedding.rbi deleted file mode 100644 index 11cc9072..00000000 --- a/rbi/lib/openai/models/embedding.rbi +++ /dev/null @@ -1,39 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class Embedding < OpenAI::BaseModel - sig { returns(T::Array[Float]) } - def embedding - end - - sig { params(_: T::Array[Float]).returns(T::Array[Float]) } - def embedding=(_) - end - - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { params(embedding: T::Array[Float], index: Integer, object: Symbol).returns(T.attached_class) } - def self.new(embedding:, index:, object: :embedding) - end - - sig { override.returns({embedding: T::Array[Float], index: Integer, object: Symbol}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/embedding_create_params.rbi b/rbi/lib/openai/models/embedding_create_params.rbi deleted file mode 100644 index 2a08c856..00000000 --- a/rbi/lib/openai/models/embedding_create_params.rbi +++ /dev/null @@ -1,132 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class EmbeddingCreateParams < OpenAI::BaseModel - extend 
OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]])) } - def input - end - - sig do - params(_: T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]])) - .returns(T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]])) - end - def input=(_) - end - - sig { returns(T.any(String, Symbol)) } - def model - end - - sig { params(_: T.any(String, Symbol)).returns(T.any(String, Symbol)) } - def model=(_) - end - - sig { returns(T.nilable(Integer)) } - def dimensions - end - - sig { params(_: Integer).returns(Integer) } - def dimensions=(_) - end - - sig { returns(T.nilable(Symbol)) } - def encoding_format - end - - sig { params(_: Symbol).returns(Symbol) } - def encoding_format=(_) - end - - sig { returns(T.nilable(String)) } - def user - end - - sig { params(_: String).returns(String) } - def user=(_) - end - - sig do - params( - input: T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]]), - model: T.any(String, Symbol), - dimensions: Integer, - encoding_format: Symbol, - user: String, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(input:, model:, dimensions: nil, encoding_format: nil, user: nil, request_options: {}) - end - - sig do - override - .returns( - { - input: T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]]), - model: T.any(String, Symbol), - dimensions: Integer, - encoding_format: Symbol, - user: String, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Input < OpenAI::Union - abstract! - - StringArray = T.type_alias { T::Array[String] } - - IntegerArray = T.type_alias { T::Array[Integer] } - - ArrayOfToken2DArray = T.type_alias { T::Array[T::Array[Integer]] } - - class << self - sig do - override - .returns( - [ - [NilClass, String], - [NilClass, T::Array[String]], - [NilClass, T::Array[Integer]], - [NilClass, T::Array[T::Array[Integer]]] - ] - ) - end - private def variants - end - end - end - - class Model < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - - class EncodingFormat < OpenAI::Enum - abstract! - - FLOAT = :float - BASE64 = :base64 - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/embedding_model.rbi b/rbi/lib/openai/models/embedding_model.rbi deleted file mode 100644 index 161fb296..00000000 --- a/rbi/lib/openai/models/embedding_model.rbi +++ /dev/null @@ -1,19 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class EmbeddingModel < OpenAI::Enum - abstract! 
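- # Known embedding model identifiers; EmbeddingCreateParams#model also - # accepts an arbitrary String for models not listed here. A minimal usage - # sketch, assuming a configured OpenAI::Client whose embeddings resource is - # not shown in this file: - # - # client.embeddings.create( - # model: :"text-embedding-3-small", - # input: "The quick brown fox", - # encoding_format: :float - # )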
- - TEXT_EMBEDDING_ADA_002 = :"text-embedding-ada-002" - TEXT_EMBEDDING_3_SMALL = :"text-embedding-3-small" - TEXT_EMBEDDING_3_LARGE = :"text-embedding-3-large" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end -end diff --git a/rbi/lib/openai/models/error_object.rbi b/rbi/lib/openai/models/error_object.rbi deleted file mode 100644 index f7390e71..00000000 --- a/rbi/lib/openai/models/error_object.rbi +++ /dev/null @@ -1,52 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ErrorObject < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def code - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def code=(_) - end - - sig { returns(String) } - def message - end - - sig { params(_: String).returns(String) } - def message=(_) - end - - sig { returns(T.nilable(String)) } - def param - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def param=(_) - end - - sig { returns(String) } - def type - end - - sig { params(_: String).returns(String) } - def type=(_) - end - - sig do - params(code: T.nilable(String), message: String, param: T.nilable(String), type: String) - .returns(T.attached_class) - end - def self.new(code:, message:, param:, type:) - end - - sig do - override.returns({code: T.nilable(String), message: String, param: T.nilable(String), type: String}) - end - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/file_chunking_strategy.rbi b/rbi/lib/openai/models/file_chunking_strategy.rbi deleted file mode 100644 index b0b4a1b0..00000000 --- a/rbi/lib/openai/models/file_chunking_strategy.rbi +++ /dev/null @@ -1,20 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class FileChunkingStrategy < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::StaticFileChunkingStrategyObject], [Symbol, OpenAI::Models::OtherFileChunkingStrategyObject]] - ) - end - private def variants - end - end - end - end -end diff --git a/rbi/lib/openai/models/file_chunking_strategy_param.rbi b/rbi/lib/openai/models/file_chunking_strategy_param.rbi deleted file mode 100644 index 9a360f39..00000000 --- a/rbi/lib/openai/models/file_chunking_strategy_param.rbi +++ /dev/null @@ -1,20 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class FileChunkingStrategyParam < OpenAI::Union - abstract! 
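- # Chunking strategy applied when files are added to a vector store: either - # the auto strategy or an explicit static chunk size and overlap; the API - # documents auto as the fallback when this is omitted.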
- - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::AutoFileChunkingStrategyParam], [Symbol, OpenAI::Models::StaticFileChunkingStrategyObjectParam]] - ) - end - private def variants - end - end - end - end -end diff --git a/rbi/lib/openai/models/file_content_params.rbi b/rbi/lib/openai/models/file_content_params.rbi deleted file mode 100644 index 71e0eaaa..00000000 --- a/rbi/lib/openai/models/file_content_params.rbi +++ /dev/null @@ -1,21 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class FileContentParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/file_create_params.rbi b/rbi/lib/openai/models/file_create_params.rbi deleted file mode 100644 index aa9afe8a..00000000 --- a/rbi/lib/openai/models/file_create_params.rbi +++ /dev/null @@ -1,49 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class FileCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.any(IO, StringIO)) } - def file - end - - sig { params(_: T.any(IO, StringIO)).returns(T.any(IO, StringIO)) } - def file=(_) - end - - sig { returns(Symbol) } - def purpose - end - - sig { params(_: Symbol).returns(Symbol) } - def purpose=(_) - end - - sig do - params( - file: T.any(IO, StringIO), - purpose: Symbol, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(file:, purpose:, request_options: {}) - end - - sig do - override.returns( - { - file: T.any(IO, StringIO), - purpose: Symbol, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/file_delete_params.rbi b/rbi/lib/openai/models/file_delete_params.rbi deleted file mode 100644 index 5c6407ba..00000000 --- a/rbi/lib/openai/models/file_delete_params.rbi +++ /dev/null @@ -1,21 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class FileDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/file_deleted.rbi b/rbi/lib/openai/models/file_deleted.rbi deleted file mode 100644 index 83029526..00000000 --- a/rbi/lib/openai/models/file_deleted.rbi +++ /dev/null @@ -1,39 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class FileDeleted < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T::Boolean) } - def deleted - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def deleted=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { params(id: String, deleted: T::Boolean, object: Symbol).returns(T.attached_class) } - def self.new(id:, deleted:, object: :file) - end - - sig { 
override.returns({id: String, deleted: T::Boolean, object: Symbol}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/file_list_params.rbi b/rbi/lib/openai/models/file_list_params.rbi deleted file mode 100644 index 2da43a92..00000000 --- a/rbi/lib/openai/models/file_list_params.rbi +++ /dev/null @@ -1,83 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class FileListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(String)) } - def after - end - - sig { params(_: String).returns(String) } - def after=(_) - end - - sig { returns(T.nilable(Integer)) } - def limit - end - - sig { params(_: Integer).returns(Integer) } - def limit=(_) - end - - sig { returns(T.nilable(Symbol)) } - def order - end - - sig { params(_: Symbol).returns(Symbol) } - def order=(_) - end - - sig { returns(T.nilable(String)) } - def purpose - end - - sig { params(_: String).returns(String) } - def purpose=(_) - end - - sig do - params( - after: String, - limit: Integer, - order: Symbol, - purpose: String, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(after: nil, limit: nil, order: nil, purpose: nil, request_options: {}) - end - - sig do - override - .returns( - { - after: String, - limit: Integer, - order: Symbol, - purpose: String, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Order < OpenAI::Enum - abstract! - - ASC = :asc - DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/file_object.rbi b/rbi/lib/openai/models/file_object.rbi deleted file mode 100644 index 91a4d778..00000000 --- a/rbi/lib/openai/models/file_object.rbi +++ /dev/null @@ -1,157 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class FileObject < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(Integer) } - def bytes - end - - sig { params(_: Integer).returns(Integer) } - def bytes=(_) - end - - sig { returns(Integer) } - def created_at - end - - sig { params(_: Integer).returns(Integer) } - def created_at=(_) - end - - sig { returns(String) } - def filename - end - - sig { params(_: String).returns(String) } - def filename=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(Symbol) } - def purpose - end - - sig { params(_: Symbol).returns(Symbol) } - def purpose=(_) - end - - sig { returns(Symbol) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(T.nilable(Integer)) } - def expires_at - end - - sig { params(_: Integer).returns(Integer) } - def expires_at=(_) - end - - sig { returns(T.nilable(String)) } - def status_details - end - - sig { params(_: String).returns(String) } - def status_details=(_) - end - - sig do - params( - id: String, - bytes: Integer, - created_at: Integer, - filename: String, - purpose: Symbol, - status: Symbol, - expires_at: Integer, - status_details: String, - object: Symbol - ) - .returns(T.attached_class) - end - def self.new( - id:, - bytes:, - created_at:, - filename:, - purpose:, - status:, - expires_at: nil, - status_details: nil, - object: :file - ) - end - - sig do - override - .returns( - { - id: String, - bytes: 
Integer, - created_at: Integer, - filename: String, - object: Symbol, - purpose: Symbol, - status: Symbol, - expires_at: Integer, - status_details: String - } - ) - end - def to_hash - end - - class Purpose < OpenAI::Enum - abstract! - - ASSISTANTS = :assistants - ASSISTANTS_OUTPUT = :assistants_output - BATCH = :batch - BATCH_OUTPUT = :batch_output - FINE_TUNE = :"fine-tune" - FINE_TUNE_RESULTS = :"fine-tune-results" - VISION = :vision - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Status < OpenAI::Enum - abstract! - - UPLOADED = :uploaded - PROCESSED = :processed - ERROR = :error - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/file_purpose.rbi b/rbi/lib/openai/models/file_purpose.rbi deleted file mode 100644 index a30abf94..00000000 --- a/rbi/lib/openai/models/file_purpose.rbi +++ /dev/null @@ -1,22 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class FilePurpose < OpenAI::Enum - abstract! - - ASSISTANTS = :assistants - BATCH = :batch - FINE_TUNE = :"fine-tune" - VISION = :vision - USER_DATA = :user_data - EVALS = :evals - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end -end diff --git a/rbi/lib/openai/models/file_retrieve_params.rbi b/rbi/lib/openai/models/file_retrieve_params.rbi deleted file mode 100644 index a1f9e075..00000000 --- a/rbi/lib/openai/models/file_retrieve_params.rbi +++ /dev/null @@ -1,21 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class FileRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job.rbi deleted file mode 100644 index ece16391..00000000 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job.rbi +++ /dev/null @@ -1,685 +0,0 @@ -# typed: strong - -module OpenAI - module Models - FineTuningJob = T.type_alias { FineTuning::FineTuningJob } - - module FineTuning - class FineTuningJob < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(Integer) } - def created_at - end - - sig { params(_: Integer).returns(Integer) } - def created_at=(_) - end - - sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Error)) } - def error - end - - sig do - params(_: T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Error)) - .returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Error)) - end - def error=(_) - end - - sig { returns(T.nilable(String)) } - def fine_tuned_model - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def fine_tuned_model=(_) - end - - sig { returns(T.nilable(Integer)) } - def finished_at - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def finished_at=(_) - end - - sig { returns(OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters) } - def hyperparameters - end - - sig do - params(_: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters) - 
.returns(OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters) - end - def hyperparameters=(_) - end - - sig { returns(String) } - def model - end - - sig { params(_: String).returns(String) } - def model=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(String) } - def organization_id - end - - sig { params(_: String).returns(String) } - def organization_id=(_) - end - - sig { returns(T::Array[String]) } - def result_files - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def result_files=(_) - end - - sig { returns(Integer) } - def seed - end - - sig { params(_: Integer).returns(Integer) } - def seed=(_) - end - - sig { returns(Symbol) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(T.nilable(Integer)) } - def trained_tokens - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def trained_tokens=(_) - end - - sig { returns(String) } - def training_file - end - - sig { params(_: String).returns(String) } - def training_file=(_) - end - - sig { returns(T.nilable(String)) } - def validation_file - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def validation_file=(_) - end - - sig { returns(T.nilable(Integer)) } - def estimated_finish - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def estimated_finish=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject])) } - def integrations - end - - sig do - params(_: T.nilable(T::Array[OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject])) - .returns(T.nilable(T::Array[OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject])) - end - def integrations=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Method)) } - def method_ - end - - sig do - params(_: OpenAI::Models::FineTuning::FineTuningJob::Method) - .returns(OpenAI::Models::FineTuning::FineTuningJob::Method) - end - def method_=(_) - end - - sig do - params( - id: String, - created_at: Integer, - error: T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Error), - fine_tuned_model: T.nilable(String), - finished_at: T.nilable(Integer), - hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters, - model: String, - organization_id: String, - result_files: T::Array[String], - seed: Integer, - status: Symbol, - trained_tokens: T.nilable(Integer), - training_file: String, - validation_file: T.nilable(String), - estimated_finish: T.nilable(Integer), - integrations: T.nilable(T::Array[OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject]), - metadata: T.nilable(OpenAI::Models::Metadata), - method_: OpenAI::Models::FineTuning::FineTuningJob::Method, - object: Symbol - ) - .returns(T.attached_class) - end - def self.new( - id:, - created_at:, - error:, - fine_tuned_model:, - finished_at:, - hyperparameters:, - model:, - organization_id:, - result_files:, - seed:, - status:, - trained_tokens:, - training_file:, - validation_file:, - estimated_finish: nil, - integrations: nil, - metadata: nil, - method_: nil, - object: :"fine_tuning.job" - ) - end - - sig do - override - .returns( - { - id: String, - 
created_at: Integer, - error: T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Error), - fine_tuned_model: T.nilable(String), - finished_at: T.nilable(Integer), - hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters, - model: String, - object: Symbol, - organization_id: String, - result_files: T::Array[String], - seed: Integer, - status: Symbol, - trained_tokens: T.nilable(Integer), - training_file: String, - validation_file: T.nilable(String), - estimated_finish: T.nilable(Integer), - integrations: T.nilable(T::Array[OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject]), - metadata: T.nilable(OpenAI::Models::Metadata), - method_: OpenAI::Models::FineTuning::FineTuningJob::Method - } - ) - end - def to_hash - end - - class Error < OpenAI::BaseModel - sig { returns(String) } - def code - end - - sig { params(_: String).returns(String) } - def code=(_) - end - - sig { returns(String) } - def message - end - - sig { params(_: String).returns(String) } - def message=(_) - end - - sig { returns(T.nilable(String)) } - def param - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def param=(_) - end - - sig { params(code: String, message: String, param: T.nilable(String)).returns(T.attached_class) } - def self.new(code:, message:, param:) - end - - sig { override.returns({code: String, message: String, param: T.nilable(String)}) } - def to_hash - end - end - - class Hyperparameters < OpenAI::BaseModel - sig { returns(T.nilable(T.any(Symbol, Integer))) } - def batch_size - end - - sig { params(_: T.any(Symbol, Integer)).returns(T.any(Symbol, Integer)) } - def batch_size=(_) - end - - sig { returns(T.nilable(T.any(Symbol, Float))) } - def learning_rate_multiplier - end - - sig { params(_: T.any(Symbol, Float)).returns(T.any(Symbol, Float)) } - def learning_rate_multiplier=(_) - end - - sig { returns(T.nilable(T.any(Symbol, Integer))) } - def n_epochs - end - - sig { params(_: T.any(Symbol, Integer)).returns(T.any(Symbol, Integer)) } - def n_epochs=(_) - end - - sig do - params( - batch_size: T.any(Symbol, Integer), - learning_rate_multiplier: T.any(Symbol, Float), - n_epochs: T.any(Symbol, Integer) - ) - .returns(T.attached_class) - end - def self.new(batch_size: nil, learning_rate_multiplier: nil, n_epochs: nil) - end - - sig do - override - .returns( - { - batch_size: T.any(Symbol, Integer), - learning_rate_multiplier: T.any(Symbol, Float), - n_epochs: T.any(Symbol, Integer) - } - ) - end - def to_hash - end - - class BatchSize < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end - end - - class LearningRateMultiplier < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end - end - - class NEpochs < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end - end - end - - class Status < OpenAI::Enum - abstract! 
- - VALIDATING_FILES = :validating_files - QUEUED = :queued - RUNNING = :running - SUCCEEDED = :succeeded - FAILED = :failed - CANCELLED = :cancelled - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Method < OpenAI::BaseModel - sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo)) } - def dpo - end - - sig do - params(_: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo) - .returns(OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo) - end - def dpo=(_) - end - - sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised)) } - def supervised - end - - sig do - params(_: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised) - .returns(OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised) - end - def supervised=(_) - end - - sig { returns(T.nilable(Symbol)) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - dpo: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo, - supervised: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(dpo: nil, supervised: nil, type: nil) - end - - sig do - override - .returns( - { - dpo: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo, - supervised: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised, - type: Symbol - } - ) - end - def to_hash - end - - class Dpo < OpenAI::BaseModel - sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters)) } - def hyperparameters - end - - sig do - params(_: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters) - .returns(OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters) - end - def hyperparameters=(_) - end - - sig do - params(hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters) - .returns(T.attached_class) - end - def self.new(hyperparameters: nil) - end - - sig do - override - .returns({hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters}) - end - def to_hash - end - - class Hyperparameters < OpenAI::BaseModel - sig { returns(T.nilable(T.any(Symbol, Integer))) } - def batch_size - end - - sig { params(_: T.any(Symbol, Integer)).returns(T.any(Symbol, Integer)) } - def batch_size=(_) - end - - sig { returns(T.nilable(T.any(Symbol, Float))) } - def beta - end - - sig { params(_: T.any(Symbol, Float)).returns(T.any(Symbol, Float)) } - def beta=(_) - end - - sig { returns(T.nilable(T.any(Symbol, Float))) } - def learning_rate_multiplier - end - - sig { params(_: T.any(Symbol, Float)).returns(T.any(Symbol, Float)) } - def learning_rate_multiplier=(_) - end - - sig { returns(T.nilable(T.any(Symbol, Integer))) } - def n_epochs - end - - sig { params(_: T.any(Symbol, Integer)).returns(T.any(Symbol, Integer)) } - def n_epochs=(_) - end - - sig do - params( - batch_size: T.any(Symbol, Integer), - beta: T.any(Symbol, Float), - learning_rate_multiplier: T.any(Symbol, Float), - n_epochs: T.any(Symbol, Integer) - ) - .returns(T.attached_class) - end - def self.new(batch_size: nil, beta: nil, learning_rate_multiplier: nil, n_epochs: nil) - end - - sig do - override - .returns( - { - batch_size: T.any(Symbol, Integer), - beta: T.any(Symbol, Float), - learning_rate_multiplier: T.any(Symbol, Float), - n_epochs: T.any(Symbol, Integer) - } - ) - end - def to_hash - end - - class BatchSize < OpenAI::Union 
- abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end - end - - class Beta < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end - end - - class LearningRateMultiplier < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end - end - - class NEpochs < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end - end - end - end - - class Supervised < OpenAI::BaseModel - sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters)) } - def hyperparameters - end - - sig do - params(_: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters) - .returns(OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters) - end - def hyperparameters=(_) - end - - sig do - params(hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters) - .returns(T.attached_class) - end - def self.new(hyperparameters: nil) - end - - sig do - override - .returns({hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters}) - end - def to_hash - end - - class Hyperparameters < OpenAI::BaseModel - sig { returns(T.nilable(T.any(Symbol, Integer))) } - def batch_size - end - - sig { params(_: T.any(Symbol, Integer)).returns(T.any(Symbol, Integer)) } - def batch_size=(_) - end - - sig { returns(T.nilable(T.any(Symbol, Float))) } - def learning_rate_multiplier - end - - sig { params(_: T.any(Symbol, Float)).returns(T.any(Symbol, Float)) } - def learning_rate_multiplier=(_) - end - - sig { returns(T.nilable(T.any(Symbol, Integer))) } - def n_epochs - end - - sig { params(_: T.any(Symbol, Integer)).returns(T.any(Symbol, Integer)) } - def n_epochs=(_) - end - - sig do - params( - batch_size: T.any(Symbol, Integer), - learning_rate_multiplier: T.any(Symbol, Float), - n_epochs: T.any(Symbol, Integer) - ) - .returns(T.attached_class) - end - def self.new(batch_size: nil, learning_rate_multiplier: nil, n_epochs: nil) - end - - sig do - override - .returns( - { - batch_size: T.any(Symbol, Integer), - learning_rate_multiplier: T.any(Symbol, Float), - n_epochs: T.any(Symbol, Integer) - } - ) - end - def to_hash - end - - class BatchSize < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end - end - - class LearningRateMultiplier < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end - end - - class NEpochs < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end - end - end - end - - class Type < OpenAI::Enum - abstract! 
- - SUPERVISED = :supervised - DPO = :dpo - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_event.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_event.rbi deleted file mode 100644 index 15ce95c4..00000000 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_event.rbi +++ /dev/null @@ -1,126 +0,0 @@ -# typed: strong - -module OpenAI - module Models - FineTuningJobEvent = T.type_alias { FineTuning::FineTuningJobEvent } - - module FineTuning - class FineTuningJobEvent < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(Integer) } - def created_at - end - - sig { params(_: Integer).returns(Integer) } - def created_at=(_) - end - - sig { returns(Symbol) } - def level - end - - sig { params(_: Symbol).returns(Symbol) } - def level=(_) - end - - sig { returns(String) } - def message - end - - sig { params(_: String).returns(String) } - def message=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(T.nilable(T.anything)) } - def data - end - - sig { params(_: T.anything).returns(T.anything) } - def data=(_) - end - - sig { returns(T.nilable(Symbol)) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - id: String, - created_at: Integer, - level: Symbol, - message: String, - data: T.anything, - type: Symbol, - object: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, created_at:, level:, message:, data: nil, type: nil, object: :"fine_tuning.job.event") - end - - sig do - override - .returns( - { - id: String, - created_at: Integer, - level: Symbol, - message: String, - object: Symbol, - data: T.anything, - type: Symbol - } - ) - end - def to_hash - end - - class Level < OpenAI::Enum - abstract! - - INFO = :info - WARN = :warn - ERROR = :error - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Type < OpenAI::Enum - abstract! 
- - MESSAGE = :message - METRICS = :metrics - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_integration.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_integration.rbi deleted file mode 100644 index d1f932e5..00000000 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_integration.rbi +++ /dev/null @@ -1,11 +0,0 @@ -# typed: strong - -module OpenAI - module Models - FineTuningJobIntegration = T.type_alias { FineTuning::FineTuningJobIntegration } - - module FineTuning - FineTuningJobIntegration = T.type_alias { OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject } - end - end -end diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi deleted file mode 100644 index 37b15696..00000000 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi +++ /dev/null @@ -1,62 +0,0 @@ -# typed: strong - -module OpenAI - module Models - FineTuningJobWandbIntegration = T.type_alias { FineTuning::FineTuningJobWandbIntegration } - - module FineTuning - class FineTuningJobWandbIntegration < OpenAI::BaseModel - sig { returns(String) } - def project - end - - sig { params(_: String).returns(String) } - def project=(_) - end - - sig { returns(T.nilable(String)) } - def entity - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def entity=(_) - end - - sig { returns(T.nilable(String)) } - def name - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def name=(_) - end - - sig { returns(T.nilable(T::Array[String])) } - def tags - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def tags=(_) - end - - sig do - params(project: String, entity: T.nilable(String), name: T.nilable(String), tags: T::Array[String]) - .returns(T.attached_class) - end - def self.new(project:, entity: nil, name: nil, tags: nil) - end - - sig do - override - .returns({ - project: String, - entity: T.nilable(String), - name: T.nilable(String), - tags: T::Array[String] - }) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi deleted file mode 100644 index 148ab9c5..00000000 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: strong - -module OpenAI - module Models - FineTuningJobWandbIntegrationObject = T.type_alias { FineTuning::FineTuningJobWandbIntegrationObject } - - module FineTuning - class FineTuningJobWandbIntegrationObject < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(OpenAI::Models::FineTuning::FineTuningJobWandbIntegration) } - def wandb - end - - sig do - params(_: OpenAI::Models::FineTuning::FineTuningJobWandbIntegration) - .returns(OpenAI::Models::FineTuning::FineTuningJobWandbIntegration) - end - def wandb=(_) - end - - sig do - params(wandb: OpenAI::Models::FineTuning::FineTuningJobWandbIntegration, type: Symbol) - .returns(T.attached_class) - end - def self.new(wandb:, type: :wandb) - end - - sig { override.returns({type: Symbol, wandb: OpenAI::Models::FineTuning::FineTuningJobWandbIntegration}) } - def to_hash - end - end - end - end -end diff 
--git a/rbi/lib/openai/models/fine_tuning/job_cancel_params.rbi b/rbi/lib/openai/models/fine_tuning/job_cancel_params.rbi deleted file mode 100644 index bdc32d2b..00000000 --- a/rbi/lib/openai/models/fine_tuning/job_cancel_params.rbi +++ /dev/null @@ -1,23 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module FineTuning - class JobCancelParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/fine_tuning/job_create_params.rbi b/rbi/lib/openai/models/fine_tuning/job_create_params.rbi deleted file mode 100644 index 269a125d..00000000 --- a/rbi/lib/openai/models/fine_tuning/job_create_params.rbi +++ /dev/null @@ -1,642 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module FineTuning - class JobCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.any(String, Symbol)) } - def model - end - - sig { params(_: T.any(String, Symbol)).returns(T.any(String, Symbol)) } - def model=(_) - end - - sig { returns(String) } - def training_file - end - - sig { params(_: String).returns(String) } - def training_file=(_) - end - - sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters)) } - def hyperparameters - end - - sig do - params(_: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters) - .returns(OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters) - end - def hyperparameters=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::FineTuning::JobCreateParams::Integration])) } - def integrations - end - - sig do - params(_: T.nilable(T::Array[OpenAI::Models::FineTuning::JobCreateParams::Integration])) - .returns(T.nilable(T::Array[OpenAI::Models::FineTuning::JobCreateParams::Integration])) - end - def integrations=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Method)) } - def method_ - end - - sig do - params(_: OpenAI::Models::FineTuning::JobCreateParams::Method) - .returns(OpenAI::Models::FineTuning::JobCreateParams::Method) - end - def method_=(_) - end - - sig { returns(T.nilable(Integer)) } - def seed - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def seed=(_) - end - - sig { returns(T.nilable(String)) } - def suffix - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def suffix=(_) - end - - sig { returns(T.nilable(String)) } - def validation_file - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def validation_file=(_) - end - - sig do - params( - model: T.any(String, Symbol), - training_file: String, - hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters, - integrations: T.nilable(T::Array[OpenAI::Models::FineTuning::JobCreateParams::Integration]), - metadata: T.nilable(OpenAI::Models::Metadata), - method_: OpenAI::Models::FineTuning::JobCreateParams::Method, - seed: T.nilable(Integer), - suffix: T.nilable(String), - 
validation_file: T.nilable(String), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new( - model:, - training_file:, - hyperparameters: nil, - integrations: nil, - metadata: nil, - method_: nil, - seed: nil, - suffix: nil, - validation_file: nil, - request_options: {} - ) - end - - sig do - override - .returns( - { - model: T.any(String, Symbol), - training_file: String, - hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters, - integrations: T.nilable(T::Array[OpenAI::Models::FineTuning::JobCreateParams::Integration]), - metadata: T.nilable(OpenAI::Models::Metadata), - method_: OpenAI::Models::FineTuning::JobCreateParams::Method, - seed: T.nilable(Integer), - suffix: T.nilable(String), - validation_file: T.nilable(String), - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Model < OpenAI::Union - abstract! - - class Preset < OpenAI::Enum - abstract! - - BABBAGE_002 = :"babbage-002" - DAVINCI_002 = :"davinci-002" - GPT_3_5_TURBO = :"gpt-3.5-turbo" - GPT_4O_MINI = :"gpt-4o-mini" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - - class Hyperparameters < OpenAI::BaseModel - sig { returns(T.nilable(T.any(Symbol, Integer))) } - def batch_size - end - - sig { params(_: T.any(Symbol, Integer)).returns(T.any(Symbol, Integer)) } - def batch_size=(_) - end - - sig { returns(T.nilable(T.any(Symbol, Float))) } - def learning_rate_multiplier - end - - sig { params(_: T.any(Symbol, Float)).returns(T.any(Symbol, Float)) } - def learning_rate_multiplier=(_) - end - - sig { returns(T.nilable(T.any(Symbol, Integer))) } - def n_epochs - end - - sig { params(_: T.any(Symbol, Integer)).returns(T.any(Symbol, Integer)) } - def n_epochs=(_) - end - - sig do - params( - batch_size: T.any(Symbol, Integer), - learning_rate_multiplier: T.any(Symbol, Float), - n_epochs: T.any(Symbol, Integer) - ) - .returns(T.attached_class) - end - def self.new(batch_size: nil, learning_rate_multiplier: nil, n_epochs: nil) - end - - sig do - override - .returns( - { - batch_size: T.any(Symbol, Integer), - learning_rate_multiplier: T.any(Symbol, Float), - n_epochs: T.any(Symbol, Integer) - } - ) - end - def to_hash - end - - class BatchSize < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end - end - - class LearningRateMultiplier < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end - end - - class NEpochs < OpenAI::Union - abstract! 
- - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end - end - end - - class Integration < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb) } - def wandb - end - - sig do - params(_: OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb) - .returns(OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb) - end - def wandb=(_) - end - - sig do - params(wandb: OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb, type: Symbol) - .returns(T.attached_class) - end - def self.new(wandb:, type: :wandb) - end - - sig { override.returns({type: Symbol, wandb: OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb}) } - def to_hash - end - - class Wandb < OpenAI::BaseModel - sig { returns(String) } - def project - end - - sig { params(_: String).returns(String) } - def project=(_) - end - - sig { returns(T.nilable(String)) } - def entity - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def entity=(_) - end - - sig { returns(T.nilable(String)) } - def name - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def name=(_) - end - - sig { returns(T.nilable(T::Array[String])) } - def tags - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def tags=(_) - end - - sig do - params( - project: String, - entity: T.nilable(String), - name: T.nilable(String), - tags: T::Array[String] - ) - .returns(T.attached_class) - end - def self.new(project:, entity: nil, name: nil, tags: nil) - end - - sig do - override - .returns({ - project: String, - entity: T.nilable(String), - name: T.nilable(String), - tags: T::Array[String] - }) - end - def to_hash - end - end - end - - class Method < OpenAI::BaseModel - sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo)) } - def dpo - end - - sig do - params(_: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo) - .returns(OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo) - end - def dpo=(_) - end - - sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised)) } - def supervised - end - - sig do - params(_: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised) - .returns(OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised) - end - def supervised=(_) - end - - sig { returns(T.nilable(Symbol)) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - dpo: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo, - supervised: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(dpo: nil, supervised: nil, type: nil) - end - - sig do - override - .returns( - { - dpo: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo, - supervised: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised, - type: Symbol - } - ) - end - def to_hash - end - - class Dpo < OpenAI::BaseModel - sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters)) } - def hyperparameters - end - - sig do - params(_: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters) - .returns(OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters) - end - def hyperparameters=(_) - 
end - - sig do - params(hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters) - .returns(T.attached_class) - end - def self.new(hyperparameters: nil) - end - - sig do - override - .returns({hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters}) - end - def to_hash - end - - class Hyperparameters < OpenAI::BaseModel - sig { returns(T.nilable(T.any(Symbol, Integer))) } - def batch_size - end - - sig { params(_: T.any(Symbol, Integer)).returns(T.any(Symbol, Integer)) } - def batch_size=(_) - end - - sig { returns(T.nilable(T.any(Symbol, Float))) } - def beta - end - - sig { params(_: T.any(Symbol, Float)).returns(T.any(Symbol, Float)) } - def beta=(_) - end - - sig { returns(T.nilable(T.any(Symbol, Float))) } - def learning_rate_multiplier - end - - sig { params(_: T.any(Symbol, Float)).returns(T.any(Symbol, Float)) } - def learning_rate_multiplier=(_) - end - - sig { returns(T.nilable(T.any(Symbol, Integer))) } - def n_epochs - end - - sig { params(_: T.any(Symbol, Integer)).returns(T.any(Symbol, Integer)) } - def n_epochs=(_) - end - - sig do - params( - batch_size: T.any(Symbol, Integer), - beta: T.any(Symbol, Float), - learning_rate_multiplier: T.any(Symbol, Float), - n_epochs: T.any(Symbol, Integer) - ) - .returns(T.attached_class) - end - def self.new(batch_size: nil, beta: nil, learning_rate_multiplier: nil, n_epochs: nil) - end - - sig do - override - .returns( - { - batch_size: T.any(Symbol, Integer), - beta: T.any(Symbol, Float), - learning_rate_multiplier: T.any(Symbol, Float), - n_epochs: T.any(Symbol, Integer) - } - ) - end - def to_hash - end - - class BatchSize < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end - end - - class Beta < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end - end - - class LearningRateMultiplier < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end - end - - class NEpochs < OpenAI::Union - abstract! 
- - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end - end - end - end - - class Supervised < OpenAI::BaseModel - sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters)) } - def hyperparameters - end - - sig do - params(_: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters) - .returns(OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters) - end - def hyperparameters=(_) - end - - sig do - params(hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters) - .returns(T.attached_class) - end - def self.new(hyperparameters: nil) - end - - sig do - override - .returns( - {hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters} - ) - end - def to_hash - end - - class Hyperparameters < OpenAI::BaseModel - sig { returns(T.nilable(T.any(Symbol, Integer))) } - def batch_size - end - - sig { params(_: T.any(Symbol, Integer)).returns(T.any(Symbol, Integer)) } - def batch_size=(_) - end - - sig { returns(T.nilable(T.any(Symbol, Float))) } - def learning_rate_multiplier - end - - sig { params(_: T.any(Symbol, Float)).returns(T.any(Symbol, Float)) } - def learning_rate_multiplier=(_) - end - - sig { returns(T.nilable(T.any(Symbol, Integer))) } - def n_epochs - end - - sig { params(_: T.any(Symbol, Integer)).returns(T.any(Symbol, Integer)) } - def n_epochs=(_) - end - - sig do - params( - batch_size: T.any(Symbol, Integer), - learning_rate_multiplier: T.any(Symbol, Float), - n_epochs: T.any(Symbol, Integer) - ) - .returns(T.attached_class) - end - def self.new(batch_size: nil, learning_rate_multiplier: nil, n_epochs: nil) - end - - sig do - override - .returns( - { - batch_size: T.any(Symbol, Integer), - learning_rate_multiplier: T.any(Symbol, Float), - n_epochs: T.any(Symbol, Integer) - } - ) - end - def to_hash - end - - class BatchSize < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end - end - - class LearningRateMultiplier < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } - private def variants - end - end - end - - class NEpochs < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } - private def variants - end - end - end - end - end - - class Type < OpenAI::Enum - abstract! 
- - SUPERVISED = :supervised - DPO = :dpo - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/fine_tuning/job_list_events_params.rbi b/rbi/lib/openai/models/fine_tuning/job_list_events_params.rbi deleted file mode 100644 index ba90b85e..00000000 --- a/rbi/lib/openai/models/fine_tuning/job_list_events_params.rbi +++ /dev/null @@ -1,43 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module FineTuning - class JobListEventsParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(String)) } - def after - end - - sig { params(_: String).returns(String) } - def after=(_) - end - - sig { returns(T.nilable(Integer)) } - def limit - end - - sig { params(_: Integer).returns(Integer) } - def limit=(_) - end - - sig do - params( - after: String, - limit: Integer, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(after: nil, limit: nil, request_options: {}) - end - - sig { override.returns({after: String, limit: Integer, request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/fine_tuning/job_list_params.rbi b/rbi/lib/openai/models/fine_tuning/job_list_params.rbi deleted file mode 100644 index e217f2fe..00000000 --- a/rbi/lib/openai/models/fine_tuning/job_list_params.rbi +++ /dev/null @@ -1,62 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module FineTuning - class JobListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(String)) } - def after - end - - sig { params(_: String).returns(String) } - def after=(_) - end - - sig { returns(T.nilable(Integer)) } - def limit - end - - sig { params(_: Integer).returns(Integer) } - def limit=(_) - end - - sig { returns(T.nilable(T::Hash[Symbol, String])) } - def metadata - end - - sig { params(_: T.nilable(T::Hash[Symbol, String])).returns(T.nilable(T::Hash[Symbol, String])) } - def metadata=(_) - end - - sig do - params( - after: String, - limit: Integer, - metadata: T.nilable(T::Hash[Symbol, String]), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(after: nil, limit: nil, metadata: nil, request_options: {}) - end - - sig do - override - .returns( - { - after: String, - limit: Integer, - metadata: T.nilable(T::Hash[Symbol, String]), - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/fine_tuning/job_retrieve_params.rbi b/rbi/lib/openai/models/fine_tuning/job_retrieve_params.rbi deleted file mode 100644 index 238e3029..00000000 --- a/rbi/lib/openai/models/fine_tuning/job_retrieve_params.rbi +++ /dev/null @@ -1,23 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module FineTuning - class JobRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git 
a/rbi/lib/openai/models/fine_tuning/jobs/checkpoint_list_params.rbi b/rbi/lib/openai/models/fine_tuning/jobs/checkpoint_list_params.rbi deleted file mode 100644 index a032fba4..00000000 --- a/rbi/lib/openai/models/fine_tuning/jobs/checkpoint_list_params.rbi +++ /dev/null @@ -1,45 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module FineTuning - module Jobs - class CheckpointListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(String)) } - def after - end - - sig { params(_: String).returns(String) } - def after=(_) - end - - sig { returns(T.nilable(Integer)) } - def limit - end - - sig { params(_: Integer).returns(Integer) } - def limit=(_) - end - - sig do - params( - after: String, - limit: Integer, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(after: nil, limit: nil, request_options: {}) - end - - sig { override.returns({after: String, limit: Integer, request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi b/rbi/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi deleted file mode 100644 index 077add44..00000000 --- a/rbi/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi +++ /dev/null @@ -1,208 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module FineTuning - module Jobs - class FineTuningJobCheckpoint < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(Integer) } - def created_at - end - - sig { params(_: Integer).returns(Integer) } - def created_at=(_) - end - - sig { returns(String) } - def fine_tuned_model_checkpoint - end - - sig { params(_: String).returns(String) } - def fine_tuned_model_checkpoint=(_) - end - - sig { returns(String) } - def fine_tuning_job_id - end - - sig { params(_: String).returns(String) } - def fine_tuning_job_id=(_) - end - - sig { returns(OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics) } - def metrics - end - - sig do - params(_: OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics) - .returns(OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics) - end - def metrics=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(Integer) } - def step_number - end - - sig { params(_: Integer).returns(Integer) } - def step_number=(_) - end - - sig do - params( - id: String, - created_at: Integer, - fine_tuned_model_checkpoint: String, - fine_tuning_job_id: String, - metrics: OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics, - step_number: Integer, - object: Symbol - ) - .returns(T.attached_class) - end - def self.new( - id:, - created_at:, - fine_tuned_model_checkpoint:, - fine_tuning_job_id:, - metrics:, - step_number:, - object: :"fine_tuning.job.checkpoint" - ) - end - - sig do - override - .returns( - { - id: String, - created_at: Integer, - fine_tuned_model_checkpoint: String, - fine_tuning_job_id: String, - metrics: OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics, - object: Symbol, - step_number: Integer - } - ) - end - def to_hash - end - - class Metrics < OpenAI::BaseModel - sig { returns(T.nilable(Float)) } - def full_valid_loss - end - - sig { 
params(_: Float).returns(Float) } - def full_valid_loss=(_) - end - - sig { returns(T.nilable(Float)) } - def full_valid_mean_token_accuracy - end - - sig { params(_: Float).returns(Float) } - def full_valid_mean_token_accuracy=(_) - end - - sig { returns(T.nilable(Float)) } - def step - end - - sig { params(_: Float).returns(Float) } - def step=(_) - end - - sig { returns(T.nilable(Float)) } - def train_loss - end - - sig { params(_: Float).returns(Float) } - def train_loss=(_) - end - - sig { returns(T.nilable(Float)) } - def train_mean_token_accuracy - end - - sig { params(_: Float).returns(Float) } - def train_mean_token_accuracy=(_) - end - - sig { returns(T.nilable(Float)) } - def valid_loss - end - - sig { params(_: Float).returns(Float) } - def valid_loss=(_) - end - - sig { returns(T.nilable(Float)) } - def valid_mean_token_accuracy - end - - sig { params(_: Float).returns(Float) } - def valid_mean_token_accuracy=(_) - end - - sig do - params( - full_valid_loss: Float, - full_valid_mean_token_accuracy: Float, - step: Float, - train_loss: Float, - train_mean_token_accuracy: Float, - valid_loss: Float, - valid_mean_token_accuracy: Float - ) - .returns(T.attached_class) - end - def self.new( - full_valid_loss: nil, - full_valid_mean_token_accuracy: nil, - step: nil, - train_loss: nil, - train_mean_token_accuracy: nil, - valid_loss: nil, - valid_mean_token_accuracy: nil - ) - end - - sig do - override - .returns( - { - full_valid_loss: Float, - full_valid_mean_token_accuracy: Float, - step: Float, - train_loss: Float, - train_mean_token_accuracy: Float, - valid_loss: Float, - valid_mean_token_accuracy: Float - } - ) - end - def to_hash - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/function_definition.rbi b/rbi/lib/openai/models/function_definition.rbi deleted file mode 100644 index f4fe38c3..00000000 --- a/rbi/lib/openai/models/function_definition.rbi +++ /dev/null @@ -1,60 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class FunctionDefinition < OpenAI::BaseModel - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { returns(T.nilable(String)) } - def description - end - - sig { params(_: String).returns(String) } - def description=(_) - end - - sig { returns(T.nilable(OpenAI::Models::FunctionParameters)) } - def parameters - end - - sig { params(_: OpenAI::Models::FunctionParameters).returns(OpenAI::Models::FunctionParameters) } - def parameters=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def strict - end - - sig { params(_: T.nilable(T::Boolean)).returns(T.nilable(T::Boolean)) } - def strict=(_) - end - - sig do - params( - name: String, - description: String, - parameters: OpenAI::Models::FunctionParameters, - strict: T.nilable(T::Boolean) - ) - .returns(T.attached_class) - end - def self.new(name:, description: nil, parameters: nil, strict: nil) - end - - sig do - override - .returns( - {name: String, description: String, parameters: OpenAI::Models::FunctionParameters, strict: T.nilable(T::Boolean)} - ) - end - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/function_parameters.rbi b/rbi/lib/openai/models/function_parameters.rbi deleted file mode 100644 index 4a46a5a2..00000000 --- a/rbi/lib/openai/models/function_parameters.rbi +++ /dev/null @@ -1,7 +0,0 @@ -# typed: strong - -module OpenAI - module Models - FunctionParameters = T.type_alias { T::Hash[Symbol, T.anything] } - end -end diff --git a/rbi/lib/openai/models/image.rbi 
b/rbi/lib/openai/models/image.rbi deleted file mode 100644 index 81607a7a..00000000 --- a/rbi/lib/openai/models/image.rbi +++ /dev/null @@ -1,39 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class Image < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def b64_json - end - - sig { params(_: String).returns(String) } - def b64_json=(_) - end - - sig { returns(T.nilable(String)) } - def revised_prompt - end - - sig { params(_: String).returns(String) } - def revised_prompt=(_) - end - - sig { returns(T.nilable(String)) } - def url - end - - sig { params(_: String).returns(String) } - def url=(_) - end - - sig { params(b64_json: String, revised_prompt: String, url: String).returns(T.attached_class) } - def self.new(b64_json: nil, revised_prompt: nil, url: nil) - end - - sig { override.returns({b64_json: String, revised_prompt: String, url: String}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/image_create_variation_params.rbi b/rbi/lib/openai/models/image_create_variation_params.rbi deleted file mode 100644 index 1d40fb57..00000000 --- a/rbi/lib/openai/models/image_create_variation_params.rbi +++ /dev/null @@ -1,127 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ImageCreateVariationParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.any(IO, StringIO)) } - def image - end - - sig { params(_: T.any(IO, StringIO)).returns(T.any(IO, StringIO)) } - def image=(_) - end - - sig { returns(T.nilable(T.any(String, Symbol))) } - def model - end - - sig { params(_: T.nilable(T.any(String, Symbol))).returns(T.nilable(T.any(String, Symbol))) } - def model=(_) - end - - sig { returns(T.nilable(Integer)) } - def n - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def n=(_) - end - - sig { returns(T.nilable(Symbol)) } - def response_format - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def response_format=(_) - end - - sig { returns(T.nilable(Symbol)) } - def size - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def size=(_) - end - - sig { returns(T.nilable(String)) } - def user - end - - sig { params(_: String).returns(String) } - def user=(_) - end - - sig do - params( - image: T.any(IO, StringIO), - model: T.nilable(T.any(String, Symbol)), - n: T.nilable(Integer), - response_format: T.nilable(Symbol), - size: T.nilable(Symbol), - user: String, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(image:, model: nil, n: nil, response_format: nil, size: nil, user: nil, request_options: {}) - end - - sig do - override - .returns( - { - image: T.any(IO, StringIO), - model: T.nilable(T.any(String, Symbol)), - n: T.nilable(Integer), - response_format: T.nilable(Symbol), - size: T.nilable(Symbol), - user: String, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Model < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - - class ResponseFormat < OpenAI::Enum - abstract! - - URL = T.let(:url, T.nilable(Symbol)) - B64_JSON = T.let(:b64_json, T.nilable(Symbol)) - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Size < OpenAI::Enum - abstract! 
- - NUMBER_256X256 = T.let(:"256x256", T.nilable(Symbol)) - NUMBER_512X512 = T.let(:"512x512", T.nilable(Symbol)) - NUMBER_1024X1024 = T.let(:"1024x1024", T.nilable(Symbol)) - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/image_edit_params.rbi b/rbi/lib/openai/models/image_edit_params.rbi deleted file mode 100644 index fb3b78a8..00000000 --- a/rbi/lib/openai/models/image_edit_params.rbi +++ /dev/null @@ -1,157 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ImageEditParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.any(IO, StringIO)) } - def image - end - - sig { params(_: T.any(IO, StringIO)).returns(T.any(IO, StringIO)) } - def image=(_) - end - - sig { returns(String) } - def prompt - end - - sig { params(_: String).returns(String) } - def prompt=(_) - end - - sig { returns(T.nilable(T.any(IO, StringIO))) } - def mask - end - - sig { params(_: T.any(IO, StringIO)).returns(T.any(IO, StringIO)) } - def mask=(_) - end - - sig { returns(T.nilable(T.any(String, Symbol))) } - def model - end - - sig { params(_: T.nilable(T.any(String, Symbol))).returns(T.nilable(T.any(String, Symbol))) } - def model=(_) - end - - sig { returns(T.nilable(Integer)) } - def n - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def n=(_) - end - - sig { returns(T.nilable(Symbol)) } - def response_format - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def response_format=(_) - end - - sig { returns(T.nilable(Symbol)) } - def size - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def size=(_) - end - - sig { returns(T.nilable(String)) } - def user - end - - sig { params(_: String).returns(String) } - def user=(_) - end - - sig do - params( - image: T.any(IO, StringIO), - prompt: String, - mask: T.any(IO, StringIO), - model: T.nilable(T.any(String, Symbol)), - n: T.nilable(Integer), - response_format: T.nilable(Symbol), - size: T.nilable(Symbol), - user: String, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new( - image:, - prompt:, - mask: nil, - model: nil, - n: nil, - response_format: nil, - size: nil, - user: nil, - request_options: {} - ) - end - - sig do - override - .returns( - { - image: T.any(IO, StringIO), - prompt: String, - mask: T.any(IO, StringIO), - model: T.nilable(T.any(String, Symbol)), - n: T.nilable(Integer), - response_format: T.nilable(Symbol), - size: T.nilable(Symbol), - user: String, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Model < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - - class ResponseFormat < OpenAI::Enum - abstract! - - URL = T.let(:url, T.nilable(Symbol)) - B64_JSON = T.let(:b64_json, T.nilable(Symbol)) - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Size < OpenAI::Enum - abstract! 
- - NUMBER_256X256 = T.let(:"256x256", T.nilable(Symbol)) - NUMBER_512X512 = T.let(:"512x512", T.nilable(Symbol)) - NUMBER_1024X1024 = T.let(:"1024x1024", T.nilable(Symbol)) - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/image_generate_params.rbi b/rbi/lib/openai/models/image_generate_params.rbi deleted file mode 100644 index c5e39887..00000000 --- a/rbi/lib/openai/models/image_generate_params.rbi +++ /dev/null @@ -1,185 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ImageGenerateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def prompt - end - - sig { params(_: String).returns(String) } - def prompt=(_) - end - - sig { returns(T.nilable(T.any(String, Symbol))) } - def model - end - - sig { params(_: T.nilable(T.any(String, Symbol))).returns(T.nilable(T.any(String, Symbol))) } - def model=(_) - end - - sig { returns(T.nilable(Integer)) } - def n - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def n=(_) - end - - sig { returns(T.nilable(Symbol)) } - def quality - end - - sig { params(_: Symbol).returns(Symbol) } - def quality=(_) - end - - sig { returns(T.nilable(Symbol)) } - def response_format - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def response_format=(_) - end - - sig { returns(T.nilable(Symbol)) } - def size - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def size=(_) - end - - sig { returns(T.nilable(Symbol)) } - def style - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def style=(_) - end - - sig { returns(T.nilable(String)) } - def user - end - - sig { params(_: String).returns(String) } - def user=(_) - end - - sig do - params( - prompt: String, - model: T.nilable(T.any(String, Symbol)), - n: T.nilable(Integer), - quality: Symbol, - response_format: T.nilable(Symbol), - size: T.nilable(Symbol), - style: T.nilable(Symbol), - user: String, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new( - prompt:, - model: nil, - n: nil, - quality: nil, - response_format: nil, - size: nil, - style: nil, - user: nil, - request_options: {} - ) - end - - sig do - override - .returns( - { - prompt: String, - model: T.nilable(T.any(String, Symbol)), - n: T.nilable(Integer), - quality: Symbol, - response_format: T.nilable(Symbol), - size: T.nilable(Symbol), - style: T.nilable(Symbol), - user: String, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Model < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - - class Quality < OpenAI::Enum - abstract! - - STANDARD = :standard - HD = :hd - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class ResponseFormat < OpenAI::Enum - abstract! - - URL = T.let(:url, T.nilable(Symbol)) - B64_JSON = T.let(:b64_json, T.nilable(Symbol)) - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Size < OpenAI::Enum - abstract! 
- - NUMBER_256X256 = T.let(:"256x256", T.nilable(Symbol)) - NUMBER_512X512 = T.let(:"512x512", T.nilable(Symbol)) - NUMBER_1024X1024 = T.let(:"1024x1024", T.nilable(Symbol)) - NUMBER_1792X1024 = T.let(:"1792x1024", T.nilable(Symbol)) - NUMBER_1024X1792 = T.let(:"1024x1792", T.nilable(Symbol)) - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Style < OpenAI::Enum - abstract! - - VIVID = T.let(:vivid, T.nilable(Symbol)) - NATURAL = T.let(:natural, T.nilable(Symbol)) - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/image_model.rbi b/rbi/lib/openai/models/image_model.rbi deleted file mode 100644 index 08c7dccd..00000000 --- a/rbi/lib/openai/models/image_model.rbi +++ /dev/null @@ -1,18 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ImageModel < OpenAI::Enum - abstract! - - DALL_E_2 = :"dall-e-2" - DALL_E_3 = :"dall-e-3" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end -end diff --git a/rbi/lib/openai/models/images_response.rbi b/rbi/lib/openai/models/images_response.rbi deleted file mode 100644 index e9537809..00000000 --- a/rbi/lib/openai/models/images_response.rbi +++ /dev/null @@ -1,31 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ImagesResponse < OpenAI::BaseModel - sig { returns(Integer) } - def created - end - - sig { params(_: Integer).returns(Integer) } - def created=(_) - end - - sig { returns(T::Array[OpenAI::Models::Image]) } - def data - end - - sig { params(_: T::Array[OpenAI::Models::Image]).returns(T::Array[OpenAI::Models::Image]) } - def data=(_) - end - - sig { params(created: Integer, data: T::Array[OpenAI::Models::Image]).returns(T.attached_class) } - def self.new(created:, data:) - end - - sig { override.returns({created: Integer, data: T::Array[OpenAI::Models::Image]}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/metadata.rbi b/rbi/lib/openai/models/metadata.rbi deleted file mode 100644 index 634c341b..00000000 --- a/rbi/lib/openai/models/metadata.rbi +++ /dev/null @@ -1,7 +0,0 @@ -# typed: strong - -module OpenAI - module Models - Metadata = T.type_alias { T.nilable(T::Hash[Symbol, String]) } - end -end diff --git a/rbi/lib/openai/models/model.rbi b/rbi/lib/openai/models/model.rbi deleted file mode 100644 index ad1c5f72..00000000 --- a/rbi/lib/openai/models/model.rbi +++ /dev/null @@ -1,47 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class Model < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(Integer) } - def created - end - - sig { params(_: Integer).returns(Integer) } - def created=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(String) } - def owned_by - end - - sig { params(_: String).returns(String) } - def owned_by=(_) - end - - sig { params(id: String, created: Integer, owned_by: String, object: Symbol).returns(T.attached_class) } - def self.new(id:, created:, owned_by:, object: :model) - end - - sig { override.returns({id: String, created: Integer, object: Symbol, owned_by: String}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/model_delete_params.rbi b/rbi/lib/openai/models/model_delete_params.rbi deleted file mode 100644 index ad6f9bd8..00000000 --- 
a/rbi/lib/openai/models/model_delete_params.rbi +++ /dev/null @@ -1,21 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ModelDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/model_deleted.rbi b/rbi/lib/openai/models/model_deleted.rbi deleted file mode 100644 index 6ed7605b..00000000 --- a/rbi/lib/openai/models/model_deleted.rbi +++ /dev/null @@ -1,39 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ModelDeleted < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T::Boolean) } - def deleted - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def deleted=(_) - end - - sig { returns(String) } - def object - end - - sig { params(_: String).returns(String) } - def object=(_) - end - - sig { params(id: String, deleted: T::Boolean, object: String).returns(T.attached_class) } - def self.new(id:, deleted:, object:) - end - - sig { override.returns({id: String, deleted: T::Boolean, object: String}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/model_list_params.rbi b/rbi/lib/openai/models/model_list_params.rbi deleted file mode 100644 index 90fef299..00000000 --- a/rbi/lib/openai/models/model_list_params.rbi +++ /dev/null @@ -1,21 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ModelListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/model_retrieve_params.rbi b/rbi/lib/openai/models/model_retrieve_params.rbi deleted file mode 100644 index dc7fc8c5..00000000 --- a/rbi/lib/openai/models/model_retrieve_params.rbi +++ /dev/null @@ -1,21 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ModelRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/moderation.rbi b/rbi/lib/openai/models/moderation.rbi deleted file mode 100644 index aba76998..00000000 --- a/rbi/lib/openai/models/moderation.rbi +++ /dev/null @@ -1,722 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class Moderation < OpenAI::BaseModel - sig { returns(OpenAI::Models::Moderation::Categories) } - def categories - end - - sig { params(_: OpenAI::Models::Moderation::Categories).returns(OpenAI::Models::Moderation::Categories) } - def categories=(_) - end - - sig { returns(OpenAI::Models::Moderation::CategoryAppliedInputTypes) } - def category_applied_input_types - end - - sig do - params(_: 
OpenAI::Models::Moderation::CategoryAppliedInputTypes) - .returns(OpenAI::Models::Moderation::CategoryAppliedInputTypes) - end - def category_applied_input_types=(_) - end - - sig { returns(OpenAI::Models::Moderation::CategoryScores) } - def category_scores - end - - sig { params(_: OpenAI::Models::Moderation::CategoryScores).returns(OpenAI::Models::Moderation::CategoryScores) } - def category_scores=(_) - end - - sig { returns(T::Boolean) } - def flagged - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def flagged=(_) - end - - sig do - params( - categories: OpenAI::Models::Moderation::Categories, - category_applied_input_types: OpenAI::Models::Moderation::CategoryAppliedInputTypes, - category_scores: OpenAI::Models::Moderation::CategoryScores, - flagged: T::Boolean - ) - .returns(T.attached_class) - end - def self.new(categories:, category_applied_input_types:, category_scores:, flagged:) - end - - sig do - override - .returns( - { - categories: OpenAI::Models::Moderation::Categories, - category_applied_input_types: OpenAI::Models::Moderation::CategoryAppliedInputTypes, - category_scores: OpenAI::Models::Moderation::CategoryScores, - flagged: T::Boolean - } - ) - end - def to_hash - end - - class Categories < OpenAI::BaseModel - sig { returns(T::Boolean) } - def harassment - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def harassment=(_) - end - - sig { returns(T::Boolean) } - def harassment_threatening - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def harassment_threatening=(_) - end - - sig { returns(T::Boolean) } - def hate - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def hate=(_) - end - - sig { returns(T::Boolean) } - def hate_threatening - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def hate_threatening=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def illicit - end - - sig { params(_: T.nilable(T::Boolean)).returns(T.nilable(T::Boolean)) } - def illicit=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def illicit_violent - end - - sig { params(_: T.nilable(T::Boolean)).returns(T.nilable(T::Boolean)) } - def illicit_violent=(_) - end - - sig { returns(T::Boolean) } - def self_harm - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def self_harm=(_) - end - - sig { returns(T::Boolean) } - def self_harm_instructions - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def self_harm_instructions=(_) - end - - sig { returns(T::Boolean) } - def self_harm_intent - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def self_harm_intent=(_) - end - - sig { returns(T::Boolean) } - def sexual - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def sexual=(_) - end - - sig { returns(T::Boolean) } - def sexual_minors - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def sexual_minors=(_) - end - - sig { returns(T::Boolean) } - def violence - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def violence=(_) - end - - sig { returns(T::Boolean) } - def violence_graphic - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def violence_graphic=(_) - end - - sig do - params( - harassment: T::Boolean, - harassment_threatening: T::Boolean, - hate: T::Boolean, - hate_threatening: T::Boolean, - illicit: T.nilable(T::Boolean), - illicit_violent: T.nilable(T::Boolean), - self_harm: T::Boolean, - self_harm_instructions: T::Boolean, - self_harm_intent: T::Boolean, - sexual: T::Boolean, - sexual_minors: T::Boolean, - violence: T::Boolean, - 
violence_graphic: T::Boolean - ) - .returns(T.attached_class) - end - def self.new( - harassment:, - harassment_threatening:, - hate:, - hate_threatening:, - illicit:, - illicit_violent:, - self_harm:, - self_harm_instructions:, - self_harm_intent:, - sexual:, - sexual_minors:, - violence:, - violence_graphic: - ) - end - - sig do - override - .returns( - { - harassment: T::Boolean, - harassment_threatening: T::Boolean, - hate: T::Boolean, - hate_threatening: T::Boolean, - illicit: T.nilable(T::Boolean), - illicit_violent: T.nilable(T::Boolean), - self_harm: T::Boolean, - self_harm_instructions: T::Boolean, - self_harm_intent: T::Boolean, - sexual: T::Boolean, - sexual_minors: T::Boolean, - violence: T::Boolean, - violence_graphic: T::Boolean - } - ) - end - def to_hash - end - end - - class CategoryAppliedInputTypes < OpenAI::BaseModel - sig { returns(T::Array[Symbol]) } - def harassment - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def harassment=(_) - end - - sig { returns(T::Array[Symbol]) } - def harassment_threatening - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def harassment_threatening=(_) - end - - sig { returns(T::Array[Symbol]) } - def hate - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def hate=(_) - end - - sig { returns(T::Array[Symbol]) } - def hate_threatening - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def hate_threatening=(_) - end - - sig { returns(T::Array[Symbol]) } - def illicit - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def illicit=(_) - end - - sig { returns(T::Array[Symbol]) } - def illicit_violent - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def illicit_violent=(_) - end - - sig { returns(T::Array[Symbol]) } - def self_harm - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def self_harm=(_) - end - - sig { returns(T::Array[Symbol]) } - def self_harm_instructions - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def self_harm_instructions=(_) - end - - sig { returns(T::Array[Symbol]) } - def self_harm_intent - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def self_harm_intent=(_) - end - - sig { returns(T::Array[Symbol]) } - def sexual - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def sexual=(_) - end - - sig { returns(T::Array[Symbol]) } - def sexual_minors - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def sexual_minors=(_) - end - - sig { returns(T::Array[Symbol]) } - def violence - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def violence=(_) - end - - sig { returns(T::Array[Symbol]) } - def violence_graphic - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def violence_graphic=(_) - end - - sig do - params( - harassment: T::Array[Symbol], - harassment_threatening: T::Array[Symbol], - hate: T::Array[Symbol], - hate_threatening: T::Array[Symbol], - illicit: T::Array[Symbol], - illicit_violent: T::Array[Symbol], - self_harm: T::Array[Symbol], - self_harm_instructions: T::Array[Symbol], - self_harm_intent: T::Array[Symbol], - sexual: T::Array[Symbol], - sexual_minors: T::Array[Symbol], - violence: T::Array[Symbol], - violence_graphic: T::Array[Symbol] - ) - .returns(T.attached_class) - end - def self.new( - harassment:, - harassment_threatening:, - hate:, - hate_threatening:, - illicit:, - illicit_violent:, - 
self_harm:, - self_harm_instructions:, - self_harm_intent:, - sexual:, - sexual_minors:, - violence:, - violence_graphic: - ) - end - - sig do - override - .returns( - { - harassment: T::Array[Symbol], - harassment_threatening: T::Array[Symbol], - hate: T::Array[Symbol], - hate_threatening: T::Array[Symbol], - illicit: T::Array[Symbol], - illicit_violent: T::Array[Symbol], - self_harm: T::Array[Symbol], - self_harm_instructions: T::Array[Symbol], - self_harm_intent: T::Array[Symbol], - sexual: T::Array[Symbol], - sexual_minors: T::Array[Symbol], - violence: T::Array[Symbol], - violence_graphic: T::Array[Symbol] - } - ) - end - def to_hash - end - - class Harassment < OpenAI::Enum - abstract! - - TEXT = :text - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class HarassmentThreatening < OpenAI::Enum - abstract! - - TEXT = :text - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Hate < OpenAI::Enum - abstract! - - TEXT = :text - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class HateThreatening < OpenAI::Enum - abstract! - - TEXT = :text - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Illicit < OpenAI::Enum - abstract! - - TEXT = :text - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class IllicitViolent < OpenAI::Enum - abstract! - - TEXT = :text - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class SelfHarm < OpenAI::Enum - abstract! - - TEXT = :text - IMAGE = :image - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class SelfHarmInstruction < OpenAI::Enum - abstract! - - TEXT = :text - IMAGE = :image - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class SelfHarmIntent < OpenAI::Enum - abstract! - - TEXT = :text - IMAGE = :image - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Sexual < OpenAI::Enum - abstract! - - TEXT = :text - IMAGE = :image - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class SexualMinor < OpenAI::Enum - abstract! - - TEXT = :text - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Violence < OpenAI::Enum - abstract! - - TEXT = :text - IMAGE = :image - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class ViolenceGraphic < OpenAI::Enum - abstract! 
- - TEXT = :text - IMAGE = :image - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class CategoryScores < OpenAI::BaseModel - sig { returns(Float) } - def harassment - end - - sig { params(_: Float).returns(Float) } - def harassment=(_) - end - - sig { returns(Float) } - def harassment_threatening - end - - sig { params(_: Float).returns(Float) } - def harassment_threatening=(_) - end - - sig { returns(Float) } - def hate - end - - sig { params(_: Float).returns(Float) } - def hate=(_) - end - - sig { returns(Float) } - def hate_threatening - end - - sig { params(_: Float).returns(Float) } - def hate_threatening=(_) - end - - sig { returns(Float) } - def illicit - end - - sig { params(_: Float).returns(Float) } - def illicit=(_) - end - - sig { returns(Float) } - def illicit_violent - end - - sig { params(_: Float).returns(Float) } - def illicit_violent=(_) - end - - sig { returns(Float) } - def self_harm - end - - sig { params(_: Float).returns(Float) } - def self_harm=(_) - end - - sig { returns(Float) } - def self_harm_instructions - end - - sig { params(_: Float).returns(Float) } - def self_harm_instructions=(_) - end - - sig { returns(Float) } - def self_harm_intent - end - - sig { params(_: Float).returns(Float) } - def self_harm_intent=(_) - end - - sig { returns(Float) } - def sexual - end - - sig { params(_: Float).returns(Float) } - def sexual=(_) - end - - sig { returns(Float) } - def sexual_minors - end - - sig { params(_: Float).returns(Float) } - def sexual_minors=(_) - end - - sig { returns(Float) } - def violence - end - - sig { params(_: Float).returns(Float) } - def violence=(_) - end - - sig { returns(Float) } - def violence_graphic - end - - sig { params(_: Float).returns(Float) } - def violence_graphic=(_) - end - - sig do - params( - harassment: Float, - harassment_threatening: Float, - hate: Float, - hate_threatening: Float, - illicit: Float, - illicit_violent: Float, - self_harm: Float, - self_harm_instructions: Float, - self_harm_intent: Float, - sexual: Float, - sexual_minors: Float, - violence: Float, - violence_graphic: Float - ) - .returns(T.attached_class) - end - def self.new( - harassment:, - harassment_threatening:, - hate:, - hate_threatening:, - illicit:, - illicit_violent:, - self_harm:, - self_harm_instructions:, - self_harm_intent:, - sexual:, - sexual_minors:, - violence:, - violence_graphic: - ) - end - - sig do - override - .returns( - { - harassment: Float, - harassment_threatening: Float, - hate: Float, - hate_threatening: Float, - illicit: Float, - illicit_violent: Float, - self_harm: Float, - self_harm_instructions: Float, - self_harm_intent: Float, - sexual: Float, - sexual_minors: Float, - violence: Float, - violence_graphic: Float - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/moderation_create_params.rbi b/rbi/lib/openai/models/moderation_create_params.rbi deleted file mode 100644 index 998863d3..00000000 --- a/rbi/lib/openai/models/moderation_create_params.rbi +++ /dev/null @@ -1,110 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ModerationCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - returns( - T.any( - String, - T::Array[String], - T::Array[T.any(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)] - ) - ) - end - def input - end - - sig do - params( - _: T.any( - String, - T::Array[String], - 
T::Array[T.any(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)] - ) - ) - .returns( - T.any( - String, - T::Array[String], - T::Array[T.any(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)] - ) - ) - end - def input=(_) - end - - sig { returns(T.nilable(T.any(String, Symbol))) } - def model - end - - sig { params(_: T.any(String, Symbol)).returns(T.any(String, Symbol)) } - def model=(_) - end - - sig do - params( - input: T.any( - String, - T::Array[String], - T::Array[T.any(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)] - ), - model: T.any(String, Symbol), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(input:, model: nil, request_options: {}) - end - - sig do - override - .returns( - { - input: T.any( - String, - T::Array[String], - T::Array[T.any(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)] - ), - model: T.any(String, Symbol), - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Input < OpenAI::Union - abstract! - - StringArray = T.type_alias { T::Array[String] } - - ModerationMultiModalInputArray = T.type_alias { T::Array[T.any(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)] } - - class << self - sig do - override - .returns( - [[NilClass, String], [NilClass, T::Array[String]], [NilClass, T::Array[T.any(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)]]] - ) - end - private def variants - end - end - end - - class Model < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/moderation_create_response.rbi b/rbi/lib/openai/models/moderation_create_response.rbi deleted file mode 100644 index 46b29878..00000000 --- a/rbi/lib/openai/models/moderation_create_response.rbi +++ /dev/null @@ -1,39 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ModerationCreateResponse < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(String) } - def model - end - - sig { params(_: String).returns(String) } - def model=(_) - end - - sig { returns(T::Array[OpenAI::Models::Moderation]) } - def results - end - - sig { params(_: T::Array[OpenAI::Models::Moderation]).returns(T::Array[OpenAI::Models::Moderation]) } - def results=(_) - end - - sig { params(id: String, model: String, results: T::Array[OpenAI::Models::Moderation]).returns(T.attached_class) } - def self.new(id:, model:, results:) - end - - sig { override.returns({id: String, model: String, results: T::Array[OpenAI::Models::Moderation]}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/moderation_image_url_input.rbi b/rbi/lib/openai/models/moderation_image_url_input.rbi deleted file mode 100644 index a7ecaefe..00000000 --- a/rbi/lib/openai/models/moderation_image_url_input.rbi +++ /dev/null @@ -1,55 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ModerationImageURLInput < OpenAI::BaseModel - sig { returns(OpenAI::Models::ModerationImageURLInput::ImageURL) } - def image_url - end - - sig do - params(_: OpenAI::Models::ModerationImageURLInput::ImageURL) - .returns(OpenAI::Models::ModerationImageURLInput::ImageURL) - end - def image_url=(_) - end - - sig { 
returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params(image_url: OpenAI::Models::ModerationImageURLInput::ImageURL, type: Symbol) - .returns(T.attached_class) - end - def self.new(image_url:, type: :image_url) - end - - sig { override.returns({image_url: OpenAI::Models::ModerationImageURLInput::ImageURL, type: Symbol}) } - def to_hash - end - - class ImageURL < OpenAI::BaseModel - sig { returns(String) } - def url - end - - sig { params(_: String).returns(String) } - def url=(_) - end - - sig { params(url: String).returns(T.attached_class) } - def self.new(url:) - end - - sig { override.returns({url: String}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/moderation_model.rbi b/rbi/lib/openai/models/moderation_model.rbi deleted file mode 100644 index e3a00bac..00000000 --- a/rbi/lib/openai/models/moderation_model.rbi +++ /dev/null @@ -1,20 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ModerationModel < OpenAI::Enum - abstract! - - OMNI_MODERATION_LATEST = :"omni-moderation-latest" - OMNI_MODERATION_2024_09_26 = :"omni-moderation-2024-09-26" - TEXT_MODERATION_LATEST = :"text-moderation-latest" - TEXT_MODERATION_STABLE = :"text-moderation-stable" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end -end diff --git a/rbi/lib/openai/models/moderation_multi_modal_input.rbi b/rbi/lib/openai/models/moderation_multi_modal_input.rbi deleted file mode 100644 index 2d658e57..00000000 --- a/rbi/lib/openai/models/moderation_multi_modal_input.rbi +++ /dev/null @@ -1,20 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ModerationMultiModalInput < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::ModerationImageURLInput], [Symbol, OpenAI::Models::ModerationTextInput]] - ) - end - private def variants - end - end - end - end -end diff --git a/rbi/lib/openai/models/moderation_text_input.rbi b/rbi/lib/openai/models/moderation_text_input.rbi deleted file mode 100644 index 85c34f7f..00000000 --- a/rbi/lib/openai/models/moderation_text_input.rbi +++ /dev/null @@ -1,31 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ModerationTextInput < OpenAI::BaseModel - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(text: String, type: Symbol).returns(T.attached_class) } - def self.new(text:, type: :text) - end - - sig { override.returns({text: String, type: Symbol}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/other_file_chunking_strategy_object.rbi b/rbi/lib/openai/models/other_file_chunking_strategy_object.rbi deleted file mode 100644 index c4d89d20..00000000 --- a/rbi/lib/openai/models/other_file_chunking_strategy_object.rbi +++ /dev/null @@ -1,23 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class OtherFileChunkingStrategyObject < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :other) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/reasoning.rbi b/rbi/lib/openai/models/reasoning.rbi deleted file mode 100644 
index de57d2db..00000000 --- a/rbi/lib/openai/models/reasoning.rbi +++ /dev/null @@ -1,44 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class Reasoning < OpenAI::BaseModel - sig { returns(T.nilable(Symbol)) } - def effort - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def effort=(_) - end - - sig { returns(T.nilable(Symbol)) } - def generate_summary - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def generate_summary=(_) - end - - sig { params(effort: T.nilable(Symbol), generate_summary: T.nilable(Symbol)).returns(T.attached_class) } - def self.new(effort:, generate_summary: nil) - end - - sig { override.returns({effort: T.nilable(Symbol), generate_summary: T.nilable(Symbol)}) } - def to_hash - end - - class GenerateSummary < OpenAI::Enum - abstract! - - CONCISE = T.let(:concise, T.nilable(Symbol)) - DETAILED = T.let(:detailed, T.nilable(Symbol)) - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/reasoning_effort.rbi b/rbi/lib/openai/models/reasoning_effort.rbi deleted file mode 100644 index b4182a8d..00000000 --- a/rbi/lib/openai/models/reasoning_effort.rbi +++ /dev/null @@ -1,19 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ReasoningEffort < OpenAI::Enum - abstract! - - LOW = T.let(:low, T.nilable(Symbol)) - MEDIUM = T.let(:medium, T.nilable(Symbol)) - HIGH = T.let(:high, T.nilable(Symbol)) - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end -end diff --git a/rbi/lib/openai/models/response_format_json_object.rbi b/rbi/lib/openai/models/response_format_json_object.rbi deleted file mode 100644 index ffd5658c..00000000 --- a/rbi/lib/openai/models/response_format_json_object.rbi +++ /dev/null @@ -1,23 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ResponseFormatJSONObject < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :json_object) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/response_format_json_schema.rbi b/rbi/lib/openai/models/response_format_json_schema.rbi deleted file mode 100644 index b622a63b..00000000 --- a/rbi/lib/openai/models/response_format_json_schema.rbi +++ /dev/null @@ -1,97 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ResponseFormatJSONSchema < OpenAI::BaseModel - sig { returns(OpenAI::Models::ResponseFormatJSONSchema::JSONSchema) } - def json_schema - end - - sig do - params(_: OpenAI::Models::ResponseFormatJSONSchema::JSONSchema) - .returns(OpenAI::Models::ResponseFormatJSONSchema::JSONSchema) - end - def json_schema=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params(json_schema: OpenAI::Models::ResponseFormatJSONSchema::JSONSchema, type: Symbol) - .returns(T.attached_class) - end - def self.new(json_schema:, type: :json_schema) - end - - sig { override.returns({json_schema: OpenAI::Models::ResponseFormatJSONSchema::JSONSchema, type: Symbol}) } - def to_hash - end - - class JSONSchema < OpenAI::BaseModel - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { returns(T.nilable(String)) } - def description - end - 
- sig { params(_: String).returns(String) } - def description=(_) - end - - sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } - def schema - end - - sig { params(_: T::Hash[Symbol, T.anything]).returns(T::Hash[Symbol, T.anything]) } - def schema=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def strict - end - - sig { params(_: T.nilable(T::Boolean)).returns(T.nilable(T::Boolean)) } - def strict=(_) - end - - sig do - params( - name: String, - description: String, - schema: T::Hash[Symbol, T.anything], - strict: T.nilable(T::Boolean) - ) - .returns(T.attached_class) - end - def self.new(name:, description: nil, schema: nil, strict: nil) - end - - sig do - override - .returns( - { - name: String, - description: String, - schema: T::Hash[Symbol, T.anything], - strict: T.nilable(T::Boolean) - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/response_format_text.rbi b/rbi/lib/openai/models/response_format_text.rbi deleted file mode 100644 index 6f3c8970..00000000 --- a/rbi/lib/openai/models/response_format_text.rbi +++ /dev/null @@ -1,23 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class ResponseFormatText < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :text) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/responses/computer_tool.rbi b/rbi/lib/openai/models/responses/computer_tool.rbi deleted file mode 100644 index b6ba2c12..00000000 --- a/rbi/lib/openai/models/responses/computer_tool.rbi +++ /dev/null @@ -1,69 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ComputerTool < OpenAI::BaseModel - sig { returns(Float) } - def display_height - end - - sig { params(_: Float).returns(Float) } - def display_height=(_) - end - - sig { returns(Float) } - def display_width - end - - sig { params(_: Float).returns(Float) } - def display_width=(_) - end - - sig { returns(Symbol) } - def environment - end - - sig { params(_: Symbol).returns(Symbol) } - def environment=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params(display_height: Float, display_width: Float, environment: Symbol, type: Symbol) - .returns(T.attached_class) - end - def self.new(display_height:, display_width:, environment:, type: :computer_use_preview) - end - - sig do - override.returns({display_height: Float, display_width: Float, environment: Symbol, type: Symbol}) - end - def to_hash - end - - class Environment < OpenAI::Enum - abstract! 
- - MAC = :mac - WINDOWS = :windows - UBUNTU = :ubuntu - BROWSER = :browser - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/easy_input_message.rbi b/rbi/lib/openai/models/responses/easy_input_message.rbi deleted file mode 100644 index 428515ec..00000000 --- a/rbi/lib/openai/models/responses/easy_input_message.rbi +++ /dev/null @@ -1,96 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class EasyInputMessage < OpenAI::BaseModel - sig { returns(T.any(String, OpenAI::Models::Responses::ResponseInputMessageContentList)) } - def content - end - - sig do - params(_: T.any(String, OpenAI::Models::Responses::ResponseInputMessageContentList)) - .returns(T.any(String, OpenAI::Models::Responses::ResponseInputMessageContentList)) - end - def content=(_) - end - - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { returns(T.nilable(Symbol)) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - content: T.any(String, OpenAI::Models::Responses::ResponseInputMessageContentList), - role: Symbol, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(content:, role:, type: nil) - end - - sig do - override - .returns( - {content: T.any(String, OpenAI::Models::Responses::ResponseInputMessageContentList), role: Symbol, type: Symbol} - ) - end - def to_hash - end - - class Content < OpenAI::Union - abstract! - - class << self - sig do - override - .returns([[NilClass, String], [NilClass, OpenAI::Models::Responses::ResponseInputMessageContentList]]) - end - private def variants - end - end - end - - class Role < OpenAI::Enum - abstract! - - USER = :user - ASSISTANT = :assistant - SYSTEM = :system - DEVELOPER = :developer - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Type < OpenAI::Enum - abstract! 
- - MESSAGE = :message - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/file_search_tool.rbi b/rbi/lib/openai/models/responses/file_search_tool.rbi deleted file mode 100644 index 204c6f9d..00000000 --- a/rbi/lib/openai/models/responses/file_search_tool.rbi +++ /dev/null @@ -1,135 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class FileSearchTool < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T::Array[String]) } - def vector_store_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def vector_store_ids=(_) - end - - sig { returns(T.nilable(T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter))) } - def filters - end - - sig do - params(_: T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter)) - .returns(T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter)) - end - def filters=(_) - end - - sig { returns(T.nilable(Integer)) } - def max_num_results - end - - sig { params(_: Integer).returns(Integer) } - def max_num_results=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Responses::FileSearchTool::RankingOptions)) } - def ranking_options - end - - sig do - params(_: OpenAI::Models::Responses::FileSearchTool::RankingOptions) - .returns(OpenAI::Models::Responses::FileSearchTool::RankingOptions) - end - def ranking_options=(_) - end - - sig do - params( - vector_store_ids: T::Array[String], - filters: T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter), - max_num_results: Integer, - ranking_options: OpenAI::Models::Responses::FileSearchTool::RankingOptions, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(vector_store_ids:, filters: nil, max_num_results: nil, ranking_options: nil, type: :file_search) - end - - sig do - override - .returns( - { - type: Symbol, - vector_store_ids: T::Array[String], - filters: T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter), - max_num_results: Integer, - ranking_options: OpenAI::Models::Responses::FileSearchTool::RankingOptions - } - ) - end - def to_hash - end - - class Filters < OpenAI::Union - abstract! - - class << self - sig do - override - .returns([[NilClass, OpenAI::Models::ComparisonFilter], [NilClass, OpenAI::Models::CompoundFilter]]) - end - private def variants - end - end - end - - class RankingOptions < OpenAI::BaseModel - sig { returns(T.nilable(Symbol)) } - def ranker - end - - sig { params(_: Symbol).returns(Symbol) } - def ranker=(_) - end - - sig { returns(T.nilable(Float)) } - def score_threshold - end - - sig { params(_: Float).returns(Float) } - def score_threshold=(_) - end - - sig { params(ranker: Symbol, score_threshold: Float).returns(T.attached_class) } - def self.new(ranker: nil, score_threshold: nil) - end - - sig { override.returns({ranker: Symbol, score_threshold: Float}) } - def to_hash - end - - class Ranker < OpenAI::Enum - abstract! 
- - AUTO = :auto - DEFAULT_2024_11_15 = :"default-2024-11-15" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/function_tool.rbi b/rbi/lib/openai/models/responses/function_tool.rbi deleted file mode 100644 index b1e8d293..00000000 --- a/rbi/lib/openai/models/responses/function_tool.rbi +++ /dev/null @@ -1,77 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class FunctionTool < OpenAI::BaseModel - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { returns(T::Hash[Symbol, T.anything]) } - def parameters - end - - sig { params(_: T::Hash[Symbol, T.anything]).returns(T::Hash[Symbol, T.anything]) } - def parameters=(_) - end - - sig { returns(T::Boolean) } - def strict - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def strict=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(String)) } - def description - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def description=(_) - end - - sig do - params( - name: String, - parameters: T::Hash[Symbol, T.anything], - strict: T::Boolean, - description: T.nilable(String), - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(name:, parameters:, strict:, description: nil, type: :function) - end - - sig do - override - .returns( - { - name: String, - parameters: T::Hash[Symbol, T.anything], - strict: T::Boolean, - type: Symbol, - description: T.nilable(String) - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/input_item_list_params.rbi b/rbi/lib/openai/models/responses/input_item_list_params.rbi deleted file mode 100644 index 8f16ac93..00000000 --- a/rbi/lib/openai/models/responses/input_item_list_params.rbi +++ /dev/null @@ -1,85 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class InputItemListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(String)) } - def after - end - - sig { params(_: String).returns(String) } - def after=(_) - end - - sig { returns(T.nilable(String)) } - def before - end - - sig { params(_: String).returns(String) } - def before=(_) - end - - sig { returns(T.nilable(Integer)) } - def limit - end - - sig { params(_: Integer).returns(Integer) } - def limit=(_) - end - - sig { returns(T.nilable(Symbol)) } - def order - end - - sig { params(_: Symbol).returns(Symbol) } - def order=(_) - end - - sig do - params( - after: String, - before: String, - limit: Integer, - order: Symbol, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(after: nil, before: nil, limit: nil, order: nil, request_options: {}) - end - - sig do - override - .returns( - { - after: String, - before: String, - limit: Integer, - order: Symbol, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Order < OpenAI::Enum - abstract! 
- - ASC = :asc - DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response.rbi b/rbi/lib/openai/models/responses/response.rbi deleted file mode 100644 index c4764143..00000000 --- a/rbi/lib/openai/models/responses/response.rbi +++ /dev/null @@ -1,461 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class Response < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(Float) } - def created_at - end - - sig { params(_: Float).returns(Float) } - def created_at=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Responses::ResponseError)) } - def error - end - - sig do - params(_: T.nilable(OpenAI::Models::Responses::ResponseError)) - .returns(T.nilable(OpenAI::Models::Responses::ResponseError)) - end - def error=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Responses::Response::IncompleteDetails)) } - def incomplete_details - end - - sig do - params(_: T.nilable(OpenAI::Models::Responses::Response::IncompleteDetails)) - .returns(T.nilable(OpenAI::Models::Responses::Response::IncompleteDetails)) - end - def incomplete_details=(_) - end - - sig { returns(T.nilable(String)) } - def instructions - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def instructions=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.any(String, Symbol)) } - def model - end - - sig { params(_: T.any(String, Symbol)).returns(T.any(String, Symbol)) } - def model=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig do - returns( - T::Array[ - T.any( - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseReasoningItem - ) - ] - ) - end - def output - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseReasoningItem - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseReasoningItem - ) - ] - ) - end - def output=(_) - end - - sig { returns(T::Boolean) } - def parallel_tool_calls - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def parallel_tool_calls=(_) - end - - sig { returns(T.nilable(Float)) } - def temperature - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def temperature=(_) - end - - sig do - returns( - T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, 
OpenAI::Models::Responses::ToolChoiceFunction) - ) - end - def tool_choice - end - - sig do - params( - _: T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction) - ) - .returns( - T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction) - ) - end - def tool_choice=(_) - end - - sig do - returns( - T::Array[ - T.any( - OpenAI::Models::Responses::FileSearchTool, - OpenAI::Models::Responses::FunctionTool, - OpenAI::Models::Responses::ComputerTool, - OpenAI::Models::Responses::WebSearchTool - ) - ] - ) - end - def tools - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Responses::FileSearchTool, - OpenAI::Models::Responses::FunctionTool, - OpenAI::Models::Responses::ComputerTool, - OpenAI::Models::Responses::WebSearchTool - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Responses::FileSearchTool, - OpenAI::Models::Responses::FunctionTool, - OpenAI::Models::Responses::ComputerTool, - OpenAI::Models::Responses::WebSearchTool - ) - ] - ) - end - def tools=(_) - end - - sig { returns(T.nilable(Float)) } - def top_p - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def top_p=(_) - end - - sig { returns(T.nilable(Integer)) } - def max_output_tokens - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def max_output_tokens=(_) - end - - sig { returns(T.nilable(String)) } - def previous_response_id - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def previous_response_id=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Reasoning)) } - def reasoning - end - - sig { params(_: T.nilable(OpenAI::Models::Reasoning)).returns(T.nilable(OpenAI::Models::Reasoning)) } - def reasoning=(_) - end - - sig { returns(T.nilable(Symbol)) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Responses::ResponseTextConfig)) } - def text - end - - sig do - params(_: OpenAI::Models::Responses::ResponseTextConfig) - .returns(OpenAI::Models::Responses::ResponseTextConfig) - end - def text=(_) - end - - sig { returns(T.nilable(Symbol)) } - def truncation - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def truncation=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Responses::ResponseUsage)) } - def usage - end - - sig { params(_: OpenAI::Models::Responses::ResponseUsage).returns(OpenAI::Models::Responses::ResponseUsage) } - def usage=(_) - end - - sig { returns(T.nilable(String)) } - def user - end - - sig { params(_: String).returns(String) } - def user=(_) - end - - sig do - params( - id: String, - created_at: Float, - error: T.nilable(OpenAI::Models::Responses::ResponseError), - incomplete_details: T.nilable(OpenAI::Models::Responses::Response::IncompleteDetails), - instructions: T.nilable(String), - metadata: T.nilable(OpenAI::Models::Metadata), - model: T.any(String, Symbol), - output: T::Array[ - T.any( - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseReasoningItem - ) - ], - parallel_tool_calls: T::Boolean, - temperature: T.nilable(Float), - tool_choice: T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, 
OpenAI::Models::Responses::ToolChoiceFunction), - tools: T::Array[ - T.any( - OpenAI::Models::Responses::FileSearchTool, - OpenAI::Models::Responses::FunctionTool, - OpenAI::Models::Responses::ComputerTool, - OpenAI::Models::Responses::WebSearchTool - ) - ], - top_p: T.nilable(Float), - max_output_tokens: T.nilable(Integer), - previous_response_id: T.nilable(String), - reasoning: T.nilable(OpenAI::Models::Reasoning), - status: Symbol, - text: OpenAI::Models::Responses::ResponseTextConfig, - truncation: T.nilable(Symbol), - usage: OpenAI::Models::Responses::ResponseUsage, - user: String, - object: Symbol - ) - .returns(T.attached_class) - end - def self.new( - id:, - created_at:, - error:, - incomplete_details:, - instructions:, - metadata:, - model:, - output:, - parallel_tool_calls:, - temperature:, - tool_choice:, - tools:, - top_p:, - max_output_tokens: nil, - previous_response_id: nil, - reasoning: nil, - status: nil, - text: nil, - truncation: nil, - usage: nil, - user: nil, - object: :response - ) - end - - sig do - override - .returns( - { - id: String, - created_at: Float, - error: T.nilable(OpenAI::Models::Responses::ResponseError), - incomplete_details: T.nilable(OpenAI::Models::Responses::Response::IncompleteDetails), - instructions: T.nilable(String), - metadata: T.nilable(OpenAI::Models::Metadata), - model: T.any(String, Symbol), - object: Symbol, - output: T::Array[ - T.any( - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseReasoningItem - ) - ], - parallel_tool_calls: T::Boolean, - temperature: T.nilable(Float), - tool_choice: T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction), - tools: T::Array[ - T.any( - OpenAI::Models::Responses::FileSearchTool, - OpenAI::Models::Responses::FunctionTool, - OpenAI::Models::Responses::ComputerTool, - OpenAI::Models::Responses::WebSearchTool - ) - ], - top_p: T.nilable(Float), - max_output_tokens: T.nilable(Integer), - previous_response_id: T.nilable(String), - reasoning: T.nilable(OpenAI::Models::Reasoning), - status: Symbol, - text: OpenAI::Models::Responses::ResponseTextConfig, - truncation: T.nilable(Symbol), - usage: OpenAI::Models::Responses::ResponseUsage, - user: String - } - ) - end - def to_hash - end - - class IncompleteDetails < OpenAI::BaseModel - sig { returns(T.nilable(Symbol)) } - def reason - end - - sig { params(_: Symbol).returns(Symbol) } - def reason=(_) - end - - sig { params(reason: Symbol).returns(T.attached_class) } - def self.new(reason: nil) - end - - sig { override.returns({reason: Symbol}) } - def to_hash - end - - class Reason < OpenAI::Enum - abstract! - - MAX_OUTPUT_TOKENS = :max_output_tokens - CONTENT_FILTER = :content_filter - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class Model < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - - class ToolChoice < OpenAI::Union - abstract! 
- - class << self - sig do - override - .returns( - [[NilClass, Symbol], [NilClass, OpenAI::Models::Responses::ToolChoiceTypes], [NilClass, OpenAI::Models::Responses::ToolChoiceFunction]] - ) - end - private def variants - end - end - end - - class Truncation < OpenAI::Enum - abstract! - - AUTO = T.let(:auto, T.nilable(Symbol)) - DISABLED = T.let(:disabled, T.nilable(Symbol)) - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_audio_delta_event.rbi b/rbi/lib/openai/models/responses/response_audio_delta_event.rbi deleted file mode 100644 index cecdd81e..00000000 --- a/rbi/lib/openai/models/responses/response_audio_delta_event.rbi +++ /dev/null @@ -1,33 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseAudioDeltaEvent < OpenAI::BaseModel - sig { returns(String) } - def delta - end - - sig { params(_: String).returns(String) } - def delta=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(delta: String, type: Symbol).returns(T.attached_class) } - def self.new(delta:, type: :"response.audio.delta") - end - - sig { override.returns({delta: String, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_audio_done_event.rbi b/rbi/lib/openai/models/responses/response_audio_done_event.rbi deleted file mode 100644 index c67012a1..00000000 --- a/rbi/lib/openai/models/responses/response_audio_done_event.rbi +++ /dev/null @@ -1,25 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseAudioDoneEvent < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :"response.audio.done") - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_audio_transcript_delta_event.rbi b/rbi/lib/openai/models/responses/response_audio_transcript_delta_event.rbi deleted file mode 100644 index fe5f4c18..00000000 --- a/rbi/lib/openai/models/responses/response_audio_transcript_delta_event.rbi +++ /dev/null @@ -1,33 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseAudioTranscriptDeltaEvent < OpenAI::BaseModel - sig { returns(String) } - def delta - end - - sig { params(_: String).returns(String) } - def delta=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(delta: String, type: Symbol).returns(T.attached_class) } - def self.new(delta:, type: :"response.audio.transcript.delta") - end - - sig { override.returns({delta: String, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_audio_transcript_done_event.rbi b/rbi/lib/openai/models/responses/response_audio_transcript_done_event.rbi deleted file mode 100644 index 97204636..00000000 --- a/rbi/lib/openai/models/responses/response_audio_transcript_done_event.rbi +++ /dev/null @@ -1,25 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseAudioTranscriptDoneEvent < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - 
def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :"response.audio.transcript.done") - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi deleted file mode 100644 index 15a3e9c4..00000000 --- a/rbi/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::BaseModel - sig { returns(String) } - def delta - end - - sig { params(_: String).returns(String) } - def delta=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(delta: String, output_index: Integer, type: Symbol).returns(T.attached_class) } - def self.new(delta:, output_index:, type: :"response.code_interpreter_call.code.delta") - end - - sig { override.returns({delta: String, output_index: Integer, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rbi deleted file mode 100644 index 25f31749..00000000 --- a/rbi/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseCodeInterpreterCallCodeDoneEvent < OpenAI::BaseModel - sig { returns(String) } - def code - end - - sig { params(_: String).returns(String) } - def code=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(code: String, output_index: Integer, type: Symbol).returns(T.attached_class) } - def self.new(code:, output_index:, type: :"response.code_interpreter_call.code.done") - end - - sig { override.returns({code: String, output_index: Integer, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_call_completed_event.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_call_completed_event.rbi deleted file mode 100644 index 93ae27fe..00000000 --- a/rbi/lib/openai/models/responses/response_code_interpreter_call_completed_event.rbi +++ /dev/null @@ -1,60 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseCodeInterpreterCallCompletedEvent < OpenAI::BaseModel - sig { returns(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall) } - def code_interpreter_call - end - - sig do - params(_: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall) - .returns(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall) - end - def code_interpreter_call=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: 
Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, - output_index: Integer, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(code_interpreter_call:, output_index:, type: :"response.code_interpreter_call.completed") - end - - sig do - override - .returns( - { - code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, - output_index: Integer, - type: Symbol - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi deleted file mode 100644 index 815750a0..00000000 --- a/rbi/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +++ /dev/null @@ -1,60 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseCodeInterpreterCallInProgressEvent < OpenAI::BaseModel - sig { returns(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall) } - def code_interpreter_call - end - - sig do - params(_: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall) - .returns(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall) - end - def code_interpreter_call=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, - output_index: Integer, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(code_interpreter_call:, output_index:, type: :"response.code_interpreter_call.in_progress") - end - - sig do - override - .returns( - { - code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, - output_index: Integer, - type: Symbol - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi deleted file mode 100644 index fa22f0e6..00000000 --- a/rbi/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +++ /dev/null @@ -1,60 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseCodeInterpreterCallInterpretingEvent < OpenAI::BaseModel - sig { returns(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall) } - def code_interpreter_call - end - - sig do - params(_: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall) - .returns(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall) - end - def code_interpreter_call=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, - output_index: Integer, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(code_interpreter_call:, output_index:, type: :"response.code_interpreter_call.interpreting") - end - - sig do - override - .returns( - { - code_interpreter_call: 
OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, - output_index: Integer, - type: Symbol - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_tool_call.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_tool_call.rbi deleted file mode 100644 index d1a61cfa..00000000 --- a/rbi/lib/openai/models/responses/response_code_interpreter_tool_call.rbi +++ /dev/null @@ -1,237 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseCodeInterpreterToolCall < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(String) } - def code - end - - sig { params(_: String).returns(String) } - def code=(_) - end - - sig do - returns( - T::Array[ - T.any( - OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Logs, - OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files - ) - ] - ) - end - def results - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Logs, - OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Logs, - OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files - ) - ] - ) - end - def results=(_) - end - - sig { returns(Symbol) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - id: String, - code: String, - results: T::Array[ - T.any( - OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Logs, - OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files - ) - ], - status: Symbol, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, code:, results:, status:, type: :code_interpreter_call) - end - - sig do - override - .returns( - { - id: String, - code: String, - results: T::Array[ - T.any( - OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Logs, - OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files - ) - ], - status: Symbol, - type: Symbol - } - ) - end - def to_hash - end - - class Result < OpenAI::Union - abstract! 
- - class Logs < OpenAI::BaseModel - sig { returns(String) } - def logs - end - - sig { params(_: String).returns(String) } - def logs=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(logs: String, type: Symbol).returns(T.attached_class) } - def self.new(logs:, type: :logs) - end - - sig { override.returns({logs: String, type: Symbol}) } - def to_hash - end - end - - class Files < OpenAI::BaseModel - sig { returns(T::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files::File]) } - def files - end - - sig do - params(_: T::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files::File]) - .returns(T::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files::File]) - end - def files=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - files: T::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files::File], - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(files:, type: :files) - end - - sig do - override - .returns( - { - files: T::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files::File], - type: Symbol - } - ) - end - def to_hash - end - - class File < OpenAI::BaseModel - sig { returns(String) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { returns(String) } - def mime_type - end - - sig { params(_: String).returns(String) } - def mime_type=(_) - end - - sig { params(file_id: String, mime_type: String).returns(T.attached_class) } - def self.new(file_id:, mime_type:) - end - - sig { override.returns({file_id: String, mime_type: String}) } - def to_hash - end - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Logs], [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files]] - ) - end - private def variants - end - end - end - - class Status < OpenAI::Enum - abstract! 
- - IN_PROGRESS = :in_progress - INTERPRETING = :interpreting - COMPLETED = :completed - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_completed_event.rbi b/rbi/lib/openai/models/responses/response_completed_event.rbi deleted file mode 100644 index 6ae602db..00000000 --- a/rbi/lib/openai/models/responses/response_completed_event.rbi +++ /dev/null @@ -1,33 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseCompletedEvent < OpenAI::BaseModel - sig { returns(OpenAI::Models::Responses::Response) } - def response - end - - sig { params(_: OpenAI::Models::Responses::Response).returns(OpenAI::Models::Responses::Response) } - def response=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(response: OpenAI::Models::Responses::Response, type: Symbol).returns(T.attached_class) } - def self.new(response:, type: :"response.completed") - end - - sig { override.returns({response: OpenAI::Models::Responses::Response, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_computer_tool_call.rbi b/rbi/lib/openai/models/responses/response_computer_tool_call.rbi deleted file mode 100644 index 9360dc6c..00000000 --- a/rbi/lib/openai/models/responses/response_computer_tool_call.rbi +++ /dev/null @@ -1,561 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseComputerToolCall < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig do - returns( - T.any( - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait - ) - ) - end - def action - end - - sig do - params( - _: T.any( - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait - ) - ) - .returns( - T.any( - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, - 
OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait - ) - ) - end - def action=(_) - end - - sig { returns(String) } - def call_id - end - - sig { params(_: String).returns(String) } - def call_id=(_) - end - - sig { returns(T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::PendingSafetyCheck]) } - def pending_safety_checks - end - - sig do - params(_: T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::PendingSafetyCheck]) - .returns(T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::PendingSafetyCheck]) - end - def pending_safety_checks=(_) - end - - sig { returns(Symbol) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - id: String, - action: T.any( - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait - ), - call_id: String, - pending_safety_checks: T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::PendingSafetyCheck], - status: Symbol, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, action:, call_id:, pending_safety_checks:, status:, type:) - end - - sig do - override - .returns( - { - id: String, - action: T.any( - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait - ), - call_id: String, - pending_safety_checks: T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::PendingSafetyCheck], - status: Symbol, - type: Symbol - } - ) - end - def to_hash - end - - class Action < OpenAI::Union - abstract! 
- - class Click < OpenAI::BaseModel - sig { returns(Symbol) } - def button - end - - sig { params(_: Symbol).returns(Symbol) } - def button=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(Integer) } - def x - end - - sig { params(_: Integer).returns(Integer) } - def x=(_) - end - - sig { returns(Integer) } - def y_ - end - - sig { params(_: Integer).returns(Integer) } - def y_=(_) - end - - sig { params(button: Symbol, x: Integer, y_: Integer, type: Symbol).returns(T.attached_class) } - def self.new(button:, x:, y_:, type: :click) - end - - sig { override.returns({button: Symbol, type: Symbol, x: Integer, y_: Integer}) } - def to_hash - end - - class Button < OpenAI::Enum - abstract! - - LEFT = :left - RIGHT = :right - WHEEL = :wheel - BACK = :back - FORWARD = :forward - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class DoubleClick < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(Integer) } - def x - end - - sig { params(_: Integer).returns(Integer) } - def x=(_) - end - - sig { returns(Integer) } - def y_ - end - - sig { params(_: Integer).returns(Integer) } - def y_=(_) - end - - sig { params(x: Integer, y_: Integer, type: Symbol).returns(T.attached_class) } - def self.new(x:, y_:, type: :double_click) - end - - sig { override.returns({type: Symbol, x: Integer, y_: Integer}) } - def to_hash - end - end - - class Drag < OpenAI::BaseModel - sig { returns(T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path]) } - def path - end - - sig do - params(_: T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path]) - .returns(T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path]) - end - def path=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - path: T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path], - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(path:, type: :drag) - end - - sig do - override - .returns( - {path: T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path], type: Symbol} - ) - end - def to_hash - end - - class Path < OpenAI::BaseModel - sig { returns(Integer) } - def x - end - - sig { params(_: Integer).returns(Integer) } - def x=(_) - end - - sig { returns(Integer) } - def y_ - end - - sig { params(_: Integer).returns(Integer) } - def y_=(_) - end - - sig { params(x: Integer, y_: Integer).returns(T.attached_class) } - def self.new(x:, y_:) - end - - sig { override.returns({x: Integer, y_: Integer}) } - def to_hash - end - end - end - - class Keypress < OpenAI::BaseModel - sig { returns(T::Array[String]) } - def keys - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def keys=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(keys: T::Array[String], type: Symbol).returns(T.attached_class) } - def self.new(keys:, type: :keypress) - end - - sig { override.returns({keys: T::Array[String], type: Symbol}) } - def to_hash - end - end - - class Move < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { 
returns(Integer) } - def x - end - - sig { params(_: Integer).returns(Integer) } - def x=(_) - end - - sig { returns(Integer) } - def y_ - end - - sig { params(_: Integer).returns(Integer) } - def y_=(_) - end - - sig { params(x: Integer, y_: Integer, type: Symbol).returns(T.attached_class) } - def self.new(x:, y_:, type: :move) - end - - sig { override.returns({type: Symbol, x: Integer, y_: Integer}) } - def to_hash - end - end - - class Screenshot < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :screenshot) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - - class Scroll < OpenAI::BaseModel - sig { returns(Integer) } - def scroll_x - end - - sig { params(_: Integer).returns(Integer) } - def scroll_x=(_) - end - - sig { returns(Integer) } - def scroll_y - end - - sig { params(_: Integer).returns(Integer) } - def scroll_y=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(Integer) } - def x - end - - sig { params(_: Integer).returns(Integer) } - def x=(_) - end - - sig { returns(Integer) } - def y_ - end - - sig { params(_: Integer).returns(Integer) } - def y_=(_) - end - - sig do - params(scroll_x: Integer, scroll_y: Integer, x: Integer, y_: Integer, type: Symbol) - .returns(T.attached_class) - end - def self.new(scroll_x:, scroll_y:, x:, y_:, type: :scroll) - end - - sig do - override.returns({scroll_x: Integer, scroll_y: Integer, type: Symbol, x: Integer, y_: Integer}) - end - def to_hash - end - end - - class Type < OpenAI::BaseModel - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(text: String, type: Symbol).returns(T.attached_class) } - def self.new(text:, type: :type) - end - - sig { override.returns({text: String, type: Symbol}) } - def to_hash - end - end - - class Wait < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type: :wait) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait]] - ) - end - private def variants - end - end - end - - class PendingSafetyCheck < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(String) } - def code - end - - sig { params(_: String).returns(String) } - def code=(_) - 
end - - sig { returns(String) } - def message - end - - sig { params(_: String).returns(String) } - def message=(_) - end - - sig { params(id: String, code: String, message: String).returns(T.attached_class) } - def self.new(id:, code:, message:) - end - - sig { override.returns({id: String, code: String, message: String}) } - def to_hash - end - end - - class Status < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - COMPLETED = :completed - INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Type < OpenAI::Enum - abstract! - - COMPUTER_CALL = :computer_call - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_content.rbi b/rbi/lib/openai/models/responses/response_content.rbi deleted file mode 100644 index c2e6aed1..00000000 --- a/rbi/lib/openai/models/responses/response_content.rbi +++ /dev/null @@ -1,22 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseContent < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[NilClass, OpenAI::Models::Responses::ResponseInputText], [NilClass, OpenAI::Models::Responses::ResponseInputImage], [NilClass, OpenAI::Models::Responses::ResponseInputFile], [NilClass, OpenAI::Models::Responses::ResponseOutputText], [NilClass, OpenAI::Models::Responses::ResponseOutputRefusal]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_content_part_added_event.rbi b/rbi/lib/openai/models/responses/response_content_part_added_event.rbi deleted file mode 100644 index 6280062a..00000000 --- a/rbi/lib/openai/models/responses/response_content_part_added_event.rbi +++ /dev/null @@ -1,103 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseContentPartAddedEvent < OpenAI::BaseModel - sig { returns(Integer) } - def content_index - end - - sig { params(_: Integer).returns(Integer) } - def content_index=(_) - end - - sig { returns(String) } - def item_id - end - - sig { params(_: String).returns(String) } - def item_id=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig do - returns( - T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal) - ) - end - def part - end - - sig do - params( - _: T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal) - ) - .returns( - T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal) - ) - end - def part=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - content_index: Integer, - item_id: String, - output_index: Integer, - part: T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal), - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(content_index:, item_id:, output_index:, part:, type: :"response.content_part.added") - end - - sig do - override - .returns( - { - content_index: Integer, - item_id: String, - output_index: Integer, - part: T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal), - type: Symbol - } - ) - end - def 
to_hash - end - - class Part < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseOutputText], [Symbol, OpenAI::Models::Responses::ResponseOutputRefusal]] - ) - end - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_content_part_done_event.rbi b/rbi/lib/openai/models/responses/response_content_part_done_event.rbi deleted file mode 100644 index 01ea5776..00000000 --- a/rbi/lib/openai/models/responses/response_content_part_done_event.rbi +++ /dev/null @@ -1,103 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseContentPartDoneEvent < OpenAI::BaseModel - sig { returns(Integer) } - def content_index - end - - sig { params(_: Integer).returns(Integer) } - def content_index=(_) - end - - sig { returns(String) } - def item_id - end - - sig { params(_: String).returns(String) } - def item_id=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig do - returns( - T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal) - ) - end - def part - end - - sig do - params( - _: T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal) - ) - .returns( - T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal) - ) - end - def part=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - content_index: Integer, - item_id: String, - output_index: Integer, - part: T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal), - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(content_index:, item_id:, output_index:, part:, type: :"response.content_part.done") - end - - sig do - override - .returns( - { - content_index: Integer, - item_id: String, - output_index: Integer, - part: T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal), - type: Symbol - } - ) - end - def to_hash - end - - class Part < OpenAI::Union - abstract! 
- - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseOutputText], [Symbol, OpenAI::Models::Responses::ResponseOutputRefusal]] - ) - end - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_create_params.rbi b/rbi/lib/openai/models/responses/response_create_params.rbi deleted file mode 100644 index fe1d6f07..00000000 --- a/rbi/lib/openai/models/responses/response_create_params.rbi +++ /dev/null @@ -1,336 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.any(String, OpenAI::Models::Responses::ResponseInput)) } - def input - end - - sig do - params(_: T.any(String, OpenAI::Models::Responses::ResponseInput)) - .returns(T.any(String, OpenAI::Models::Responses::ResponseInput)) - end - def input=(_) - end - - sig { returns(T.any(String, Symbol)) } - def model - end - - sig { params(_: T.any(String, Symbol)).returns(T.any(String, Symbol)) } - def model=(_) - end - - sig { returns(T.nilable(T::Array[Symbol])) } - def include - end - - sig { params(_: T.nilable(T::Array[Symbol])).returns(T.nilable(T::Array[Symbol])) } - def include=(_) - end - - sig { returns(T.nilable(String)) } - def instructions - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def instructions=(_) - end - - sig { returns(T.nilable(Integer)) } - def max_output_tokens - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def max_output_tokens=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def parallel_tool_calls - end - - sig { params(_: T.nilable(T::Boolean)).returns(T.nilable(T::Boolean)) } - def parallel_tool_calls=(_) - end - - sig { returns(T.nilable(String)) } - def previous_response_id - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def previous_response_id=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Reasoning)) } - def reasoning - end - - sig { params(_: T.nilable(OpenAI::Models::Reasoning)).returns(T.nilable(OpenAI::Models::Reasoning)) } - def reasoning=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def store - end - - sig { params(_: T.nilable(T::Boolean)).returns(T.nilable(T::Boolean)) } - def store=(_) - end - - sig { returns(T.nilable(Float)) } - def temperature - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def temperature=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Responses::ResponseTextConfig)) } - def text - end - - sig do - params(_: OpenAI::Models::Responses::ResponseTextConfig) - .returns(OpenAI::Models::Responses::ResponseTextConfig) - end - def text=(_) - end - - sig do - returns( - T.nilable( - T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction) - ) - ) - end - def tool_choice - end - - sig do - params( - _: T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction) - ) - .returns( - T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction) - ) - end - def tool_choice=(_) - end - - sig do - returns( - T.nilable( - T::Array[ - T.any( - 
OpenAI::Models::Responses::FileSearchTool, - OpenAI::Models::Responses::FunctionTool, - OpenAI::Models::Responses::ComputerTool, - OpenAI::Models::Responses::WebSearchTool - ) - ] - ) - ) - end - def tools - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Responses::FileSearchTool, - OpenAI::Models::Responses::FunctionTool, - OpenAI::Models::Responses::ComputerTool, - OpenAI::Models::Responses::WebSearchTool - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Responses::FileSearchTool, - OpenAI::Models::Responses::FunctionTool, - OpenAI::Models::Responses::ComputerTool, - OpenAI::Models::Responses::WebSearchTool - ) - ] - ) - end - def tools=(_) - end - - sig { returns(T.nilable(Float)) } - def top_p - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def top_p=(_) - end - - sig { returns(T.nilable(Symbol)) } - def truncation - end - - sig { params(_: T.nilable(Symbol)).returns(T.nilable(Symbol)) } - def truncation=(_) - end - - sig { returns(T.nilable(String)) } - def user - end - - sig { params(_: String).returns(String) } - def user=(_) - end - - sig do - params( - input: T.any(String, OpenAI::Models::Responses::ResponseInput), - model: T.any(String, Symbol), - include: T.nilable(T::Array[Symbol]), - instructions: T.nilable(String), - max_output_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - parallel_tool_calls: T.nilable(T::Boolean), - previous_response_id: T.nilable(String), - reasoning: T.nilable(OpenAI::Models::Reasoning), - store: T.nilable(T::Boolean), - temperature: T.nilable(Float), - text: OpenAI::Models::Responses::ResponseTextConfig, - tool_choice: T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction), - tools: T::Array[ - T.any( - OpenAI::Models::Responses::FileSearchTool, - OpenAI::Models::Responses::FunctionTool, - OpenAI::Models::Responses::ComputerTool, - OpenAI::Models::Responses::WebSearchTool - ) - ], - top_p: T.nilable(Float), - truncation: T.nilable(Symbol), - user: String, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new( - input:, - model:, - include: nil, - instructions: nil, - max_output_tokens: nil, - metadata: nil, - parallel_tool_calls: nil, - previous_response_id: nil, - reasoning: nil, - store: nil, - temperature: nil, - text: nil, - tool_choice: nil, - tools: nil, - top_p: nil, - truncation: nil, - user: nil, - request_options: {} - ) - end - - sig do - override - .returns( - { - input: T.any(String, OpenAI::Models::Responses::ResponseInput), - model: T.any(String, Symbol), - include: T.nilable(T::Array[Symbol]), - instructions: T.nilable(String), - max_output_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - parallel_tool_calls: T.nilable(T::Boolean), - previous_response_id: T.nilable(String), - reasoning: T.nilable(OpenAI::Models::Reasoning), - store: T.nilable(T::Boolean), - temperature: T.nilable(Float), - text: OpenAI::Models::Responses::ResponseTextConfig, - tool_choice: T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction), - tools: T::Array[ - T.any( - OpenAI::Models::Responses::FileSearchTool, - OpenAI::Models::Responses::FunctionTool, - OpenAI::Models::Responses::ComputerTool, - OpenAI::Models::Responses::WebSearchTool - ) - ], - top_p: T.nilable(Float), - truncation: T.nilable(Symbol), - user: String, - request_options: OpenAI::RequestOptions - } - ) - 
end - def to_hash - end - - class Input < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, OpenAI::Models::Responses::ResponseInput]]) } - private def variants - end - end - end - - class Model < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } - private def variants - end - end - end - - class ToolChoice < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[NilClass, Symbol], [NilClass, OpenAI::Models::Responses::ToolChoiceTypes], [NilClass, OpenAI::Models::Responses::ToolChoiceFunction]] - ) - end - private def variants - end - end - end - - class Truncation < OpenAI::Enum - abstract! - - AUTO = T.let(:auto, T.nilable(Symbol)) - DISABLED = T.let(:disabled, T.nilable(Symbol)) - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_created_event.rbi b/rbi/lib/openai/models/responses/response_created_event.rbi deleted file mode 100644 index 2a500348..00000000 --- a/rbi/lib/openai/models/responses/response_created_event.rbi +++ /dev/null @@ -1,33 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseCreatedEvent < OpenAI::BaseModel - sig { returns(OpenAI::Models::Responses::Response) } - def response - end - - sig { params(_: OpenAI::Models::Responses::Response).returns(OpenAI::Models::Responses::Response) } - def response=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(response: OpenAI::Models::Responses::Response, type: Symbol).returns(T.attached_class) } - def self.new(response:, type: :"response.created") - end - - sig { override.returns({response: OpenAI::Models::Responses::Response, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_delete_params.rbi b/rbi/lib/openai/models/responses/response_delete_params.rbi deleted file mode 100644 index b268b24b..00000000 --- a/rbi/lib/openai/models/responses/response_delete_params.rbi +++ /dev/null @@ -1,23 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_error.rbi b/rbi/lib/openai/models/responses/response_error.rbi deleted file mode 100644 index 367eea43..00000000 --- a/rbi/lib/openai/models/responses/response_error.rbi +++ /dev/null @@ -1,62 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseError < OpenAI::BaseModel - sig { returns(Symbol) } - def code - end - - sig { params(_: Symbol).returns(Symbol) } - def code=(_) - end - - sig { returns(String) } - def message - end - - sig { params(_: String).returns(String) } - def message=(_) - end - - sig { params(code: Symbol, message: String).returns(T.attached_class) } - def self.new(code:, message:) - end - - sig { override.returns({code: Symbol, message: String}) } - def to_hash - end - - class Code < OpenAI::Enum - 
abstract! - - SERVER_ERROR = :server_error - RATE_LIMIT_EXCEEDED = :rate_limit_exceeded - INVALID_PROMPT = :invalid_prompt - VECTOR_STORE_TIMEOUT = :vector_store_timeout - INVALID_IMAGE = :invalid_image - INVALID_IMAGE_FORMAT = :invalid_image_format - INVALID_BASE64_IMAGE = :invalid_base64_image - INVALID_IMAGE_URL = :invalid_image_url - IMAGE_TOO_LARGE = :image_too_large - IMAGE_TOO_SMALL = :image_too_small - IMAGE_PARSE_ERROR = :image_parse_error - IMAGE_CONTENT_POLICY_VIOLATION = :image_content_policy_violation - INVALID_IMAGE_MODE = :invalid_image_mode - IMAGE_FILE_TOO_LARGE = :image_file_too_large - UNSUPPORTED_IMAGE_MEDIA_TYPE = :unsupported_image_media_type - EMPTY_IMAGE_FILE = :empty_image_file - FAILED_TO_DOWNLOAD_IMAGE = :failed_to_download_image - IMAGE_FILE_NOT_FOUND = :image_file_not_found - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_error_event.rbi b/rbi/lib/openai/models/responses/response_error_event.rbi deleted file mode 100644 index 03c5b3b3..00000000 --- a/rbi/lib/openai/models/responses/response_error_event.rbi +++ /dev/null @@ -1,54 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseErrorEvent < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def code - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def code=(_) - end - - sig { returns(String) } - def message - end - - sig { params(_: String).returns(String) } - def message=(_) - end - - sig { returns(T.nilable(String)) } - def param - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def param=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params(code: T.nilable(String), message: String, param: T.nilable(String), type: Symbol) - .returns(T.attached_class) - end - def self.new(code:, message:, param:, type: :error) - end - - sig do - override.returns({code: T.nilable(String), message: String, param: T.nilable(String), type: Symbol}) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_failed_event.rbi b/rbi/lib/openai/models/responses/response_failed_event.rbi deleted file mode 100644 index 5c3f69cb..00000000 --- a/rbi/lib/openai/models/responses/response_failed_event.rbi +++ /dev/null @@ -1,33 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseFailedEvent < OpenAI::BaseModel - sig { returns(OpenAI::Models::Responses::Response) } - def response - end - - sig { params(_: OpenAI::Models::Responses::Response).returns(OpenAI::Models::Responses::Response) } - def response=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(response: OpenAI::Models::Responses::Response, type: Symbol).returns(T.attached_class) } - def self.new(response:, type: :"response.failed") - end - - sig { override.returns({response: OpenAI::Models::Responses::Response, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_file_search_call_completed_event.rbi b/rbi/lib/openai/models/responses/response_file_search_call_completed_event.rbi deleted file mode 100644 index 77ecf89c..00000000 --- a/rbi/lib/openai/models/responses/response_file_search_call_completed_event.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: 
strong - -module OpenAI - module Models - module Responses - class ResponseFileSearchCallCompletedEvent < OpenAI::BaseModel - sig { returns(String) } - def item_id - end - - sig { params(_: String).returns(String) } - def item_id=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } - def self.new(item_id:, output_index:, type: :"response.file_search_call.completed") - end - - sig { override.returns({item_id: String, output_index: Integer, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_file_search_call_in_progress_event.rbi b/rbi/lib/openai/models/responses/response_file_search_call_in_progress_event.rbi deleted file mode 100644 index f4d4c09f..00000000 --- a/rbi/lib/openai/models/responses/response_file_search_call_in_progress_event.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseFileSearchCallInProgressEvent < OpenAI::BaseModel - sig { returns(String) } - def item_id - end - - sig { params(_: String).returns(String) } - def item_id=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } - def self.new(item_id:, output_index:, type: :"response.file_search_call.in_progress") - end - - sig { override.returns({item_id: String, output_index: Integer, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_file_search_call_searching_event.rbi b/rbi/lib/openai/models/responses/response_file_search_call_searching_event.rbi deleted file mode 100644 index 3ea7ffd2..00000000 --- a/rbi/lib/openai/models/responses/response_file_search_call_searching_event.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseFileSearchCallSearchingEvent < OpenAI::BaseModel - sig { returns(String) } - def item_id - end - - sig { params(_: String).returns(String) } - def item_id=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } - def self.new(item_id:, output_index:, type: :"response.file_search_call.searching") - end - - sig { override.returns({item_id: String, output_index: Integer, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_file_search_tool_call.rbi b/rbi/lib/openai/models/responses/response_file_search_tool_call.rbi deleted file mode 100644 index c0c8564d..00000000 --- a/rbi/lib/openai/models/responses/response_file_search_tool_call.rbi +++ /dev/null @@ -1,179 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseFileSearchToolCall < OpenAI::BaseModel - sig { 
returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T::Array[String]) } - def queries - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def queries=(_) - end - - sig { returns(Symbol) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(T::Array[OpenAI::Models::Responses::ResponseFileSearchToolCall::Result])) } - def results - end - - sig do - params(_: T.nilable(T::Array[OpenAI::Models::Responses::ResponseFileSearchToolCall::Result])) - .returns(T.nilable(T::Array[OpenAI::Models::Responses::ResponseFileSearchToolCall::Result])) - end - def results=(_) - end - - sig do - params( - id: String, - queries: T::Array[String], - status: Symbol, - results: T.nilable(T::Array[OpenAI::Models::Responses::ResponseFileSearchToolCall::Result]), - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, queries:, status:, results: nil, type: :file_search_call) - end - - sig do - override - .returns( - { - id: String, - queries: T::Array[String], - status: Symbol, - type: Symbol, - results: T.nilable(T::Array[OpenAI::Models::Responses::ResponseFileSearchToolCall::Result]) - } - ) - end - def to_hash - end - - class Status < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - SEARCHING = :searching - COMPLETED = :completed - INCOMPLETE = :incomplete - FAILED = :failed - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Result < OpenAI::BaseModel - sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } - def attributes - end - - sig do - params(_: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) - .returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) - end - def attributes=(_) - end - - sig { returns(T.nilable(String)) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { returns(T.nilable(String)) } - def filename - end - - sig { params(_: String).returns(String) } - def filename=(_) - end - - sig { returns(T.nilable(Float)) } - def score - end - - sig { params(_: Float).returns(Float) } - def score=(_) - end - - sig { returns(T.nilable(String)) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig do - params( - attributes: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)]), - file_id: String, - filename: String, - score: Float, - text: String - ) - .returns(T.attached_class) - end - def self.new(attributes: nil, file_id: nil, filename: nil, score: nil, text: nil) - end - - sig do - override - .returns( - { - attributes: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)]), - file_id: String, - filename: String, - score: Float, - text: String - } - ) - end - def to_hash - end - - class Attribute < OpenAI::Union - abstract! 
- - class << self - sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } - private def variants - end - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_format_text_config.rbi b/rbi/lib/openai/models/responses/response_format_text_config.rbi deleted file mode 100644 index 170c0610..00000000 --- a/rbi/lib/openai/models/responses/response_format_text_config.rbi +++ /dev/null @@ -1,22 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseFormatTextConfig < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::ResponseFormatText], [Symbol, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig], [Symbol, OpenAI::Models::ResponseFormatJSONObject]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_format_text_json_schema_config.rbi b/rbi/lib/openai/models/responses/response_format_text_json_schema_config.rbi deleted file mode 100644 index 4899b49f..00000000 --- a/rbi/lib/openai/models/responses/response_format_text_json_schema_config.rbi +++ /dev/null @@ -1,77 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseFormatTextJSONSchemaConfig < OpenAI::BaseModel - sig { returns(T::Hash[Symbol, T.anything]) } - def schema - end - - sig { params(_: T::Hash[Symbol, T.anything]).returns(T::Hash[Symbol, T.anything]) } - def schema=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(String)) } - def description - end - - sig { params(_: String).returns(String) } - def description=(_) - end - - sig { returns(T.nilable(String)) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def strict - end - - sig { params(_: T.nilable(T::Boolean)).returns(T.nilable(T::Boolean)) } - def strict=(_) - end - - sig do - params( - schema: T::Hash[Symbol, T.anything], - description: String, - name: String, - strict: T.nilable(T::Boolean), - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(schema:, description: nil, name: nil, strict: nil, type: :json_schema) - end - - sig do - override - .returns( - { - schema: T::Hash[Symbol, T.anything], - type: Symbol, - description: String, - name: String, - strict: T.nilable(T::Boolean) - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_function_call_arguments_delta_event.rbi b/rbi/lib/openai/models/responses/response_function_call_arguments_delta_event.rbi deleted file mode 100644 index c543936a..00000000 --- a/rbi/lib/openai/models/responses/response_function_call_arguments_delta_event.rbi +++ /dev/null @@ -1,56 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseFunctionCallArgumentsDeltaEvent < OpenAI::BaseModel - sig { returns(String) } - def delta - end - - sig { params(_: String).returns(String) } - def delta=(_) - end - - sig { returns(String) } - def item_id - end - - sig { params(_: String).returns(String) } - def item_id=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - delta: 
String, - item_id: String, - output_index: Integer, - type: Symbol - ).returns(T.attached_class) - end - def self.new(delta:, item_id:, output_index:, type: :"response.function_call_arguments.delta") - end - - sig { override.returns({delta: String, item_id: String, output_index: Integer, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_function_call_arguments_done_event.rbi b/rbi/lib/openai/models/responses/response_function_call_arguments_done_event.rbi deleted file mode 100644 index 17234bf9..00000000 --- a/rbi/lib/openai/models/responses/response_function_call_arguments_done_event.rbi +++ /dev/null @@ -1,56 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseFunctionCallArgumentsDoneEvent < OpenAI::BaseModel - sig { returns(String) } - def arguments - end - - sig { params(_: String).returns(String) } - def arguments=(_) - end - - sig { returns(String) } - def item_id - end - - sig { params(_: String).returns(String) } - def item_id=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - arguments: String, - item_id: String, - output_index: Integer, - type: Symbol - ).returns(T.attached_class) - end - def self.new(arguments:, item_id:, output_index:, type: :"response.function_call_arguments.done") - end - - sig { override.returns({arguments: String, item_id: String, output_index: Integer, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_function_tool_call.rbi b/rbi/lib/openai/models/responses/response_function_tool_call.rbi deleted file mode 100644 index 97c8db84..00000000 --- a/rbi/lib/openai/models/responses/response_function_tool_call.rbi +++ /dev/null @@ -1,92 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseFunctionToolCall < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(String) } - def arguments - end - - sig { params(_: String).returns(String) } - def arguments=(_) - end - - sig { returns(String) } - def call_id - end - - sig { params(_: String).returns(String) } - def call_id=(_) - end - - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(Symbol)) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig do - params(id: String, arguments: String, call_id: String, name: String, status: Symbol, type: Symbol) - .returns(T.attached_class) - end - def self.new(id:, arguments:, call_id:, name:, status: nil, type: :function_call) - end - - sig do - override - .returns({ - id: String, - arguments: String, - call_id: String, - name: String, - type: Symbol, - status: Symbol - }) - end - def to_hash - end - - class Status < OpenAI::Enum - abstract! 
- - IN_PROGRESS = :in_progress - COMPLETED = :completed - INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_function_web_search.rbi b/rbi/lib/openai/models/responses/response_function_web_search.rbi deleted file mode 100644 index fc8ec7a6..00000000 --- a/rbi/lib/openai/models/responses/response_function_web_search.rbi +++ /dev/null @@ -1,56 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseFunctionWebSearch < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(Symbol) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(id: String, status: Symbol, type: Symbol).returns(T.attached_class) } - def self.new(id:, status:, type: :web_search_call) - end - - sig { override.returns({id: String, status: Symbol, type: Symbol}) } - def to_hash - end - - class Status < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - SEARCHING = :searching - COMPLETED = :completed - FAILED = :failed - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_in_progress_event.rbi b/rbi/lib/openai/models/responses/response_in_progress_event.rbi deleted file mode 100644 index ad20d756..00000000 --- a/rbi/lib/openai/models/responses/response_in_progress_event.rbi +++ /dev/null @@ -1,33 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseInProgressEvent < OpenAI::BaseModel - sig { returns(OpenAI::Models::Responses::Response) } - def response - end - - sig { params(_: OpenAI::Models::Responses::Response).returns(OpenAI::Models::Responses::Response) } - def response=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(response: OpenAI::Models::Responses::Response, type: Symbol).returns(T.attached_class) } - def self.new(response:, type: :"response.in_progress") - end - - sig { override.returns({response: OpenAI::Models::Responses::Response, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_includable.rbi b/rbi/lib/openai/models/responses/response_includable.rbi deleted file mode 100644 index 6eab634e..00000000 --- a/rbi/lib/openai/models/responses/response_includable.rbi +++ /dev/null @@ -1,21 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseIncludable < OpenAI::Enum - abstract! 
- - FILE_SEARCH_CALL_RESULTS = :"file_search_call.results" - MESSAGE_INPUT_IMAGE_IMAGE_URL = :"message.input_image.image_url" - COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL = :"computer_call_output.output.image_url" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_incomplete_event.rbi b/rbi/lib/openai/models/responses/response_incomplete_event.rbi deleted file mode 100644 index ec47033d..00000000 --- a/rbi/lib/openai/models/responses/response_incomplete_event.rbi +++ /dev/null @@ -1,33 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseIncompleteEvent < OpenAI::BaseModel - sig { returns(OpenAI::Models::Responses::Response) } - def response - end - - sig { params(_: OpenAI::Models::Responses::Response).returns(OpenAI::Models::Responses::Response) } - def response=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(response: OpenAI::Models::Responses::Response, type: Symbol).returns(T.attached_class) } - def self.new(response:, type: :"response.incomplete") - end - - sig { override.returns({response: OpenAI::Models::Responses::Response, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_input.rbi b/rbi/lib/openai/models/responses/response_input.rbi deleted file mode 100644 index 622e3348..00000000 --- a/rbi/lib/openai/models/responses/response_input.rbi +++ /dev/null @@ -1,25 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - ResponseInput = T.type_alias do - T::Array[ - T.any( - OpenAI::Models::Responses::EasyInputMessage, - OpenAI::Models::Responses::ResponseInputItem::Message, - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, - OpenAI::Models::Responses::ResponseReasoningItem, - OpenAI::Models::Responses::ResponseInputItem::ItemReference - ) - ] - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_input_audio.rbi b/rbi/lib/openai/models/responses/response_input_audio.rbi deleted file mode 100644 index d7f1eee2..00000000 --- a/rbi/lib/openai/models/responses/response_input_audio.rbi +++ /dev/null @@ -1,54 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseInputAudio < OpenAI::BaseModel - sig { returns(String) } - def data - end - - sig { params(_: String).returns(String) } - def data=(_) - end - - sig { returns(Symbol) } - def format_ - end - - sig { params(_: Symbol).returns(Symbol) } - def format_=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(data: String, format_: Symbol, type: Symbol).returns(T.attached_class) } - def self.new(data:, format_:, type: :input_audio) - end - - sig { override.returns({data: String, format_: Symbol, type: Symbol}) } - def to_hash - end - - class Format < OpenAI::Enum - abstract! 
- - MP3 = :mp3 - WAV = :wav - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_input_content.rbi b/rbi/lib/openai/models/responses/response_input_content.rbi deleted file mode 100644 index 5857ddd3..00000000 --- a/rbi/lib/openai/models/responses/response_input_content.rbi +++ /dev/null @@ -1,22 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseInputContent < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseInputText], [Symbol, OpenAI::Models::Responses::ResponseInputImage], [Symbol, OpenAI::Models::Responses::ResponseInputFile]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_input_file.rbi b/rbi/lib/openai/models/responses/response_input_file.rbi deleted file mode 100644 index e6245523..00000000 --- a/rbi/lib/openai/models/responses/response_input_file.rbi +++ /dev/null @@ -1,51 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseInputFile < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(String)) } - def file_data - end - - sig { params(_: String).returns(String) } - def file_data=(_) - end - - sig { returns(T.nilable(String)) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { returns(T.nilable(String)) } - def filename - end - - sig { params(_: String).returns(String) } - def filename=(_) - end - - sig do - params(file_data: String, file_id: String, filename: String, type: Symbol).returns(T.attached_class) - end - def self.new(file_data: nil, file_id: nil, filename: nil, type: :input_file) - end - - sig { override.returns({type: Symbol, file_data: String, file_id: String, filename: String}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_input_image.rbi b/rbi/lib/openai/models/responses/response_input_image.rbi deleted file mode 100644 index 62bd604a..00000000 --- a/rbi/lib/openai/models/responses/response_input_image.rbi +++ /dev/null @@ -1,75 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseInputImage < OpenAI::BaseModel - sig { returns(Symbol) } - def detail - end - - sig { params(_: Symbol).returns(Symbol) } - def detail=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(String)) } - def file_id - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def file_id=(_) - end - - sig { returns(T.nilable(String)) } - def image_url - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def image_url=(_) - end - - sig do - params(detail: Symbol, file_id: T.nilable(String), image_url: T.nilable(String), type: Symbol) - .returns(T.attached_class) - end - def self.new(detail:, file_id: nil, image_url: nil, type: :input_image) - end - - sig do - override.returns( - { - detail: Symbol, - type: Symbol, - file_id: T.nilable(String), - image_url: T.nilable(String) - } - ) - end - def to_hash - end - - class Detail < OpenAI::Enum - abstract! 
- - HIGH = :high - LOW = :low - AUTO = :auto - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_input_item.rbi b/rbi/lib/openai/models/responses/response_input_item.rbi deleted file mode 100644 index a2ac733a..00000000 --- a/rbi/lib/openai/models/responses/response_input_item.rbi +++ /dev/null @@ -1,398 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseInputItem < OpenAI::Union - abstract! - - class Message < OpenAI::BaseModel - sig { returns(OpenAI::Models::Responses::ResponseInputMessageContentList) } - def content - end - - sig do - params(_: OpenAI::Models::Responses::ResponseInputMessageContentList) - .returns(OpenAI::Models::Responses::ResponseInputMessageContentList) - end - def content=(_) - end - - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { returns(T.nilable(Symbol)) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(T.nilable(Symbol)) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - content: OpenAI::Models::Responses::ResponseInputMessageContentList, - role: Symbol, - status: Symbol, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(content:, role:, status: nil, type: nil) - end - - sig do - override - .returns( - {content: OpenAI::Models::Responses::ResponseInputMessageContentList, role: Symbol, status: Symbol, type: Symbol} - ) - end - def to_hash - end - - class Role < OpenAI::Enum - abstract! - - USER = :user - SYSTEM = :system - DEVELOPER = :developer - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Status < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - COMPLETED = :completed - INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Type < OpenAI::Enum - abstract! 
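A hypothetical sketch of building the nested `ResponseInputItem::Message` declared above, whose `content` is the `ResponseInputMessageContentList` alias (an array of input text/image/file models; `ResponseInputText` is declared later in this patch with `text:` and a `:input_text` type default):

require "openai"

# Hedged usage of the deleted input-item models; the prompt text is illustrative.
message = OpenAI::Models::Responses::ResponseInputItem::Message.new(
  content: [OpenAI::Models::Responses::ResponseInputText.new(text: "Describe this image.")],
  role: :user # :user, :system, or :developer, per the Role enum above
)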
- - MESSAGE = :message - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class ComputerCallOutput < OpenAI::BaseModel - sig { returns(String) } - def call_id - end - - sig { params(_: String).returns(String) } - def call_id=(_) - end - - sig { returns(OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Output) } - def output - end - - sig do - params(_: OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Output) - .returns(OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Output) - end - def output=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(String)) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig do - returns( - T.nilable( - T::Array[OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck] - ) - ) - end - def acknowledged_safety_checks - end - - sig do - params( - _: T::Array[OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck] - ) - .returns( - T::Array[OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck] - ) - end - def acknowledged_safety_checks=(_) - end - - sig { returns(T.nilable(Symbol)) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig do - params( - call_id: String, - output: OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Output, - id: String, - acknowledged_safety_checks: T::Array[OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck], - status: Symbol, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(call_id:, output:, id: nil, acknowledged_safety_checks: nil, status: nil, type: :computer_call_output) - end - - sig do - override - .returns( - { - call_id: String, - output: OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Output, - type: Symbol, - id: String, - acknowledged_safety_checks: T::Array[OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck], - status: Symbol - } - ) - end - def to_hash - end - - class Output < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(String)) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { returns(T.nilable(String)) } - def image_url - end - - sig { params(_: String).returns(String) } - def image_url=(_) - end - - sig { params(file_id: String, image_url: String, type: Symbol).returns(T.attached_class) } - def self.new(file_id: nil, image_url: nil, type: :computer_screenshot) - end - - sig { override.returns({type: Symbol, file_id: String, image_url: String}) } - def to_hash - end - end - - class AcknowledgedSafetyCheck < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(String) } - def code - end - - sig { params(_: String).returns(String) } - def code=(_) - end - - sig { returns(String) } - def message - end - - sig { params(_: String).returns(String) } - def message=(_) - end - - sig { params(id: String, code: String, message: String).returns(T.attached_class) } - def self.new(id:, code:, message:) - end - - sig { override.returns({id: String, code: String, message: 
String}) } - def to_hash - end - end - - class Status < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - COMPLETED = :completed - INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class FunctionCallOutput < OpenAI::BaseModel - sig { returns(String) } - def call_id - end - - sig { params(_: String).returns(String) } - def call_id=(_) - end - - sig { returns(String) } - def output - end - - sig { params(_: String).returns(String) } - def output=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(String)) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T.nilable(Symbol)) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig do - params( - call_id: String, - output: String, - id: String, - status: Symbol, - type: Symbol - ).returns(T.attached_class) - end - def self.new(call_id:, output:, id: nil, status: nil, type: :function_call_output) - end - - sig do - override.returns({call_id: String, output: String, type: Symbol, id: String, status: Symbol}) - end - def to_hash - end - - class Status < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - COMPLETED = :completed - INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class ItemReference < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(id: String, type: Symbol).returns(T.attached_class) } - def self.new(id:, type: :item_reference) - end - - sig { override.returns({id: String, type: Symbol}) } - def to_hash - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::EasyInputMessage], [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message], [Symbol, OpenAI::Models::Responses::ResponseOutputMessage], [Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall], [Symbol, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput], [Symbol, OpenAI::Models::Responses::ResponseFunctionWebSearch], [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCall], [Symbol, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput], [Symbol, OpenAI::Models::Responses::ResponseReasoningItem], [Symbol, OpenAI::Models::Responses::ResponseInputItem::ItemReference]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_input_message_content_list.rbi b/rbi/lib/openai/models/responses/response_input_message_content_list.rbi deleted file mode 100644 index d4c36f8d..00000000 --- a/rbi/lib/openai/models/responses/response_input_message_content_list.rbi +++ /dev/null @@ -1,17 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - ResponseInputMessageContentList = T.type_alias do - T::Array[ - T.any( - OpenAI::Models::Responses::ResponseInputText, - OpenAI::Models::Responses::ResponseInputImage, - OpenAI::Models::Responses::ResponseInputFile - ) - ] - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_input_text.rbi 
b/rbi/lib/openai/models/responses/response_input_text.rbi deleted file mode 100644 index df900197..00000000 --- a/rbi/lib/openai/models/responses/response_input_text.rbi +++ /dev/null @@ -1,33 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseInputText < OpenAI::BaseModel - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(text: String, type: Symbol).returns(T.attached_class) } - def self.new(text:, type: :input_text) - end - - sig { override.returns({text: String, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_item_list.rbi b/rbi/lib/openai/models/responses/response_item_list.rbi deleted file mode 100644 index c24e41f8..00000000 --- a/rbi/lib/openai/models/responses/response_item_list.rbi +++ /dev/null @@ -1,525 +0,0 @@ -# typed: strong - -module OpenAI - module Models - ResponseItemList = T.type_alias { Responses::ResponseItemList } - - module Responses - class ResponseItemList < OpenAI::BaseModel - sig do - returns( - T::Array[ - T.any( - OpenAI::Models::Responses::ResponseItemList::Data::Message, - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput - ) - ] - ) - end - def data - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Responses::ResponseItemList::Data::Message, - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Responses::ResponseItemList::Data::Message, - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput - ) - ] - ) - end - def data=(_) - end - - sig { returns(String) } - def first_id - end - - sig { params(_: String).returns(String) } - def first_id=(_) - end - - sig { returns(T::Boolean) } - def has_more - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def has_more=(_) - end - - sig { returns(String) } - def last_id - end - - sig { params(_: String).returns(String) } - def last_id=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig do - params( - data: T::Array[ - T.any( - OpenAI::Models::Responses::ResponseItemList::Data::Message, - OpenAI::Models::Responses::ResponseOutputMessage, - 
OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput - ) - ], - first_id: String, - has_more: T::Boolean, - last_id: String, - object: Symbol - ) - .returns(T.attached_class) - end - def self.new(data:, first_id:, has_more:, last_id:, object: :list) - end - - sig do - override - .returns( - { - data: T::Array[ - T.any( - OpenAI::Models::Responses::ResponseItemList::Data::Message, - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput - ) - ], - first_id: String, - has_more: T::Boolean, - last_id: String, - object: Symbol - } - ) - end - def to_hash - end - - class Data < OpenAI::Union - abstract! - - class Message < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(OpenAI::Models::Responses::ResponseInputMessageContentList) } - def content - end - - sig do - params(_: OpenAI::Models::Responses::ResponseInputMessageContentList) - .returns(OpenAI::Models::Responses::ResponseInputMessageContentList) - end - def content=(_) - end - - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { returns(T.nilable(Symbol)) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(T.nilable(Symbol)) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - id: String, - content: OpenAI::Models::Responses::ResponseInputMessageContentList, - role: Symbol, - status: Symbol, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, content:, role:, status: nil, type: nil) - end - - sig do - override - .returns( - { - id: String, - content: OpenAI::Models::Responses::ResponseInputMessageContentList, - role: Symbol, - status: Symbol, - type: Symbol - } - ) - end - def to_hash - end - - class Role < OpenAI::Enum - abstract! - - USER = :user - SYSTEM = :system - DEVELOPER = :developer - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Status < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - COMPLETED = :completed - INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Type < OpenAI::Enum - abstract! 
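A hedged sketch of consuming the deleted `ResponseItemList` cursor page declared above; `drain` and its `list` argument are hypothetical (an instance as returned by the old input-items endpoint), and only fields from the RBI above are touched:

require "openai"

# Sketch, not from this patch: walk one page and surface the next cursor.
def drain(list)
  list.data.each { |item| p item }   # each item is one of the Data union variants
  list.has_more ? list.last_id : nil # last_id serves as the cursor for the next page
end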
- - MESSAGE = :message - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class ComputerCallOutput < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(String) } - def call_id - end - - sig { params(_: String).returns(String) } - def call_id=(_) - end - - sig { returns(OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Output) } - def output - end - - sig do - params(_: OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Output) - .returns(OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Output) - end - def output=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - returns( - T.nilable( - T::Array[OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::AcknowledgedSafetyCheck] - ) - ) - end - def acknowledged_safety_checks - end - - sig do - params( - _: T::Array[OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::AcknowledgedSafetyCheck] - ) - .returns( - T::Array[OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::AcknowledgedSafetyCheck] - ) - end - def acknowledged_safety_checks=(_) - end - - sig { returns(T.nilable(Symbol)) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig do - params( - id: String, - call_id: String, - output: OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Output, - acknowledged_safety_checks: T::Array[OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::AcknowledgedSafetyCheck], - status: Symbol, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, call_id:, output:, acknowledged_safety_checks: nil, status: nil, type: :computer_call_output) - end - - sig do - override - .returns( - { - id: String, - call_id: String, - output: OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Output, - type: Symbol, - acknowledged_safety_checks: T::Array[OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::AcknowledgedSafetyCheck], - status: Symbol - } - ) - end - def to_hash - end - - class Output < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(String)) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { returns(T.nilable(String)) } - def image_url - end - - sig { params(_: String).returns(String) } - def image_url=(_) - end - - sig { params(file_id: String, image_url: String, type: Symbol).returns(T.attached_class) } - def self.new(file_id: nil, image_url: nil, type: :computer_screenshot) - end - - sig { override.returns({type: Symbol, file_id: String, image_url: String}) } - def to_hash - end - end - - class AcknowledgedSafetyCheck < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(String) } - def code - end - - sig { params(_: String).returns(String) } - def code=(_) - end - - sig { returns(String) } - def message - end - - sig { params(_: String).returns(String) } - def message=(_) - end - - sig { params(id: String, code: String, message: String).returns(T.attached_class) } - def self.new(id:, code:, message:) - end - - sig { override.returns({id: 
String, code: String, message: String}) } - def to_hash - end - end - - class Status < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - COMPLETED = :completed - INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class FunctionCallOutput < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(String) } - def call_id - end - - sig { params(_: String).returns(String) } - def call_id=(_) - end - - sig { returns(String) } - def output - end - - sig { params(_: String).returns(String) } - def output=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(Symbol)) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig do - params( - id: String, - call_id: String, - output: String, - status: Symbol, - type: Symbol - ).returns(T.attached_class) - end - def self.new(id:, call_id:, output:, status: nil, type: :function_call_output) - end - - sig do - override.returns({id: String, call_id: String, output: String, type: Symbol, status: Symbol}) - end - def to_hash - end - - class Status < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - COMPLETED = :completed - INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseItemList::Data::Message], [Symbol, OpenAI::Models::Responses::ResponseOutputMessage], [Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall], [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput], [Symbol, OpenAI::Models::Responses::ResponseFunctionWebSearch], [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCall], [Symbol, OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput]] - ) - end - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_output_audio.rbi b/rbi/lib/openai/models/responses/response_output_audio.rbi deleted file mode 100644 index 1aed1ddd..00000000 --- a/rbi/lib/openai/models/responses/response_output_audio.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseOutputAudio < OpenAI::BaseModel - sig { returns(String) } - def data - end - - sig { params(_: String).returns(String) } - def data=(_) - end - - sig { returns(String) } - def transcript - end - - sig { params(_: String).returns(String) } - def transcript=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(data: String, transcript: String, type: Symbol).returns(T.attached_class) } - def self.new(data:, transcript:, type: :output_audio) - end - - sig { override.returns({data: String, transcript: String, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_output_item.rbi b/rbi/lib/openai/models/responses/response_output_item.rbi deleted file mode 100644 index 8a2bf039..00000000 --- a/rbi/lib/openai/models/responses/response_output_item.rbi +++ /dev/null @@ -1,22 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class 
ResponseOutputItem < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseOutputMessage], [Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall], [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCall], [Symbol, OpenAI::Models::Responses::ResponseFunctionWebSearch], [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall], [Symbol, OpenAI::Models::Responses::ResponseReasoningItem]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_output_item_added_event.rbi b/rbi/lib/openai/models/responses/response_output_item_added_event.rbi deleted file mode 100644 index 837047df..00000000 --- a/rbi/lib/openai/models/responses/response_output_item_added_event.rbi +++ /dev/null @@ -1,103 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseOutputItemAddedEvent < OpenAI::BaseModel - sig do - returns( - T.any( - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseReasoningItem - ) - ) - end - def item - end - - sig do - params( - _: T.any( - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseReasoningItem - ) - ) - .returns( - T.any( - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseReasoningItem - ) - ) - end - def item=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - item: T.any( - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseReasoningItem - ), - output_index: Integer, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(item:, output_index:, type: :"response.output_item.added") - end - - sig do - override - .returns( - { - item: T.any( - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseReasoningItem - ), - output_index: Integer, - type: Symbol - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_output_item_done_event.rbi b/rbi/lib/openai/models/responses/response_output_item_done_event.rbi deleted file mode 100644 index 
75961890..00000000 --- a/rbi/lib/openai/models/responses/response_output_item_done_event.rbi +++ /dev/null @@ -1,103 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseOutputItemDoneEvent < OpenAI::BaseModel - sig do - returns( - T.any( - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseReasoningItem - ) - ) - end - def item - end - - sig do - params( - _: T.any( - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseReasoningItem - ) - ) - .returns( - T.any( - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseReasoningItem - ) - ) - end - def item=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - item: T.any( - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseReasoningItem - ), - output_index: Integer, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(item:, output_index:, type: :"response.output_item.done") - end - - sig do - override - .returns( - { - item: T.any( - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseReasoningItem - ), - output_index: Integer, - type: Symbol - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_output_message.rbi b/rbi/lib/openai/models/responses/response_output_message.rbi deleted file mode 100644 index d2cbf773..00000000 --- a/rbi/lib/openai/models/responses/response_output_message.rbi +++ /dev/null @@ -1,117 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseOutputMessage < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig do - returns( - T::Array[T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)] - ) - end - def content - end - - sig do - params( - _: T::Array[T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)] - ) - .returns( - T::Array[T.any(OpenAI::Models::Responses::ResponseOutputText, 
OpenAI::Models::Responses::ResponseOutputRefusal)] - ) - end - def content=(_) - end - - sig { returns(Symbol) } - def role - end - - sig { params(_: Symbol).returns(Symbol) } - def role=(_) - end - - sig { returns(Symbol) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - id: String, - content: T::Array[T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)], - status: Symbol, - role: Symbol, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, content:, status:, role: :assistant, type: :message) - end - - sig do - override - .returns( - { - id: String, - content: T::Array[T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)], - role: Symbol, - status: Symbol, - type: Symbol - } - ) - end - def to_hash - end - - class Content < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseOutputText], [Symbol, OpenAI::Models::Responses::ResponseOutputRefusal]] - ) - end - private def variants - end - end - end - - class Status < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - COMPLETED = :completed - INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_output_refusal.rbi b/rbi/lib/openai/models/responses/response_output_refusal.rbi deleted file mode 100644 index f573eb83..00000000 --- a/rbi/lib/openai/models/responses/response_output_refusal.rbi +++ /dev/null @@ -1,33 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseOutputRefusal < OpenAI::BaseModel - sig { returns(String) } - def refusal - end - - sig { params(_: String).returns(String) } - def refusal=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(refusal: String, type: Symbol).returns(T.attached_class) } - def self.new(refusal:, type: :refusal) - end - - sig { override.returns({refusal: String, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_output_text.rbi b/rbi/lib/openai/models/responses/response_output_text.rbi deleted file mode 100644 index 7f9e510e..00000000 --- a/rbi/lib/openai/models/responses/response_output_text.rbi +++ /dev/null @@ -1,244 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseOutputText < OpenAI::BaseModel - sig do - returns( - T::Array[ - T.any( - OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, - OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, - OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath - ) - ] - ) - end - def annotations - end - - sig do - params( - _: T::Array[ - T.any( - OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, - OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, - OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath - ) - ] - ) - .returns( - T::Array[ - T.any( - OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, - OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, - 
OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath - ) - ] - ) - end - def annotations=(_) - end - - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - annotations: T::Array[ - T.any( - OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, - OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, - OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath - ) - ], - text: String, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(annotations:, text:, type: :output_text) - end - - sig do - override - .returns( - { - annotations: T::Array[ - T.any( - OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, - OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, - OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath - ) - ], - text: String, - type: Symbol - } - ) - end - def to_hash - end - - class Annotation < OpenAI::Union - abstract! - - class FileCitation < OpenAI::BaseModel - sig { returns(String) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(file_id: String, index: Integer, type: Symbol).returns(T.attached_class) } - def self.new(file_id:, index:, type: :file_citation) - end - - sig { override.returns({file_id: String, index: Integer, type: Symbol}) } - def to_hash - end - end - - class URLCitation < OpenAI::BaseModel - sig { returns(Integer) } - def end_index - end - - sig { params(_: Integer).returns(Integer) } - def end_index=(_) - end - - sig { returns(Integer) } - def start_index - end - - sig { params(_: Integer).returns(Integer) } - def start_index=(_) - end - - sig { returns(String) } - def title - end - - sig { params(_: String).returns(String) } - def title=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(String) } - def url - end - - sig { params(_: String).returns(String) } - def url=(_) - end - - sig do - params(end_index: Integer, start_index: Integer, title: String, url: String, type: Symbol) - .returns(T.attached_class) - end - def self.new(end_index:, start_index:, title:, url:, type: :url_citation) - end - - sig do - override.returns( - { - end_index: Integer, - start_index: Integer, - title: String, - type: Symbol, - url: String - } - ) - end - def to_hash - end - end - - class FilePath < OpenAI::BaseModel - sig { returns(String) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(file_id: String, index: Integer, type: Symbol).returns(T.attached_class) } - def self.new(file_id:, index:, type: :file_path) - end - - sig { override.returns({file_id: String, index: Integer, type: Symbol}) } - def to_hash - end - end - - class << self - sig do - override - .returns( - [[Symbol, 
OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation], [Symbol, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation], [Symbol, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath]] - ) - end - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_reasoning_item.rbi b/rbi/lib/openai/models/responses/response_reasoning_item.rbi deleted file mode 100644 index 4733fee4..00000000 --- a/rbi/lib/openai/models/responses/response_reasoning_item.rbi +++ /dev/null @@ -1,110 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseReasoningItem < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T::Array[OpenAI::Models::Responses::ResponseReasoningItem::Summary]) } - def summary - end - - sig do - params(_: T::Array[OpenAI::Models::Responses::ResponseReasoningItem::Summary]) - .returns(T::Array[OpenAI::Models::Responses::ResponseReasoningItem::Summary]) - end - def summary=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(Symbol)) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig do - params( - id: String, - summary: T::Array[OpenAI::Models::Responses::ResponseReasoningItem::Summary], - status: Symbol, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, summary:, status: nil, type: :reasoning) - end - - sig do - override - .returns( - { - id: String, - summary: T::Array[OpenAI::Models::Responses::ResponseReasoningItem::Summary], - type: Symbol, - status: Symbol - } - ) - end - def to_hash - end - - class Summary < OpenAI::BaseModel - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(text: String, type: Symbol).returns(T.attached_class) } - def self.new(text:, type: :summary_text) - end - - sig { override.returns({text: String, type: Symbol}) } - def to_hash - end - end - - class Status < OpenAI::Enum - abstract! 
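A hypothetical instance of the deleted `ResponseReasoningItem` above; the id and summary text are placeholders, and `:completed` comes from the `Status` values listed just below:

require "openai"

# Hedged sketch of the deleted model; type defaults to :reasoning per the RBI above.
reasoning = OpenAI::Models::Responses::ResponseReasoningItem.new(
  id: "rs_123", # placeholder identifier
  summary: [
    OpenAI::Models::Responses::ResponseReasoningItem::Summary.new(text: "Weighed both options.")
  ],
  status: :completed
)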
- - IN_PROGRESS = :in_progress - COMPLETED = :completed - INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_refusal_delta_event.rbi b/rbi/lib/openai/models/responses/response_refusal_delta_event.rbi deleted file mode 100644 index 7517aba1..00000000 --- a/rbi/lib/openai/models/responses/response_refusal_delta_event.rbi +++ /dev/null @@ -1,69 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseRefusalDeltaEvent < OpenAI::BaseModel - sig { returns(Integer) } - def content_index - end - - sig { params(_: Integer).returns(Integer) } - def content_index=(_) - end - - sig { returns(String) } - def delta - end - - sig { params(_: String).returns(String) } - def delta=(_) - end - - sig { returns(String) } - def item_id - end - - sig { params(_: String).returns(String) } - def item_id=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params(content_index: Integer, delta: String, item_id: String, output_index: Integer, type: Symbol) - .returns(T.attached_class) - end - def self.new(content_index:, delta:, item_id:, output_index:, type: :"response.refusal.delta") - end - - sig do - override - .returns({ - content_index: Integer, - delta: String, - item_id: String, - output_index: Integer, - type: Symbol - }) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_refusal_done_event.rbi b/rbi/lib/openai/models/responses/response_refusal_done_event.rbi deleted file mode 100644 index 560a27ef..00000000 --- a/rbi/lib/openai/models/responses/response_refusal_done_event.rbi +++ /dev/null @@ -1,75 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseRefusalDoneEvent < OpenAI::BaseModel - sig { returns(Integer) } - def content_index - end - - sig { params(_: Integer).returns(Integer) } - def content_index=(_) - end - - sig { returns(String) } - def item_id - end - - sig { params(_: String).returns(String) } - def item_id=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(String) } - def refusal - end - - sig { params(_: String).returns(String) } - def refusal=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - content_index: Integer, - item_id: String, - output_index: Integer, - refusal: String, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new(content_index:, item_id:, output_index:, refusal:, type: :"response.refusal.done") - end - - sig do - override - .returns({ - content_index: Integer, - item_id: String, - output_index: Integer, - refusal: String, - type: Symbol - }) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_retrieve_params.rbi b/rbi/lib/openai/models/responses/response_retrieve_params.rbi deleted file mode 100644 index 229f8fbb..00000000 --- a/rbi/lib/openai/models/responses/response_retrieve_params.rbi +++ /dev/null @@ -1,34 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseRetrieveParams < 
OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(T::Array[Symbol])) } - def include - end - - sig { params(_: T::Array[Symbol]).returns(T::Array[Symbol]) } - def include=(_) - end - - sig do - params( - include: T::Array[Symbol], - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(include: nil, request_options: {}) - end - - sig { override.returns({include: T::Array[Symbol], request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_status.rbi b/rbi/lib/openai/models/responses/response_status.rbi deleted file mode 100644 index 11ae0a2a..00000000 --- a/rbi/lib/openai/models/responses/response_status.rbi +++ /dev/null @@ -1,22 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseStatus < OpenAI::Enum - abstract! - - COMPLETED = :completed - FAILED = :failed - IN_PROGRESS = :in_progress - INCOMPLETE = :incomplete - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_stream_event.rbi b/rbi/lib/openai/models/responses/response_stream_event.rbi deleted file mode 100644 index a0f3b82c..00000000 --- a/rbi/lib/openai/models/responses/response_stream_event.rbi +++ /dev/null @@ -1,22 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseStreamEvent < OpenAI::Union - abstract! - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseAudioDeltaEvent], [Symbol, OpenAI::Models::Responses::ResponseAudioDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent], [Symbol, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent], [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent], [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent], [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent], [Symbol, OpenAI::Models::Responses::ResponseCompletedEvent], [Symbol, OpenAI::Models::Responses::ResponseContentPartAddedEvent], [Symbol, OpenAI::Models::Responses::ResponseContentPartDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseCreatedEvent], [Symbol, OpenAI::Models::Responses::ResponseErrorEvent], [Symbol, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent], [Symbol, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent], [Symbol, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent], [Symbol, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent], [Symbol, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseInProgressEvent], [Symbol, OpenAI::Models::Responses::ResponseFailedEvent], [Symbol, OpenAI::Models::Responses::ResponseIncompleteEvent], [Symbol, OpenAI::Models::Responses::ResponseOutputItemAddedEvent], [Symbol, OpenAI::Models::Responses::ResponseOutputItemDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseRefusalDeltaEvent], [Symbol, OpenAI::Models::Responses::ResponseRefusalDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent], [Symbol, 
OpenAI::Models::Responses::ResponseTextDeltaEvent], [Symbol, OpenAI::Models::Responses::ResponseTextDoneEvent], [Symbol, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent], [Symbol, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent], [Symbol, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_text_annotation_delta_event.rbi b/rbi/lib/openai/models/responses/response_text_annotation_delta_event.rbi deleted file mode 100644 index 2d60b64e..00000000 --- a/rbi/lib/openai/models/responses/response_text_annotation_delta_event.rbi +++ /dev/null @@ -1,271 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseTextAnnotationDeltaEvent < OpenAI::BaseModel - sig do - returns( - T.any( - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation, - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation, - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath - ) - ) - end - def annotation - end - - sig do - params( - _: T.any( - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation, - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation, - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath - ) - ) - .returns( - T.any( - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation, - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation, - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath - ) - ) - end - def annotation=(_) - end - - sig { returns(Integer) } - def annotation_index - end - - sig { params(_: Integer).returns(Integer) } - def annotation_index=(_) - end - - sig { returns(Integer) } - def content_index - end - - sig { params(_: Integer).returns(Integer) } - def content_index=(_) - end - - sig { returns(String) } - def item_id - end - - sig { params(_: String).returns(String) } - def item_id=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params( - annotation: T.any( - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation, - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation, - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath - ), - annotation_index: Integer, - content_index: Integer, - item_id: String, - output_index: Integer, - type: Symbol - ) - .returns(T.attached_class) - end - def self.new( - annotation:, - annotation_index:, - content_index:, - item_id:, - output_index:, - type: :"response.output_text.annotation.added" - ) - end - - sig do - override - .returns( - { - annotation: T.any( - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation, - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation, - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath - ), - annotation_index: Integer, - content_index: Integer, - item_id: String, - output_index: Integer, - type: Symbol - } - ) - end - def to_hash - end - - class Annotation < 
OpenAI::Union - abstract! - - class FileCitation < OpenAI::BaseModel - sig { returns(String) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(file_id: String, index: Integer, type: Symbol).returns(T.attached_class) } - def self.new(file_id:, index:, type: :file_citation) - end - - sig { override.returns({file_id: String, index: Integer, type: Symbol}) } - def to_hash - end - end - - class URLCitation < OpenAI::BaseModel - sig { returns(Integer) } - def end_index - end - - sig { params(_: Integer).returns(Integer) } - def end_index=(_) - end - - sig { returns(Integer) } - def start_index - end - - sig { params(_: Integer).returns(Integer) } - def start_index=(_) - end - - sig { returns(String) } - def title - end - - sig { params(_: String).returns(String) } - def title=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(String) } - def url - end - - sig { params(_: String).returns(String) } - def url=(_) - end - - sig do - params(end_index: Integer, start_index: Integer, title: String, url: String, type: Symbol) - .returns(T.attached_class) - end - def self.new(end_index:, start_index:, title:, url:, type: :url_citation) - end - - sig do - override.returns( - { - end_index: Integer, - start_index: Integer, - title: String, - type: Symbol, - url: String - } - ) - end - def to_hash - end - end - - class FilePath < OpenAI::BaseModel - sig { returns(String) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { returns(Integer) } - def index - end - - sig { params(_: Integer).returns(Integer) } - def index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(file_id: String, index: Integer, type: Symbol).returns(T.attached_class) } - def self.new(file_id:, index:, type: :file_path) - end - - sig { override.returns({file_id: String, index: Integer, type: Symbol}) } - def to_hash - end - end - - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation], [Symbol, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation], [Symbol, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath]] - ) - end - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_text_config.rbi b/rbi/lib/openai/models/responses/response_text_config.rbi deleted file mode 100644 index a04a35f9..00000000 --- a/rbi/lib/openai/models/responses/response_text_config.rbi +++ /dev/null @@ -1,70 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseTextConfig < OpenAI::BaseModel - sig do - returns( - T.nilable( - T.any( - OpenAI::Models::ResponseFormatText, - OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, - OpenAI::Models::ResponseFormatJSONObject - ) - ) - ) - end - def format_ - end - - sig do - params( - _: T.any( - OpenAI::Models::ResponseFormatText, - OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, - OpenAI::Models::ResponseFormatJSONObject - ) - ) - .returns( 
- T.any( - OpenAI::Models::ResponseFormatText, - OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, - OpenAI::Models::ResponseFormatJSONObject - ) - ) - end - def format_=(_) - end - - sig do - params( - format_: T.any( - OpenAI::Models::ResponseFormatText, - OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, - OpenAI::Models::ResponseFormatJSONObject - ) - ) - .returns(T.attached_class) - end - def self.new(format_: nil) - end - - sig do - override - .returns( - { - format_: T.any( - OpenAI::Models::ResponseFormatText, - OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, - OpenAI::Models::ResponseFormatJSONObject - ) - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_text_delta_event.rbi b/rbi/lib/openai/models/responses/response_text_delta_event.rbi deleted file mode 100644 index acc2fa04..00000000 --- a/rbi/lib/openai/models/responses/response_text_delta_event.rbi +++ /dev/null @@ -1,69 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseTextDeltaEvent < OpenAI::BaseModel - sig { returns(Integer) } - def content_index - end - - sig { params(_: Integer).returns(Integer) } - def content_index=(_) - end - - sig { returns(String) } - def delta - end - - sig { params(_: String).returns(String) } - def delta=(_) - end - - sig { returns(String) } - def item_id - end - - sig { params(_: String).returns(String) } - def item_id=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params(content_index: Integer, delta: String, item_id: String, output_index: Integer, type: Symbol) - .returns(T.attached_class) - end - def self.new(content_index:, delta:, item_id:, output_index:, type: :"response.output_text.delta") - end - - sig do - override - .returns({ - content_index: Integer, - delta: String, - item_id: String, - output_index: Integer, - type: Symbol - }) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_text_done_event.rbi b/rbi/lib/openai/models/responses/response_text_done_event.rbi deleted file mode 100644 index 8b2ece95..00000000 --- a/rbi/lib/openai/models/responses/response_text_done_event.rbi +++ /dev/null @@ -1,69 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseTextDoneEvent < OpenAI::BaseModel - sig { returns(Integer) } - def content_index - end - - sig { params(_: Integer).returns(Integer) } - def content_index=(_) - end - - sig { returns(String) } - def item_id - end - - sig { params(_: String).returns(String) } - def item_id=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig do - params(content_index: Integer, item_id: String, output_index: Integer, text: String, type: Symbol) - .returns(T.attached_class) - end - def self.new(content_index:, item_id:, output_index:, text:, type: :"response.output_text.done") - end - - sig do - override - .returns({ - content_index: Integer, - item_id: String, - output_index: Integer, - text: 
String, - type: Symbol - }) - end - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_usage.rbi b/rbi/lib/openai/models/responses/response_usage.rbi deleted file mode 100644 index 87f7c238..00000000 --- a/rbi/lib/openai/models/responses/response_usage.rbi +++ /dev/null @@ -1,88 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseUsage < OpenAI::BaseModel - sig { returns(Integer) } - def input_tokens - end - - sig { params(_: Integer).returns(Integer) } - def input_tokens=(_) - end - - sig { returns(Integer) } - def output_tokens - end - - sig { params(_: Integer).returns(Integer) } - def output_tokens=(_) - end - - sig { returns(OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails) } - def output_tokens_details - end - - sig do - params(_: OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails) - .returns(OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails) - end - def output_tokens_details=(_) - end - - sig { returns(Integer) } - def total_tokens - end - - sig { params(_: Integer).returns(Integer) } - def total_tokens=(_) - end - - sig do - params( - input_tokens: Integer, - output_tokens: Integer, - output_tokens_details: OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails, - total_tokens: Integer - ) - .returns(T.attached_class) - end - def self.new(input_tokens:, output_tokens:, output_tokens_details:, total_tokens:) - end - - sig do - override - .returns( - { - input_tokens: Integer, - output_tokens: Integer, - output_tokens_details: OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails, - total_tokens: Integer - } - ) - end - def to_hash - end - - class OutputTokensDetails < OpenAI::BaseModel - sig { returns(Integer) } - def reasoning_tokens - end - - sig { params(_: Integer).returns(Integer) } - def reasoning_tokens=(_) - end - - sig { params(reasoning_tokens: Integer).returns(T.attached_class) } - def self.new(reasoning_tokens:) - end - - sig { override.returns({reasoning_tokens: Integer}) } - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_web_search_call_completed_event.rbi b/rbi/lib/openai/models/responses/response_web_search_call_completed_event.rbi deleted file mode 100644 index 16bde4c4..00000000 --- a/rbi/lib/openai/models/responses/response_web_search_call_completed_event.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseWebSearchCallCompletedEvent < OpenAI::BaseModel - sig { returns(String) } - def item_id - end - - sig { params(_: String).returns(String) } - def item_id=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } - def self.new(item_id:, output_index:, type: :"response.web_search_call.completed") - end - - sig { override.returns({item_id: String, output_index: Integer, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_web_search_call_in_progress_event.rbi b/rbi/lib/openai/models/responses/response_web_search_call_in_progress_event.rbi deleted file mode 100644 index 654b7293..00000000 --- 
a/rbi/lib/openai/models/responses/response_web_search_call_in_progress_event.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseWebSearchCallInProgressEvent < OpenAI::BaseModel - sig { returns(String) } - def item_id - end - - sig { params(_: String).returns(String) } - def item_id=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } - def self.new(item_id:, output_index:, type: :"response.web_search_call.in_progress") - end - - sig { override.returns({item_id: String, output_index: Integer, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/response_web_search_call_searching_event.rbi b/rbi/lib/openai/models/responses/response_web_search_call_searching_event.rbi deleted file mode 100644 index c0de9efa..00000000 --- a/rbi/lib/openai/models/responses/response_web_search_call_searching_event.rbi +++ /dev/null @@ -1,41 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseWebSearchCallSearchingEvent < OpenAI::BaseModel - sig { returns(String) } - def item_id - end - - sig { params(_: String).returns(String) } - def item_id=(_) - end - - sig { returns(Integer) } - def output_index - end - - sig { params(_: Integer).returns(Integer) } - def output_index=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } - def self.new(item_id:, output_index:, type: :"response.web_search_call.searching") - end - - sig { override.returns({item_id: String, output_index: Integer, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/tool.rbi b/rbi/lib/openai/models/responses/tool.rbi deleted file mode 100644 index 00e4ecf8..00000000 --- a/rbi/lib/openai/models/responses/tool.rbi +++ /dev/null @@ -1,22 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class Tool < OpenAI::Union - abstract! 
- - class << self - sig do - override - .returns( - [[Symbol, OpenAI::Models::Responses::FileSearchTool], [Symbol, OpenAI::Models::Responses::FunctionTool], [Symbol, OpenAI::Models::Responses::ComputerTool], [NilClass, OpenAI::Models::Responses::WebSearchTool]] - ) - end - private def variants - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/tool_choice_function.rbi b/rbi/lib/openai/models/responses/tool_choice_function.rbi deleted file mode 100644 index c11c91a4..00000000 --- a/rbi/lib/openai/models/responses/tool_choice_function.rbi +++ /dev/null @@ -1,33 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ToolChoiceFunction < OpenAI::BaseModel - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(name: String, type: Symbol).returns(T.attached_class) } - def self.new(name:, type: :function) - end - - sig { override.returns({name: String, type: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/tool_choice_options.rbi b/rbi/lib/openai/models/responses/tool_choice_options.rbi deleted file mode 100644 index 7e1f9984..00000000 --- a/rbi/lib/openai/models/responses/tool_choice_options.rbi +++ /dev/null @@ -1,21 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ToolChoiceOptions < OpenAI::Enum - abstract! - - NONE = :none - AUTO = :auto - REQUIRED = :required - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/tool_choice_types.rbi b/rbi/lib/openai/models/responses/tool_choice_types.rbi deleted file mode 100644 index 8176455f..00000000 --- a/rbi/lib/openai/models/responses/tool_choice_types.rbi +++ /dev/null @@ -1,40 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ToolChoiceTypes < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(type: Symbol).returns(T.attached_class) } - def self.new(type:) - end - - sig { override.returns({type: Symbol}) } - def to_hash - end - - class Type < OpenAI::Enum - abstract! 
- - FILE_SEARCH = :file_search - WEB_SEARCH_PREVIEW = :web_search_preview - COMPUTER_USE_PREVIEW = :computer_use_preview - WEB_SEARCH_PREVIEW_2025_03_11 = :web_search_preview_2025_03_11 - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/responses/web_search_tool.rbi b/rbi/lib/openai/models/responses/web_search_tool.rbi deleted file mode 100644 index be37fbf0..00000000 --- a/rbi/lib/openai/models/responses/web_search_tool.rbi +++ /dev/null @@ -1,142 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class WebSearchTool < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(Symbol)) } - def search_context_size - end - - sig { params(_: Symbol).returns(Symbol) } - def search_context_size=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Responses::WebSearchTool::UserLocation)) } - def user_location - end - - sig do - params(_: T.nilable(OpenAI::Models::Responses::WebSearchTool::UserLocation)) - .returns(T.nilable(OpenAI::Models::Responses::WebSearchTool::UserLocation)) - end - def user_location=(_) - end - - sig do - params( - type: Symbol, - search_context_size: Symbol, - user_location: T.nilable(OpenAI::Models::Responses::WebSearchTool::UserLocation) - ) - .returns(T.attached_class) - end - def self.new(type:, search_context_size: nil, user_location: nil) - end - - sig do - override - .returns( - { - type: Symbol, - search_context_size: Symbol, - user_location: T.nilable(OpenAI::Models::Responses::WebSearchTool::UserLocation) - } - ) - end - def to_hash - end - - class Type < OpenAI::Enum - abstract! - - WEB_SEARCH_PREVIEW = :web_search_preview - WEB_SEARCH_PREVIEW_2025_03_11 = :web_search_preview_2025_03_11 - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class SearchContextSize < OpenAI::Enum - abstract! 
- - LOW = :low - MEDIUM = :medium - HIGH = :high - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class UserLocation < OpenAI::BaseModel - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { returns(T.nilable(String)) } - def city - end - - sig { params(_: String).returns(String) } - def city=(_) - end - - sig { returns(T.nilable(String)) } - def country - end - - sig { params(_: String).returns(String) } - def country=(_) - end - - sig { returns(T.nilable(String)) } - def region - end - - sig { params(_: String).returns(String) } - def region=(_) - end - - sig { returns(T.nilable(String)) } - def timezone - end - - sig { params(_: String).returns(String) } - def timezone=(_) - end - - sig do - params(city: String, country: String, region: String, timezone: String, type: Symbol) - .returns(T.attached_class) - end - def self.new(city: nil, country: nil, region: nil, timezone: nil, type: :approximate) - end - - sig do - override.returns({type: Symbol, city: String, country: String, region: String, timezone: String}) - end - def to_hash - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/static_file_chunking_strategy.rbi b/rbi/lib/openai/models/static_file_chunking_strategy.rbi deleted file mode 100644 index b076e093..00000000 --- a/rbi/lib/openai/models/static_file_chunking_strategy.rbi +++ /dev/null @@ -1,31 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class StaticFileChunkingStrategy < OpenAI::BaseModel - sig { returns(Integer) } - def chunk_overlap_tokens - end - - sig { params(_: Integer).returns(Integer) } - def chunk_overlap_tokens=(_) - end - - sig { returns(Integer) } - def max_chunk_size_tokens - end - - sig { params(_: Integer).returns(Integer) } - def max_chunk_size_tokens=(_) - end - - sig { params(chunk_overlap_tokens: Integer, max_chunk_size_tokens: Integer).returns(T.attached_class) } - def self.new(chunk_overlap_tokens:, max_chunk_size_tokens:) - end - - sig { override.returns({chunk_overlap_tokens: Integer, max_chunk_size_tokens: Integer}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/static_file_chunking_strategy_object.rbi b/rbi/lib/openai/models/static_file_chunking_strategy_object.rbi deleted file mode 100644 index 94e5e78c..00000000 --- a/rbi/lib/openai/models/static_file_chunking_strategy_object.rbi +++ /dev/null @@ -1,31 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class StaticFileChunkingStrategyObject < OpenAI::BaseModel - sig { returns(OpenAI::Models::StaticFileChunkingStrategy) } - def static - end - - sig { params(_: OpenAI::Models::StaticFileChunkingStrategy).returns(OpenAI::Models::StaticFileChunkingStrategy) } - def static=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(static: OpenAI::Models::StaticFileChunkingStrategy, type: Symbol).returns(T.attached_class) } - def self.new(static:, type: :static) - end - - sig { override.returns({static: OpenAI::Models::StaticFileChunkingStrategy, type: Symbol}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/static_file_chunking_strategy_object_param.rbi b/rbi/lib/openai/models/static_file_chunking_strategy_object_param.rbi deleted file mode 100644 index 3eed65cb..00000000 --- a/rbi/lib/openai/models/static_file_chunking_strategy_object_param.rbi +++ /dev/null @@ -1,31 +0,0 @@ -# typed: strong - -module OpenAI - module 
Models - class StaticFileChunkingStrategyObjectParam < OpenAI::BaseModel - sig { returns(OpenAI::Models::StaticFileChunkingStrategy) } - def static - end - - sig { params(_: OpenAI::Models::StaticFileChunkingStrategy).returns(OpenAI::Models::StaticFileChunkingStrategy) } - def static=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(static: OpenAI::Models::StaticFileChunkingStrategy, type: Symbol).returns(T.attached_class) } - def self.new(static:, type: :static) - end - - sig { override.returns({static: OpenAI::Models::StaticFileChunkingStrategy, type: Symbol}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/upload.rbi b/rbi/lib/openai/models/upload.rbi deleted file mode 100644 index 422b80e9..00000000 --- a/rbi/lib/openai/models/upload.rbi +++ /dev/null @@ -1,130 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class Upload < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(Integer) } - def bytes - end - - sig { params(_: Integer).returns(Integer) } - def bytes=(_) - end - - sig { returns(Integer) } - def created_at - end - - sig { params(_: Integer).returns(Integer) } - def created_at=(_) - end - - sig { returns(Integer) } - def expires_at - end - - sig { params(_: Integer).returns(Integer) } - def expires_at=(_) - end - - sig { returns(String) } - def filename - end - - sig { params(_: String).returns(String) } - def filename=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(String) } - def purpose - end - - sig { params(_: String).returns(String) } - def purpose=(_) - end - - sig { returns(Symbol) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(T.nilable(OpenAI::Models::FileObject)) } - def file - end - - sig { params(_: T.nilable(OpenAI::Models::FileObject)).returns(T.nilable(OpenAI::Models::FileObject)) } - def file=(_) - end - - sig do - params( - id: String, - bytes: Integer, - created_at: Integer, - expires_at: Integer, - filename: String, - purpose: String, - status: Symbol, - file: T.nilable(OpenAI::Models::FileObject), - object: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, bytes:, created_at:, expires_at:, filename:, purpose:, status:, file: nil, object: :upload) - end - - sig do - override - .returns( - { - id: String, - bytes: Integer, - created_at: Integer, - expires_at: Integer, - filename: String, - object: Symbol, - purpose: String, - status: Symbol, - file: T.nilable(OpenAI::Models::FileObject) - } - ) - end - def to_hash - end - - class Status < OpenAI::Enum - abstract! 
- - PENDING = :pending - COMPLETED = :completed - CANCELLED = :cancelled - EXPIRED = :expired - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/upload_cancel_params.rbi b/rbi/lib/openai/models/upload_cancel_params.rbi deleted file mode 100644 index 6db61397..00000000 --- a/rbi/lib/openai/models/upload_cancel_params.rbi +++ /dev/null @@ -1,21 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class UploadCancelParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/upload_complete_params.rbi b/rbi/lib/openai/models/upload_complete_params.rbi deleted file mode 100644 index 840707e6..00000000 --- a/rbi/lib/openai/models/upload_complete_params.rbi +++ /dev/null @@ -1,43 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class UploadCompleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T::Array[String]) } - def part_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def part_ids=(_) - end - - sig { returns(T.nilable(String)) } - def md5 - end - - sig { params(_: String).returns(String) } - def md5=(_) - end - - sig do - params( - part_ids: T::Array[String], - md5: String, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(part_ids:, md5: nil, request_options: {}) - end - - sig do - override.returns({part_ids: T::Array[String], md5: String, request_options: OpenAI::RequestOptions}) - end - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/upload_create_params.rbi b/rbi/lib/openai/models/upload_create_params.rbi deleted file mode 100644 index 6d144a54..00000000 --- a/rbi/lib/openai/models/upload_create_params.rbi +++ /dev/null @@ -1,70 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class UploadCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(Integer) } - def bytes - end - - sig { params(_: Integer).returns(Integer) } - def bytes=(_) - end - - sig { returns(String) } - def filename - end - - sig { params(_: String).returns(String) } - def filename=(_) - end - - sig { returns(String) } - def mime_type - end - - sig { params(_: String).returns(String) } - def mime_type=(_) - end - - sig { returns(Symbol) } - def purpose - end - - sig { params(_: Symbol).returns(Symbol) } - def purpose=(_) - end - - sig do - params( - bytes: Integer, - filename: String, - mime_type: String, - purpose: Symbol, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(bytes:, filename:, mime_type:, purpose:, request_options: {}) - end - - sig do - override - .returns( - { - bytes: Integer, - filename: String, - mime_type: String, - purpose: Symbol, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/uploads/part_create_params.rbi b/rbi/lib/openai/models/uploads/part_create_params.rbi deleted file 
mode 100644 index 4e2fe8c7..00000000 --- a/rbi/lib/openai/models/uploads/part_create_params.rbi +++ /dev/null @@ -1,34 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Uploads - class PartCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.any(IO, StringIO)) } - def data - end - - sig { params(_: T.any(IO, StringIO)).returns(T.any(IO, StringIO)) } - def data=(_) - end - - sig do - params( - data: T.any(IO, StringIO), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(data:, request_options: {}) - end - - sig { override.returns({data: T.any(IO, StringIO), request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/uploads/upload_part.rbi b/rbi/lib/openai/models/uploads/upload_part.rbi deleted file mode 100644 index 6801e859..00000000 --- a/rbi/lib/openai/models/uploads/upload_part.rbi +++ /dev/null @@ -1,53 +0,0 @@ -# typed: strong - -module OpenAI - module Models - UploadPart = T.type_alias { Uploads::UploadPart } - - module Uploads - class UploadPart < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(Integer) } - def created_at - end - - sig { params(_: Integer).returns(Integer) } - def created_at=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(String) } - def upload_id - end - - sig { params(_: String).returns(String) } - def upload_id=(_) - end - - sig do - params(id: String, created_at: Integer, upload_id: String, object: Symbol).returns(T.attached_class) - end - def self.new(id:, created_at:, upload_id:, object: :"upload.part") - end - - sig { override.returns({id: String, created_at: Integer, object: Symbol, upload_id: String}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_store.rbi b/rbi/lib/openai/models/vector_store.rbi deleted file mode 100644 index e60530c4..00000000 --- a/rbi/lib/openai/models/vector_store.rbi +++ /dev/null @@ -1,255 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class VectorStore < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(Integer) } - def created_at - end - - sig { params(_: Integer).returns(Integer) } - def created_at=(_) - end - - sig { returns(OpenAI::Models::VectorStore::FileCounts) } - def file_counts - end - - sig { params(_: OpenAI::Models::VectorStore::FileCounts).returns(OpenAI::Models::VectorStore::FileCounts) } - def file_counts=(_) - end - - sig { returns(T.nilable(Integer)) } - def last_active_at - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def last_active_at=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(String) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(Symbol) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(Integer) } 
- def usage_bytes - end - - sig { params(_: Integer).returns(Integer) } - def usage_bytes=(_) - end - - sig { returns(T.nilable(OpenAI::Models::VectorStore::ExpiresAfter)) } - def expires_after - end - - sig { params(_: OpenAI::Models::VectorStore::ExpiresAfter).returns(OpenAI::Models::VectorStore::ExpiresAfter) } - def expires_after=(_) - end - - sig { returns(T.nilable(Integer)) } - def expires_at - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def expires_at=(_) - end - - sig do - params( - id: String, - created_at: Integer, - file_counts: OpenAI::Models::VectorStore::FileCounts, - last_active_at: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - name: String, - status: Symbol, - usage_bytes: Integer, - expires_after: OpenAI::Models::VectorStore::ExpiresAfter, - expires_at: T.nilable(Integer), - object: Symbol - ) - .returns(T.attached_class) - end - def self.new( - id:, - created_at:, - file_counts:, - last_active_at:, - metadata:, - name:, - status:, - usage_bytes:, - expires_after: nil, - expires_at: nil, - object: :vector_store - ) - end - - sig do - override - .returns( - { - id: String, - created_at: Integer, - file_counts: OpenAI::Models::VectorStore::FileCounts, - last_active_at: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - name: String, - object: Symbol, - status: Symbol, - usage_bytes: Integer, - expires_after: OpenAI::Models::VectorStore::ExpiresAfter, - expires_at: T.nilable(Integer) - } - ) - end - def to_hash - end - - class FileCounts < OpenAI::BaseModel - sig { returns(Integer) } - def cancelled - end - - sig { params(_: Integer).returns(Integer) } - def cancelled=(_) - end - - sig { returns(Integer) } - def completed - end - - sig { params(_: Integer).returns(Integer) } - def completed=(_) - end - - sig { returns(Integer) } - def failed - end - - sig { params(_: Integer).returns(Integer) } - def failed=(_) - end - - sig { returns(Integer) } - def in_progress - end - - sig { params(_: Integer).returns(Integer) } - def in_progress=(_) - end - - sig { returns(Integer) } - def total - end - - sig { params(_: Integer).returns(Integer) } - def total=(_) - end - - sig do - params( - cancelled: Integer, - completed: Integer, - failed: Integer, - in_progress: Integer, - total: Integer - ) - .returns(T.attached_class) - end - def self.new(cancelled:, completed:, failed:, in_progress:, total:) - end - - sig do - override - .returns({ - cancelled: Integer, - completed: Integer, - failed: Integer, - in_progress: Integer, - total: Integer - }) - end - def to_hash - end - end - - class Status < OpenAI::Enum - abstract! 
- - EXPIRED = :expired - IN_PROGRESS = :in_progress - COMPLETED = :completed - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class ExpiresAfter < OpenAI::BaseModel - sig { returns(Symbol) } - def anchor - end - - sig { params(_: Symbol).returns(Symbol) } - def anchor=(_) - end - - sig { returns(Integer) } - def days - end - - sig { params(_: Integer).returns(Integer) } - def days=(_) - end - - sig { params(days: Integer, anchor: Symbol).returns(T.attached_class) } - def self.new(days:, anchor: :last_active_at) - end - - sig { override.returns({anchor: Symbol, days: Integer}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_store_create_params.rbi b/rbi/lib/openai/models/vector_store_create_params.rbi deleted file mode 100644 index 4444751d..00000000 --- a/rbi/lib/openai/models/vector_store_create_params.rbi +++ /dev/null @@ -1,137 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class VectorStoreCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - returns( - T.nilable( - T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ) - ) - ) - end - def chunking_strategy - end - - sig do - params( - _: T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ) - ) - .returns( - T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ) - ) - end - def chunking_strategy=(_) - end - - sig { returns(T.nilable(OpenAI::Models::VectorStoreCreateParams::ExpiresAfter)) } - def expires_after - end - - sig do - params(_: OpenAI::Models::VectorStoreCreateParams::ExpiresAfter) - .returns(OpenAI::Models::VectorStoreCreateParams::ExpiresAfter) - end - def expires_after=(_) - end - - sig { returns(T.nilable(T::Array[String])) } - def file_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def file_ids=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.nilable(String)) } - def name - end - - sig { params(_: String).returns(String) } - def name=(_) - end - - sig do - params( - chunking_strategy: T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ), - expires_after: OpenAI::Models::VectorStoreCreateParams::ExpiresAfter, - file_ids: T::Array[String], - metadata: T.nilable(OpenAI::Models::Metadata), - name: String, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(chunking_strategy: nil, expires_after: nil, file_ids: nil, metadata: nil, name: nil, request_options: {}) - end - - sig do - override - .returns( - { - chunking_strategy: T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ), - expires_after: OpenAI::Models::VectorStoreCreateParams::ExpiresAfter, - file_ids: T::Array[String], - metadata: T.nilable(OpenAI::Models::Metadata), - name: String, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class ExpiresAfter < OpenAI::BaseModel - sig { returns(Symbol) } - def anchor - end - - sig { params(_: Symbol).returns(Symbol) } - def 
anchor=(_) - end - - sig { returns(Integer) } - def days - end - - sig { params(_: Integer).returns(Integer) } - def days=(_) - end - - sig { params(days: Integer, anchor: Symbol).returns(T.attached_class) } - def self.new(days:, anchor: :last_active_at) - end - - sig { override.returns({anchor: Symbol, days: Integer}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_store_delete_params.rbi b/rbi/lib/openai/models/vector_store_delete_params.rbi deleted file mode 100644 index 3fa908fe..00000000 --- a/rbi/lib/openai/models/vector_store_delete_params.rbi +++ /dev/null @@ -1,21 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class VectorStoreDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/vector_store_deleted.rbi b/rbi/lib/openai/models/vector_store_deleted.rbi deleted file mode 100644 index ede60489..00000000 --- a/rbi/lib/openai/models/vector_store_deleted.rbi +++ /dev/null @@ -1,39 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class VectorStoreDeleted < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T::Boolean) } - def deleted - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def deleted=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { params(id: String, deleted: T::Boolean, object: Symbol).returns(T.attached_class) } - def self.new(id:, deleted:, object: :"vector_store.deleted") - end - - sig { override.returns({id: String, deleted: T::Boolean, object: Symbol}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/vector_store_list_params.rbi b/rbi/lib/openai/models/vector_store_list_params.rbi deleted file mode 100644 index a0c37415..00000000 --- a/rbi/lib/openai/models/vector_store_list_params.rbi +++ /dev/null @@ -1,83 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class VectorStoreListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(String)) } - def after - end - - sig { params(_: String).returns(String) } - def after=(_) - end - - sig { returns(T.nilable(String)) } - def before - end - - sig { params(_: String).returns(String) } - def before=(_) - end - - sig { returns(T.nilable(Integer)) } - def limit - end - - sig { params(_: Integer).returns(Integer) } - def limit=(_) - end - - sig { returns(T.nilable(Symbol)) } - def order - end - - sig { params(_: Symbol).returns(Symbol) } - def order=(_) - end - - sig do - params( - after: String, - before: String, - limit: Integer, - order: Symbol, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(after: nil, before: nil, limit: nil, order: nil, request_options: {}) - end - - sig do - override - .returns( - { - after: String, - before: String, - limit: Integer, - order: Symbol, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Order < OpenAI::Enum - abstract! 
- - ASC = :asc - DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_store_retrieve_params.rbi b/rbi/lib/openai/models/vector_store_retrieve_params.rbi deleted file mode 100644 index 88a00415..00000000 --- a/rbi/lib/openai/models/vector_store_retrieve_params.rbi +++ /dev/null @@ -1,21 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class VectorStoreRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig do - params(request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.attached_class) - end - def self.new(request_options: {}) - end - - sig { override.returns({request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end -end diff --git a/rbi/lib/openai/models/vector_store_search_params.rbi b/rbi/lib/openai/models/vector_store_search_params.rbi deleted file mode 100644 index 52ea39e4..00000000 --- a/rbi/lib/openai/models/vector_store_search_params.rbi +++ /dev/null @@ -1,150 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class VectorStoreSearchParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.any(String, T::Array[String])) } - def query - end - - sig { params(_: T.any(String, T::Array[String])).returns(T.any(String, T::Array[String])) } - def query=(_) - end - - sig { returns(T.nilable(T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter))) } - def filters - end - - sig do - params(_: T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter)) - .returns(T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter)) - end - def filters=(_) - end - - sig { returns(T.nilable(Integer)) } - def max_num_results - end - - sig { params(_: Integer).returns(Integer) } - def max_num_results=(_) - end - - sig { returns(T.nilable(OpenAI::Models::VectorStoreSearchParams::RankingOptions)) } - def ranking_options - end - - sig do - params(_: OpenAI::Models::VectorStoreSearchParams::RankingOptions) - .returns(OpenAI::Models::VectorStoreSearchParams::RankingOptions) - end - def ranking_options=(_) - end - - sig { returns(T.nilable(T::Boolean)) } - def rewrite_query - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def rewrite_query=(_) - end - - sig do - params( - query: T.any(String, T::Array[String]), - filters: T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter), - max_num_results: Integer, - ranking_options: OpenAI::Models::VectorStoreSearchParams::RankingOptions, - rewrite_query: T::Boolean, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(query:, filters: nil, max_num_results: nil, ranking_options: nil, rewrite_query: nil, request_options: {}) - end - - sig do - override - .returns( - { - query: T.any(String, T::Array[String]), - filters: T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter), - max_num_results: Integer, - ranking_options: OpenAI::Models::VectorStoreSearchParams::RankingOptions, - rewrite_query: T::Boolean, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Query < OpenAI::Union - abstract! 
- - StringArray = T.type_alias { T::Array[String] } - - class << self - sig { override.returns([[NilClass, String], [NilClass, T::Array[String]]]) } - private def variants - end - end - end - - class Filters < OpenAI::Union - abstract! - - class << self - sig do - override - .returns([[NilClass, OpenAI::Models::ComparisonFilter], [NilClass, OpenAI::Models::CompoundFilter]]) - end - private def variants - end - end - end - - class RankingOptions < OpenAI::BaseModel - sig { returns(T.nilable(Symbol)) } - def ranker - end - - sig { params(_: Symbol).returns(Symbol) } - def ranker=(_) - end - - sig { returns(T.nilable(Float)) } - def score_threshold - end - - sig { params(_: Float).returns(Float) } - def score_threshold=(_) - end - - sig { params(ranker: Symbol, score_threshold: Float).returns(T.attached_class) } - def self.new(ranker: nil, score_threshold: nil) - end - - sig { override.returns({ranker: Symbol, score_threshold: Float}) } - def to_hash - end - - class Ranker < OpenAI::Enum - abstract! - - AUTO = :auto - DEFAULT_2024_11_15 = :"default-2024-11-15" - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_store_search_response.rbi b/rbi/lib/openai/models/vector_store_search_response.rbi deleted file mode 100644 index d8ffa5fa..00000000 --- a/rbi/lib/openai/models/vector_store_search_response.rbi +++ /dev/null @@ -1,129 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class VectorStoreSearchResponse < OpenAI::BaseModel - sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } - def attributes - end - - sig do - params(_: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) - .returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) - end - def attributes=(_) - end - - sig { returns(T::Array[OpenAI::Models::VectorStoreSearchResponse::Content]) } - def content - end - - sig do - params(_: T::Array[OpenAI::Models::VectorStoreSearchResponse::Content]) - .returns(T::Array[OpenAI::Models::VectorStoreSearchResponse::Content]) - end - def content=(_) - end - - sig { returns(String) } - def file_id - end - - sig { params(_: String).returns(String) } - def file_id=(_) - end - - sig { returns(String) } - def filename - end - - sig { params(_: String).returns(String) } - def filename=(_) - end - - sig { returns(Float) } - def score - end - - sig { params(_: Float).returns(Float) } - def score=(_) - end - - sig do - params( - attributes: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)]), - content: T::Array[OpenAI::Models::VectorStoreSearchResponse::Content], - file_id: String, - filename: String, - score: Float - ) - .returns(T.attached_class) - end - def self.new(attributes:, content:, file_id:, filename:, score:) - end - - sig do - override - .returns( - { - attributes: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)]), - content: T::Array[OpenAI::Models::VectorStoreSearchResponse::Content], - file_id: String, - filename: String, - score: Float - } - ) - end - def to_hash - end - - class Attribute < OpenAI::Union - abstract! 
- - class << self - sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } - private def variants - end - end - end - - class Content < OpenAI::BaseModel - sig { returns(String) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(Symbol) } - def type - end - - sig { params(_: Symbol).returns(Symbol) } - def type=(_) - end - - sig { params(text: String, type: Symbol).returns(T.attached_class) } - def self.new(text:, type:) - end - - sig { override.returns({text: String, type: Symbol}) } - def to_hash - end - - class Type < OpenAI::Enum - abstract! - - TEXT = :text - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_store_update_params.rbi b/rbi/lib/openai/models/vector_store_update_params.rbi deleted file mode 100644 index 609a1ea7..00000000 --- a/rbi/lib/openai/models/vector_store_update_params.rbi +++ /dev/null @@ -1,89 +0,0 @@ -# typed: strong - -module OpenAI - module Models - class VectorStoreUpdateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter)) } - def expires_after - end - - sig do - params(_: T.nilable(OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter)) - .returns(T.nilable(OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter)) - end - def expires_after=(_) - end - - sig { returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata - end - - sig { params(_: T.nilable(OpenAI::Models::Metadata)).returns(T.nilable(OpenAI::Models::Metadata)) } - def metadata=(_) - end - - sig { returns(T.nilable(String)) } - def name - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def name=(_) - end - - sig do - params( - expires_after: T.nilable(OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter), - metadata: T.nilable(OpenAI::Models::Metadata), - name: T.nilable(String), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(expires_after: nil, metadata: nil, name: nil, request_options: {}) - end - - sig do - override - .returns( - { - expires_after: T.nilable(OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter), - metadata: T.nilable(OpenAI::Models::Metadata), - name: T.nilable(String), - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class ExpiresAfter < OpenAI::BaseModel - sig { returns(Symbol) } - def anchor - end - - sig { params(_: Symbol).returns(Symbol) } - def anchor=(_) - end - - sig { returns(Integer) } - def days - end - - sig { params(_: Integer).returns(Integer) } - def days=(_) - end - - sig { params(days: Integer, anchor: Symbol).returns(T.attached_class) } - def self.new(days:, anchor: :last_active_at) - end - - sig { override.returns({anchor: Symbol, days: Integer}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_stores/file_batch_cancel_params.rbi b/rbi/lib/openai/models/vector_stores/file_batch_cancel_params.rbi deleted file mode 100644 index cc6cad3f..00000000 --- a/rbi/lib/openai/models/vector_stores/file_batch_cancel_params.rbi +++ /dev/null @@ -1,34 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module VectorStores - class FileBatchCancelParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - 
sig { returns(String) } - def vector_store_id - end - - sig { params(_: String).returns(String) } - def vector_store_id=(_) - end - - sig do - params( - vector_store_id: String, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(vector_store_id:, request_options: {}) - end - - sig { override.returns({vector_store_id: String, request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_stores/file_batch_create_params.rbi b/rbi/lib/openai/models/vector_stores/file_batch_create_params.rbi deleted file mode 100644 index 50d4260c..00000000 --- a/rbi/lib/openai/models/vector_stores/file_batch_create_params.rbi +++ /dev/null @@ -1,103 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module VectorStores - class FileBatchCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T::Array[String]) } - def file_ids - end - - sig { params(_: T::Array[String]).returns(T::Array[String]) } - def file_ids=(_) - end - - sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } - def attributes - end - - sig do - params(_: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) - .returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) - end - def attributes=(_) - end - - sig do - returns( - T.nilable( - T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ) - ) - ) - end - def chunking_strategy - end - - sig do - params( - _: T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ) - ) - .returns( - T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ) - ) - end - def chunking_strategy=(_) - end - - sig do - params( - file_ids: T::Array[String], - attributes: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)]), - chunking_strategy: T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(file_ids:, attributes: nil, chunking_strategy: nil, request_options: {}) - end - - sig do - override - .returns( - { - file_ids: T::Array[String], - attributes: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)]), - chunking_strategy: T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ), - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Attribute < OpenAI::Union - abstract! 
- - class << self - sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_stores/file_batch_list_files_params.rbi b/rbi/lib/openai/models/vector_stores/file_batch_list_files_params.rbi deleted file mode 100644 index 6847460c..00000000 --- a/rbi/lib/openai/models/vector_stores/file_batch_list_files_params.rbi +++ /dev/null @@ -1,120 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module VectorStores - class FileBatchListFilesParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def vector_store_id - end - - sig { params(_: String).returns(String) } - def vector_store_id=(_) - end - - sig { returns(T.nilable(String)) } - def after - end - - sig { params(_: String).returns(String) } - def after=(_) - end - - sig { returns(T.nilable(String)) } - def before - end - - sig { params(_: String).returns(String) } - def before=(_) - end - - sig { returns(T.nilable(Symbol)) } - def filter - end - - sig { params(_: Symbol).returns(Symbol) } - def filter=(_) - end - - sig { returns(T.nilable(Integer)) } - def limit - end - - sig { params(_: Integer).returns(Integer) } - def limit=(_) - end - - sig { returns(T.nilable(Symbol)) } - def order - end - - sig { params(_: Symbol).returns(Symbol) } - def order=(_) - end - - sig do - params( - vector_store_id: String, - after: String, - before: String, - filter: Symbol, - limit: Integer, - order: Symbol, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(vector_store_id:, after: nil, before: nil, filter: nil, limit: nil, order: nil, request_options: {}) - end - - sig do - override - .returns( - { - vector_store_id: String, - after: String, - before: String, - filter: Symbol, - limit: Integer, - order: Symbol, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Filter < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - COMPLETED = :completed - FAILED = :failed - CANCELLED = :cancelled - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Order < OpenAI::Enum - abstract! 
- - ASC = :asc - DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_stores/file_batch_retrieve_params.rbi b/rbi/lib/openai/models/vector_stores/file_batch_retrieve_params.rbi deleted file mode 100644 index 20edec10..00000000 --- a/rbi/lib/openai/models/vector_stores/file_batch_retrieve_params.rbi +++ /dev/null @@ -1,34 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module VectorStores - class FileBatchRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def vector_store_id - end - - sig { params(_: String).returns(String) } - def vector_store_id=(_) - end - - sig do - params( - vector_store_id: String, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(vector_store_id:, request_options: {}) - end - - sig { override.returns({vector_store_id: String, request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_stores/file_content_params.rbi b/rbi/lib/openai/models/vector_stores/file_content_params.rbi deleted file mode 100644 index e9c610a6..00000000 --- a/rbi/lib/openai/models/vector_stores/file_content_params.rbi +++ /dev/null @@ -1,34 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module VectorStores - class FileContentParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def vector_store_id - end - - sig { params(_: String).returns(String) } - def vector_store_id=(_) - end - - sig do - params( - vector_store_id: String, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(vector_store_id:, request_options: {}) - end - - sig { override.returns({vector_store_id: String, request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_stores/file_content_response.rbi b/rbi/lib/openai/models/vector_stores/file_content_response.rbi deleted file mode 100644 index da110839..00000000 --- a/rbi/lib/openai/models/vector_stores/file_content_response.rbi +++ /dev/null @@ -1,33 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module VectorStores - class FileContentResponse < OpenAI::BaseModel - sig { returns(T.nilable(String)) } - def text - end - - sig { params(_: String).returns(String) } - def text=(_) - end - - sig { returns(T.nilable(String)) } - def type - end - - sig { params(_: String).returns(String) } - def type=(_) - end - - sig { params(text: String, type: String).returns(T.attached_class) } - def self.new(text: nil, type: nil) - end - - sig { override.returns({text: String, type: String}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_stores/file_create_params.rbi b/rbi/lib/openai/models/vector_stores/file_create_params.rbi deleted file mode 100644 index 9c1d277d..00000000 --- a/rbi/lib/openai/models/vector_stores/file_create_params.rbi +++ /dev/null @@ -1,103 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module VectorStores - class FileCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def file_id - end - - sig { params(_: 
String).returns(String) } - def file_id=(_) - end - - sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } - def attributes - end - - sig do - params(_: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) - .returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) - end - def attributes=(_) - end - - sig do - returns( - T.nilable( - T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ) - ) - ) - end - def chunking_strategy - end - - sig do - params( - _: T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ) - ) - .returns( - T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ) - ) - end - def chunking_strategy=(_) - end - - sig do - params( - file_id: String, - attributes: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)]), - chunking_strategy: T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(file_id:, attributes: nil, chunking_strategy: nil, request_options: {}) - end - - sig do - override - .returns( - { - file_id: String, - attributes: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)]), - chunking_strategy: T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ), - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Attribute < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_stores/file_delete_params.rbi b/rbi/lib/openai/models/vector_stores/file_delete_params.rbi deleted file mode 100644 index f7fe2e55..00000000 --- a/rbi/lib/openai/models/vector_stores/file_delete_params.rbi +++ /dev/null @@ -1,34 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module VectorStores - class FileDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def vector_store_id - end - - sig { params(_: String).returns(String) } - def vector_store_id=(_) - end - - sig do - params( - vector_store_id: String, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(vector_store_id:, request_options: {}) - end - - sig { override.returns({vector_store_id: String, request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_stores/file_list_params.rbi b/rbi/lib/openai/models/vector_stores/file_list_params.rbi deleted file mode 100644 index 42032f29..00000000 --- a/rbi/lib/openai/models/vector_stores/file_list_params.rbi +++ /dev/null @@ -1,110 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module VectorStores - class FileListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(T.nilable(String)) } - def after - end - - sig { params(_: String).returns(String) } - def after=(_) - end - - sig { returns(T.nilable(String)) } - def before - end - - sig { params(_: 
String).returns(String) } - def before=(_) - end - - sig { returns(T.nilable(Symbol)) } - def filter - end - - sig { params(_: Symbol).returns(Symbol) } - def filter=(_) - end - - sig { returns(T.nilable(Integer)) } - def limit - end - - sig { params(_: Integer).returns(Integer) } - def limit=(_) - end - - sig { returns(T.nilable(Symbol)) } - def order - end - - sig { params(_: Symbol).returns(Symbol) } - def order=(_) - end - - sig do - params( - after: String, - before: String, - filter: Symbol, - limit: Integer, - order: Symbol, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(after: nil, before: nil, filter: nil, limit: nil, order: nil, request_options: {}) - end - - sig do - override - .returns( - { - after: String, - before: String, - filter: Symbol, - limit: Integer, - order: Symbol, - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Filter < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - COMPLETED = :completed - FAILED = :failed - CANCELLED = :cancelled - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Order < OpenAI::Enum - abstract! - - ASC = :asc - DESC = :desc - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_stores/file_retrieve_params.rbi b/rbi/lib/openai/models/vector_stores/file_retrieve_params.rbi deleted file mode 100644 index 4f1f1768..00000000 --- a/rbi/lib/openai/models/vector_stores/file_retrieve_params.rbi +++ /dev/null @@ -1,34 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module VectorStores - class FileRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def vector_store_id - end - - sig { params(_: String).returns(String) } - def vector_store_id=(_) - end - - sig do - params( - vector_store_id: String, - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def self.new(vector_store_id:, request_options: {}) - end - - sig { override.returns({vector_store_id: String, request_options: OpenAI::RequestOptions}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_stores/file_update_params.rbi b/rbi/lib/openai/models/vector_stores/file_update_params.rbi deleted file mode 100644 index 865c29c6..00000000 --- a/rbi/lib/openai/models/vector_stores/file_update_params.rbi +++ /dev/null @@ -1,65 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module VectorStores - class FileUpdateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters - - sig { returns(String) } - def vector_store_id - end - - sig { params(_: String).returns(String) } - def vector_store_id=(_) - end - - sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } - def attributes - end - - sig do - params(_: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) - .returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) - end - def attributes=(_) - end - - sig do - params( - vector_store_id: String, - attributes: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)]), - request_options: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]) - ) - .returns(T.attached_class) - end - def 
self.new(vector_store_id:, attributes:, request_options: {}) - end - - sig do - override - .returns( - { - vector_store_id: String, - attributes: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)]), - request_options: OpenAI::RequestOptions - } - ) - end - def to_hash - end - - class Attribute < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_stores/vector_store_file.rbi b/rbi/lib/openai/models/vector_stores/vector_store_file.rbi deleted file mode 100644 index 102bd1c5..00000000 --- a/rbi/lib/openai/models/vector_stores/vector_store_file.rbi +++ /dev/null @@ -1,213 +0,0 @@ -# typed: strong - -module OpenAI - module Models - VectorStoreFile = T.type_alias { VectorStores::VectorStoreFile } - - module VectorStores - class VectorStoreFile < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(Integer) } - def created_at - end - - sig { params(_: Integer).returns(Integer) } - def created_at=(_) - end - - sig { returns(T.nilable(OpenAI::Models::VectorStores::VectorStoreFile::LastError)) } - def last_error - end - - sig do - params(_: T.nilable(OpenAI::Models::VectorStores::VectorStoreFile::LastError)) - .returns(T.nilable(OpenAI::Models::VectorStores::VectorStoreFile::LastError)) - end - def last_error=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(Symbol) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(Integer) } - def usage_bytes - end - - sig { params(_: Integer).returns(Integer) } - def usage_bytes=(_) - end - - sig { returns(String) } - def vector_store_id - end - - sig { params(_: String).returns(String) } - def vector_store_id=(_) - end - - sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } - def attributes - end - - sig do - params(_: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) - .returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) - end - def attributes=(_) - end - - sig do - returns( - T.nilable( - T.any(OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject) - ) - ) - end - def chunking_strategy - end - - sig do - params( - _: T.any(OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject) - ) - .returns( - T.any(OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject) - ) - end - def chunking_strategy=(_) - end - - sig do - params( - id: String, - created_at: Integer, - last_error: T.nilable(OpenAI::Models::VectorStores::VectorStoreFile::LastError), - status: Symbol, - usage_bytes: Integer, - vector_store_id: String, - attributes: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)]), - chunking_strategy: T.any(OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject), - object: Symbol - ) - .returns(T.attached_class) - end - def self.new( - id:, - created_at:, - last_error:, - status:, - usage_bytes:, - vector_store_id:, - attributes: nil, - chunking_strategy: nil, - object: :"vector_store.file" - ) - end - - sig do - override - .returns( - { - id: String, - created_at: Integer, - last_error: 
T.nilable(OpenAI::Models::VectorStores::VectorStoreFile::LastError), - object: Symbol, - status: Symbol, - usage_bytes: Integer, - vector_store_id: String, - attributes: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)]), - chunking_strategy: T.any(OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject) - } - ) - end - def to_hash - end - - class LastError < OpenAI::BaseModel - sig { returns(Symbol) } - def code - end - - sig { params(_: Symbol).returns(Symbol) } - def code=(_) - end - - sig { returns(String) } - def message - end - - sig { params(_: String).returns(String) } - def message=(_) - end - - sig { params(code: Symbol, message: String).returns(T.attached_class) } - def self.new(code:, message:) - end - - sig { override.returns({code: Symbol, message: String}) } - def to_hash - end - - class Code < OpenAI::Enum - abstract! - - SERVER_ERROR = :server_error - UNSUPPORTED_FILE = :unsupported_file - INVALID_FILE = :invalid_file - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - - class Status < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - COMPLETED = :completed - CANCELLED = :cancelled - FAILED = :failed - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - - class Attribute < OpenAI::Union - abstract! - - class << self - sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } - private def variants - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_stores/vector_store_file_batch.rbi b/rbi/lib/openai/models/vector_stores/vector_store_file_batch.rbi deleted file mode 100644 index 1fa22e5f..00000000 --- a/rbi/lib/openai/models/vector_stores/vector_store_file_batch.rbi +++ /dev/null @@ -1,175 +0,0 @@ -# typed: strong - -module OpenAI - module Models - VectorStoreFileBatch = T.type_alias { VectorStores::VectorStoreFileBatch } - - module VectorStores - class VectorStoreFileBatch < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(Integer) } - def created_at - end - - sig { params(_: Integer).returns(Integer) } - def created_at=(_) - end - - sig { returns(OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts) } - def file_counts - end - - sig do - params(_: OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts) - .returns(OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts) - end - def file_counts=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { returns(Symbol) } - def status - end - - sig { params(_: Symbol).returns(Symbol) } - def status=(_) - end - - sig { returns(String) } - def vector_store_id - end - - sig { params(_: String).returns(String) } - def vector_store_id=(_) - end - - sig do - params( - id: String, - created_at: Integer, - file_counts: OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts, - status: Symbol, - vector_store_id: String, - object: Symbol - ) - .returns(T.attached_class) - end - def self.new(id:, created_at:, file_counts:, status:, vector_store_id:, object: :"vector_store.files_batch") - end - - sig do - override - .returns( - { - id: String, - created_at: Integer, - file_counts: OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts, - object: Symbol, - status: Symbol, - vector_store_id: String - } - ) - end - def 
to_hash - end - - class FileCounts < OpenAI::BaseModel - sig { returns(Integer) } - def cancelled - end - - sig { params(_: Integer).returns(Integer) } - def cancelled=(_) - end - - sig { returns(Integer) } - def completed - end - - sig { params(_: Integer).returns(Integer) } - def completed=(_) - end - - sig { returns(Integer) } - def failed - end - - sig { params(_: Integer).returns(Integer) } - def failed=(_) - end - - sig { returns(Integer) } - def in_progress - end - - sig { params(_: Integer).returns(Integer) } - def in_progress=(_) - end - - sig { returns(Integer) } - def total - end - - sig { params(_: Integer).returns(Integer) } - def total=(_) - end - - sig do - params( - cancelled: Integer, - completed: Integer, - failed: Integer, - in_progress: Integer, - total: Integer - ) - .returns(T.attached_class) - end - def self.new(cancelled:, completed:, failed:, in_progress:, total:) - end - - sig do - override - .returns({ - cancelled: Integer, - completed: Integer, - failed: Integer, - in_progress: Integer, - total: Integer - }) - end - def to_hash - end - end - - class Status < OpenAI::Enum - abstract! - - IN_PROGRESS = :in_progress - COMPLETED = :completed - CANCELLED = :cancelled - FAILED = :failed - - class << self - sig { override.returns(T::Array[Symbol]) } - def values - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/models/vector_stores/vector_store_file_deleted.rbi b/rbi/lib/openai/models/vector_stores/vector_store_file_deleted.rbi deleted file mode 100644 index 932ec0f8..00000000 --- a/rbi/lib/openai/models/vector_stores/vector_store_file_deleted.rbi +++ /dev/null @@ -1,43 +0,0 @@ -# typed: strong - -module OpenAI - module Models - VectorStoreFileDeleted = T.type_alias { VectorStores::VectorStoreFileDeleted } - - module VectorStores - class VectorStoreFileDeleted < OpenAI::BaseModel - sig { returns(String) } - def id - end - - sig { params(_: String).returns(String) } - def id=(_) - end - - sig { returns(T::Boolean) } - def deleted - end - - sig { params(_: T::Boolean).returns(T::Boolean) } - def deleted=(_) - end - - sig { returns(Symbol) } - def object - end - - sig { params(_: Symbol).returns(Symbol) } - def object=(_) - end - - sig { params(id: String, deleted: T::Boolean, object: Symbol).returns(T.attached_class) } - def self.new(id:, deleted:, object: :"vector_store.file.deleted") - end - - sig { override.returns({id: String, deleted: T::Boolean, object: Symbol}) } - def to_hash - end - end - end - end -end diff --git a/rbi/lib/openai/page.rbi b/rbi/lib/openai/page.rbi deleted file mode 100644 index 876486a4..00000000 --- a/rbi/lib/openai/page.rbi +++ /dev/null @@ -1,37 +0,0 @@ -# typed: strong - -module OpenAI - class Page - include OpenAI::BasePage - - Elem = type_member - - sig { returns(T::Array[Elem]) } - def data - end - - sig { params(_: T::Array[Elem]).returns(T::Array[Elem]) } - def data=(_) - end - - sig { returns(String) } - def object - end - - sig { params(_: String).returns(String) } - def object=(_) - end - - sig do - params( - client: OpenAI::BaseClient, - req: OpenAI::BaseClient::RequestComponentsShape, - headers: T.any(T::Hash[String, String], Net::HTTPHeader), - page_data: T::Array[T.anything] - ) - .void - end - def initialize(client:, req:, headers:, page_data:) - end - end -end diff --git a/rbi/lib/openai/pooled_net_requester.rbi b/rbi/lib/openai/pooled_net_requester.rbi deleted file mode 100644 index e94f1912..00000000 --- a/rbi/lib/openai/pooled_net_requester.rbi +++ /dev/null @@ -1,38 +0,0 @@ -# typed: strong - -module 
OpenAI - class PooledNetRequester - RequestShape = T.type_alias do - {method: Symbol, url: URI::Generic, headers: T::Hash[String, String], body: T.anything, deadline: Float} - end - - class << self - sig { params(url: URI::Generic).returns(Net::HTTP) } - def connect(url) - end - - sig { params(conn: Net::HTTP, deadline: Float).void } - def calibrate_socket_timeout(conn, deadline) - end - - sig { params(request: OpenAI::PooledNetRequester::RequestShape).returns(Net::HTTPGenericRequest) } - def build_request(request) - end - end - - sig { params(url: URI::Generic, blk: T.proc.params(arg0: Net::HTTP).void).void } - private def with_pool(url, &blk) - end - - sig do - params(request: OpenAI::PooledNetRequester::RequestShape) - .returns([Net::HTTPResponse, T::Enumerable[String]]) - end - def execute(request) - end - - sig { returns(T.attached_class) } - def self.new - end - end -end diff --git a/rbi/lib/openai/request_options.rbi b/rbi/lib/openai/request_options.rbi deleted file mode 100644 index f55d6e56..00000000 --- a/rbi/lib/openai/request_options.rbi +++ /dev/null @@ -1,88 +0,0 @@ -# typed: strong - -module OpenAI - module RequestParameters - abstract! - - sig { returns(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) } - def request_options - end - - sig do - params(_: T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - .returns(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - end - def request_options=(_) - end - - module Converter - sig { params(params: T.anything).returns([T.anything, T::Hash[Symbol, T.anything]]) } - def dump_request(params) - end - end - end - - class RequestOptions < OpenAI::BaseModel - sig { params(opts: T.any(T.self_type, T::Hash[Symbol, T.anything])).void } - def self.validate!(opts) - end - - sig { returns(T.nilable(String)) } - def idempotency_key - end - - sig { params(_: T.nilable(String)).returns(T.nilable(String)) } - def idempotency_key=(_) - end - - sig { returns(T.nilable(T::Hash[String, T.nilable(T.any(T::Array[String], String))])) } - def extra_query - end - - sig do - params(_: T.nilable(T::Hash[String, T.nilable(T.any(T::Array[String], String))])) - .returns(T.nilable(T::Hash[String, T.nilable(T.any(T::Array[String], String))])) - end - def extra_query=(_) - end - - sig { returns(T.nilable(T::Hash[String, T.nilable(String)])) } - def extra_headers - end - - sig do - params(_: T.nilable(T::Hash[String, T.nilable(String)])) - .returns(T.nilable(T::Hash[String, T.nilable(String)])) - end - def extra_headers=(_) - end - - sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } - def extra_body - end - - sig { params(_: T.nilable(T::Hash[Symbol, T.anything])).returns(T.nilable(T::Hash[Symbol, T.anything])) } - def extra_body=(_) - end - - sig { returns(T.nilable(Integer)) } - def max_retries - end - - sig { params(_: T.nilable(Integer)).returns(T.nilable(Integer)) } - def max_retries=(_) - end - - sig { returns(T.nilable(Float)) } - def timeout - end - - sig { params(_: T.nilable(Float)).returns(T.nilable(Float)) } - def timeout=(_) - end - - sig { params(values: T::Hash[Symbol, T.anything]).returns(T.attached_class) } - def self.new(values = {}) - end - end -end diff --git a/rbi/lib/openai/resources/audio/speech.rbi b/rbi/lib/openai/resources/audio/speech.rbi deleted file mode 100644 index 5ad154db..00000000 --- a/rbi/lib/openai/resources/audio/speech.rbi +++ /dev/null @@ -1,27 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Audio - class Speech - sig do - params( - input: String, - model: 
T.any(String, Symbol), - voice: Symbol, - response_format: Symbol, - speed: Float, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(T.anything) - end - def create(input:, model:, voice:, response_format: nil, speed: nil, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end -end diff --git a/rbi/lib/openai/resources/audio/transcriptions.rbi b/rbi/lib/openai/resources/audio/transcriptions.rbi deleted file mode 100644 index a88bec77..00000000 --- a/rbi/lib/openai/resources/audio/transcriptions.rbi +++ /dev/null @@ -1,38 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Audio - class Transcriptions - sig do - params( - file: T.any(IO, StringIO), - model: T.any(String, Symbol), - language: String, - prompt: String, - response_format: Symbol, - temperature: Float, - timestamp_granularities: T::Array[Symbol], - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(T.any(OpenAI::Models::Audio::Transcription, OpenAI::Models::Audio::TranscriptionVerbose)) - end - def create( - file:, - model:, - language: nil, - prompt: nil, - response_format: nil, - temperature: nil, - timestamp_granularities: nil, - request_options: {} - ) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end -end diff --git a/rbi/lib/openai/resources/audio/translations.rbi b/rbi/lib/openai/resources/audio/translations.rbi deleted file mode 100644 index 2b323900..00000000 --- a/rbi/lib/openai/resources/audio/translations.rbi +++ /dev/null @@ -1,27 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Audio - class Translations - sig do - params( - file: T.any(IO, StringIO), - model: T.any(String, Symbol), - prompt: String, - response_format: Symbol, - temperature: Float, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(T.any(OpenAI::Models::Audio::Translation, OpenAI::Models::Audio::TranslationVerbose)) - end - def create(file:, model:, prompt: nil, response_format: nil, temperature: nil, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end -end diff --git a/rbi/lib/openai/resources/batches.rbi b/rbi/lib/openai/resources/batches.rbi deleted file mode 100644 index ec305bd4..00000000 --- a/rbi/lib/openai/resources/batches.rbi +++ /dev/null @@ -1,55 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Batches - sig do - params( - completion_window: Symbol, - endpoint: Symbol, - input_file_id: String, - metadata: T.nilable(OpenAI::Models::Metadata), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Batch) - end - def create(completion_window:, endpoint:, input_file_id:, metadata: nil, request_options: {}) - end - - sig do - params( - batch_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Batch) - end - def retrieve(batch_id, request_options: {}) - end - - sig do - params( - after: String, - limit: Integer, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::CursorPage[OpenAI::Models::Batch]) - end - def list(after: nil, limit: nil, request_options: {}) - end - - sig do - 
params( - batch_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Batch) - end - def cancel(batch_id, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end -end diff --git a/rbi/lib/openai/resources/beta/assistants.rbi b/rbi/lib/openai/resources/beta/assistants.rbi deleted file mode 100644 index 72828f08..00000000 --- a/rbi/lib/openai/resources/beta/assistants.rbi +++ /dev/null @@ -1,140 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Beta - class Assistants - sig do - params( - model: T.any(String, Symbol), - description: T.nilable(String), - instructions: T.nilable(String), - metadata: T.nilable(OpenAI::Models::Metadata), - name: T.nilable(String), - reasoning_effort: T.nilable(Symbol), - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - tool_resources: T.nilable(OpenAI::Models::Beta::AssistantCreateParams::ToolResources), - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ], - top_p: T.nilable(Float), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Assistant) - end - def create( - model:, - description: nil, - instructions: nil, - metadata: nil, - name: nil, - reasoning_effort: nil, - response_format: nil, - temperature: nil, - tool_resources: nil, - tools: nil, - top_p: nil, - request_options: {} - ) - end - - sig do - params( - assistant_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Assistant) - end - def retrieve(assistant_id, request_options: {}) - end - - sig do - params( - assistant_id: String, - description: T.nilable(String), - instructions: T.nilable(String), - metadata: T.nilable(OpenAI::Models::Metadata), - model: T.any(String, Symbol), - name: T.nilable(String), - reasoning_effort: T.nilable(Symbol), - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - tool_resources: T.nilable(OpenAI::Models::Beta::AssistantUpdateParams::ToolResources), - tools: T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ], - top_p: T.nilable(Float), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Assistant) - end - def update( - assistant_id, - description: nil, - instructions: nil, - metadata: nil, - model: nil, - name: nil, - reasoning_effort: nil, - response_format: nil, - temperature: nil, - tool_resources: nil, - tools: nil, - top_p: nil, - request_options: {} - ) - end - - sig do - params( - after: String, - before: String, - limit: Integer, - order: Symbol, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::CursorPage[OpenAI::Models::Beta::Assistant]) - end - def list(after: nil, before: nil, limit: nil, order: nil, request_options: {}) - end - - sig do - params( - assistant_id: 
String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::AssistantDeleted) - end - def delete(assistant_id, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end -end diff --git a/rbi/lib/openai/resources/beta/threads.rbi b/rbi/lib/openai/resources/beta/threads.rbi deleted file mode 100644 index 02bc8060..00000000 --- a/rbi/lib/openai/resources/beta/threads.rbi +++ /dev/null @@ -1,206 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Beta - class Threads - sig { returns(OpenAI::Resources::Beta::Threads::Runs) } - def runs - end - - sig { returns(OpenAI::Resources::Beta::Threads::Messages) } - def messages - end - - sig do - params( - messages: T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message], - metadata: T.nilable(OpenAI::Models::Metadata), - tool_resources: T.nilable(OpenAI::Models::Beta::ThreadCreateParams::ToolResources), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Thread) - end - def create(messages: nil, metadata: nil, tool_resources: nil, request_options: {}) - end - - sig do - params( - thread_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Thread) - end - def retrieve(thread_id, request_options: {}) - end - - sig do - params( - thread_id: String, - metadata: T.nilable(OpenAI::Models::Metadata), - tool_resources: T.nilable(OpenAI::Models::Beta::ThreadUpdateParams::ToolResources), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Thread) - end - def update(thread_id, metadata: nil, tool_resources: nil, request_options: {}) - end - - sig do - params( - thread_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::ThreadDeleted) - end - def delete(thread_id, request_options: {}) - end - - sig do - params( - assistant_id: String, - instructions: T.nilable(String), - max_completion_tokens: T.nilable(Integer), - max_prompt_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - model: T.nilable(T.any(String, Symbol)), - parallel_tool_calls: T::Boolean, - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - thread: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread, - tool_choice: T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice)), - tool_resources: T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources), - tools: T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ), - top_p: T.nilable(Float), - truncation_strategy: T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Threads::Run) - end - def create_and_run( - assistant_id:, - instructions: nil, - max_completion_tokens: nil, - max_prompt_tokens: nil, - metadata: nil, - model: nil, - parallel_tool_calls: nil, - response_format: nil, - 
temperature: nil, - thread: nil, - tool_choice: nil, - tool_resources: nil, - tools: nil, - top_p: nil, - truncation_strategy: nil, - request_options: {} - ) - end - - sig do - params( - assistant_id: String, - instructions: T.nilable(String), - max_completion_tokens: T.nilable(Integer), - max_prompt_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - model: T.nilable(T.any(String, Symbol)), - parallel_tool_calls: T::Boolean, - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - thread: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread, - tool_choice: T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice)), - tool_resources: T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources), - tools: T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ), - top_p: T.nilable(Float), - truncation_strategy: T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns( - OpenAI::Stream[ - T.any( - OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete, - OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent - ) - ] - ) - end - def create_and_run_streaming( - assistant_id:, - instructions: nil, - max_completion_tokens: nil, - max_prompt_tokens: nil, - metadata: nil, - model: nil, - parallel_tool_calls: nil, - response_format: nil, - temperature: nil, - thread: nil, - tool_choice: nil, - tool_resources: nil, - tools: nil, - top_p: nil, - truncation_strategy: nil, - request_options: {} - ) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end -end diff --git a/rbi/lib/openai/resources/beta/threads/messages.rbi 
b/rbi/lib/openai/resources/beta/threads/messages.rbi deleted file mode 100644 index 28c889d1..00000000 --- a/rbi/lib/openai/resources/beta/threads/messages.rbi +++ /dev/null @@ -1,95 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Beta - class Threads - class Messages - sig do - params( - thread_id: String, - content: T.any( - String, - T::Array[ - T.any( - OpenAI::Models::Beta::Threads::ImageFileContentBlock, - OpenAI::Models::Beta::Threads::ImageURLContentBlock, - OpenAI::Models::Beta::Threads::TextContentBlockParam - ) - ] - ), - role: Symbol, - attachments: T.nilable(T::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment]), - metadata: T.nilable(OpenAI::Models::Metadata), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Threads::Message) - end - def create(thread_id, content:, role:, attachments: nil, metadata: nil, request_options: {}) - end - - sig do - params( - message_id: String, - thread_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Threads::Message) - end - def retrieve(message_id, thread_id:, request_options: {}) - end - - sig do - params( - message_id: String, - thread_id: String, - metadata: T.nilable(OpenAI::Models::Metadata), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Threads::Message) - end - def update(message_id, thread_id:, metadata: nil, request_options: {}) - end - - sig do - params( - thread_id: String, - after: String, - before: String, - limit: Integer, - order: Symbol, - run_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::CursorPage[OpenAI::Models::Beta::Threads::Message]) - end - def list( - thread_id, - after: nil, - before: nil, - limit: nil, - order: nil, - run_id: nil, - request_options: {} - ) - end - - sig do - params( - message_id: String, - thread_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Threads::MessageDeleted) - end - def delete(message_id, thread_id:, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end - end -end diff --git a/rbi/lib/openai/resources/beta/threads/runs.rbi b/rbi/lib/openai/resources/beta/threads/runs.rbi deleted file mode 100644 index 7f5df3f4..00000000 --- a/rbi/lib/openai/resources/beta/threads/runs.rbi +++ /dev/null @@ -1,273 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Beta - class Threads - class Runs - sig { returns(OpenAI::Resources::Beta::Threads::Runs::Steps) } - def steps - end - - sig do - params( - thread_id: String, - assistant_id: String, - include: T::Array[Symbol], - additional_instructions: T.nilable(String), - additional_messages: T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage]), - instructions: T.nilable(String), - max_completion_tokens: T.nilable(Integer), - max_prompt_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - model: T.nilable(T.any(String, Symbol)), - parallel_tool_calls: T::Boolean, - reasoning_effort: T.nilable(Symbol), - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - 
OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - tool_choice: T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice)), - tools: T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ), - top_p: T.nilable(Float), - truncation_strategy: T.nilable(OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Threads::Run) - end - def create( - thread_id, - assistant_id:, - include: nil, - additional_instructions: nil, - additional_messages: nil, - instructions: nil, - max_completion_tokens: nil, - max_prompt_tokens: nil, - metadata: nil, - model: nil, - parallel_tool_calls: nil, - reasoning_effort: nil, - response_format: nil, - temperature: nil, - tool_choice: nil, - tools: nil, - top_p: nil, - truncation_strategy: nil, - request_options: {} - ) - end - - sig do - params( - thread_id: String, - assistant_id: String, - include: T::Array[Symbol], - additional_instructions: T.nilable(String), - additional_messages: T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage]), - instructions: T.nilable(String), - max_completion_tokens: T.nilable(Integer), - max_prompt_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - model: T.nilable(T.any(String, Symbol)), - parallel_tool_calls: T::Boolean, - reasoning_effort: T.nilable(Symbol), - response_format: T.nilable( - T.any( - Symbol, - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONObject, - OpenAI::Models::ResponseFormatJSONSchema - ) - ), - temperature: T.nilable(Float), - tool_choice: T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice)), - tools: T.nilable( - T::Array[ - T.any( - OpenAI::Models::Beta::CodeInterpreterTool, - OpenAI::Models::Beta::FileSearchTool, - OpenAI::Models::Beta::FunctionTool - ) - ] - ), - top_p: T.nilable(Float), - truncation_strategy: T.nilable(OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns( - OpenAI::Stream[ - T.any( - OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired, - 
OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete, - OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent - ) - ] - ) - end - def create_streaming( - thread_id, - assistant_id:, - include: nil, - additional_instructions: nil, - additional_messages: nil, - instructions: nil, - max_completion_tokens: nil, - max_prompt_tokens: nil, - metadata: nil, - model: nil, - parallel_tool_calls: nil, - reasoning_effort: nil, - response_format: nil, - temperature: nil, - tool_choice: nil, - tools: nil, - top_p: nil, - truncation_strategy: nil, - request_options: {} - ) - end - - sig do - params( - run_id: String, - thread_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Threads::Run) - end - def retrieve(run_id, thread_id:, request_options: {}) - end - - sig do - params( - run_id: String, - thread_id: String, - metadata: T.nilable(OpenAI::Models::Metadata), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Threads::Run) - end - def update(run_id, thread_id:, metadata: nil, request_options: {}) - end - - sig do - params( - thread_id: String, - after: String, - before: String, - limit: Integer, - order: Symbol, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::CursorPage[OpenAI::Models::Beta::Threads::Run]) - end - def list(thread_id, after: nil, before: nil, limit: nil, order: nil, request_options: {}) - end - - sig do - params( - run_id: String, - thread_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Threads::Run) - end - def cancel(run_id, thread_id:, request_options: {}) - end - - sig do - params( - run_id: String, - thread_id: String, - tool_outputs: T::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput], - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Threads::Run) - end - def submit_tool_outputs(run_id, thread_id:, tool_outputs:, request_options: {}) - end - - sig do - params( - run_id: String, - thread_id: String, - tool_outputs: T::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput], - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns( - OpenAI::Stream[ - T.any( - OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired, - 
OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted, - OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete, - OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent - ) - ] - ) - end - def submit_tool_outputs_streaming(run_id, thread_id:, tool_outputs:, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end - end -end diff --git a/rbi/lib/openai/resources/beta/threads/runs/steps.rbi b/rbi/lib/openai/resources/beta/threads/runs/steps.rbi deleted file mode 100644 index 1714ab4d..00000000 --- a/rbi/lib/openai/resources/beta/threads/runs/steps.rbi +++ /dev/null @@ -1,55 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Beta - class Threads - class Runs - class Steps - sig do - params( - step_id: String, - thread_id: String, - run_id: String, - include: T::Array[Symbol], - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Beta::Threads::Runs::RunStep) - end - def retrieve(step_id, thread_id:, run_id:, include: nil, request_options: {}) - end - - sig do - params( - run_id: String, - thread_id: String, - after: String, - before: String, - include: T::Array[Symbol], - limit: Integer, - order: Symbol, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::CursorPage[OpenAI::Models::Beta::Threads::Runs::RunStep]) - end - def list( - run_id, - thread_id:, - after: nil, - before: nil, - include: nil, - limit: nil, - order: nil, - request_options: {} - ) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end - end - end -end diff --git a/rbi/lib/openai/resources/chat/completions.rbi b/rbi/lib/openai/resources/chat/completions.rbi deleted file mode 100644 index 2ea5223f..00000000 --- a/rbi/lib/openai/resources/chat/completions.rbi +++ /dev/null @@ -1,230 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Chat - class Completions - sig { returns(OpenAI::Resources::Chat::Completions::Messages) } - def messages - end - - sig do - params( - messages: T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, - OpenAI::Models::Chat::ChatCompletionSystemMessageParam, - OpenAI::Models::Chat::ChatCompletionUserMessageParam, - OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, - OpenAI::Models::Chat::ChatCompletionToolMessageParam, - OpenAI::Models::Chat::ChatCompletionFunctionMessageParam - ) - ], - model: T.any(String, Symbol), - audio: T.nilable(OpenAI::Models::Chat::ChatCompletionAudioParam), - frequency_penalty: T.nilable(Float), - function_call: T.any(Symbol, OpenAI::Models::Chat::ChatCompletionFunctionCallOption), - functions: 
T::Array[OpenAI::Models::Chat::CompletionCreateParams::Function], - logit_bias: T.nilable(T::Hash[Symbol, Integer]), - logprobs: T.nilable(T::Boolean), - max_completion_tokens: T.nilable(Integer), - max_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - modalities: T.nilable(T::Array[Symbol]), - n: T.nilable(Integer), - parallel_tool_calls: T::Boolean, - prediction: T.nilable(OpenAI::Models::Chat::ChatCompletionPredictionContent), - presence_penalty: T.nilable(Float), - reasoning_effort: T.nilable(Symbol), - response_format: T.any( - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONSchema, - OpenAI::Models::ResponseFormatJSONObject - ), - seed: T.nilable(Integer), - service_tier: T.nilable(Symbol), - stop: T.nilable(T.any(String, T::Array[String])), - store: T.nilable(T::Boolean), - stream_options: T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions), - temperature: T.nilable(Float), - tool_choice: T.any(Symbol, OpenAI::Models::Chat::ChatCompletionNamedToolChoice), - tools: T::Array[OpenAI::Models::Chat::ChatCompletionTool], - top_logprobs: T.nilable(Integer), - top_p: T.nilable(Float), - user: String, - web_search_options: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Chat::ChatCompletion) - end - def create( - messages:, - model:, - audio: nil, - frequency_penalty: nil, - function_call: nil, - functions: nil, - logit_bias: nil, - logprobs: nil, - max_completion_tokens: nil, - max_tokens: nil, - metadata: nil, - modalities: nil, - n: nil, - parallel_tool_calls: nil, - prediction: nil, - presence_penalty: nil, - reasoning_effort: nil, - response_format: nil, - seed: nil, - service_tier: nil, - stop: nil, - store: nil, - stream_options: nil, - temperature: nil, - tool_choice: nil, - tools: nil, - top_logprobs: nil, - top_p: nil, - user: nil, - web_search_options: nil, - request_options: {} - ) - end - - sig do - params( - messages: T::Array[ - T.any( - OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, - OpenAI::Models::Chat::ChatCompletionSystemMessageParam, - OpenAI::Models::Chat::ChatCompletionUserMessageParam, - OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, - OpenAI::Models::Chat::ChatCompletionToolMessageParam, - OpenAI::Models::Chat::ChatCompletionFunctionMessageParam - ) - ], - model: T.any(String, Symbol), - audio: T.nilable(OpenAI::Models::Chat::ChatCompletionAudioParam), - frequency_penalty: T.nilable(Float), - function_call: T.any(Symbol, OpenAI::Models::Chat::ChatCompletionFunctionCallOption), - functions: T::Array[OpenAI::Models::Chat::CompletionCreateParams::Function], - logit_bias: T.nilable(T::Hash[Symbol, Integer]), - logprobs: T.nilable(T::Boolean), - max_completion_tokens: T.nilable(Integer), - max_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - modalities: T.nilable(T::Array[Symbol]), - n: T.nilable(Integer), - parallel_tool_calls: T::Boolean, - prediction: T.nilable(OpenAI::Models::Chat::ChatCompletionPredictionContent), - presence_penalty: T.nilable(Float), - reasoning_effort: T.nilable(Symbol), - response_format: T.any( - OpenAI::Models::ResponseFormatText, - OpenAI::Models::ResponseFormatJSONSchema, - OpenAI::Models::ResponseFormatJSONObject - ), - seed: T.nilable(Integer), - service_tier: T.nilable(Symbol), - stop: T.nilable(T.any(String, T::Array[String])), - store: T.nilable(T::Boolean), - stream_options: 
T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions), - temperature: T.nilable(Float), - tool_choice: T.any(Symbol, OpenAI::Models::Chat::ChatCompletionNamedToolChoice), - tools: T::Array[OpenAI::Models::Chat::ChatCompletionTool], - top_logprobs: T.nilable(Integer), - top_p: T.nilable(Float), - user: String, - web_search_options: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Stream[OpenAI::Models::Chat::ChatCompletionChunk]) - end - def create_streaming( - messages:, - model:, - audio: nil, - frequency_penalty: nil, - function_call: nil, - functions: nil, - logit_bias: nil, - logprobs: nil, - max_completion_tokens: nil, - max_tokens: nil, - metadata: nil, - modalities: nil, - n: nil, - parallel_tool_calls: nil, - prediction: nil, - presence_penalty: nil, - reasoning_effort: nil, - response_format: nil, - seed: nil, - service_tier: nil, - stop: nil, - store: nil, - stream_options: nil, - temperature: nil, - tool_choice: nil, - tools: nil, - top_logprobs: nil, - top_p: nil, - user: nil, - web_search_options: nil, - request_options: {} - ) - end - - sig do - params( - completion_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Chat::ChatCompletion) - end - def retrieve(completion_id, request_options: {}) - end - - sig do - params( - completion_id: String, - metadata: T.nilable(OpenAI::Models::Metadata), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Chat::ChatCompletion) - end - def update(completion_id, metadata:, request_options: {}) - end - - sig do - params( - after: String, - limit: Integer, - metadata: T.nilable(OpenAI::Models::Metadata), - model: String, - order: Symbol, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::CursorPage[OpenAI::Models::Chat::ChatCompletion]) - end - def list(after: nil, limit: nil, metadata: nil, model: nil, order: nil, request_options: {}) - end - - sig do - params( - completion_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Chat::ChatCompletionDeleted) - end - def delete(completion_id, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end -end diff --git a/rbi/lib/openai/resources/chat/completions/messages.rbi b/rbi/lib/openai/resources/chat/completions/messages.rbi deleted file mode 100644 index 4f39f196..00000000 --- a/rbi/lib/openai/resources/chat/completions/messages.rbi +++ /dev/null @@ -1,28 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Chat - class Completions - class Messages - sig do - params( - completion_id: String, - after: String, - limit: Integer, - order: Symbol, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::CursorPage[OpenAI::Models::Chat::ChatCompletionStoreMessage]) - end - def list(completion_id, after: nil, limit: nil, order: nil, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end - end -end diff --git a/rbi/lib/openai/resources/completions.rbi b/rbi/lib/openai/resources/completions.rbi deleted file mode 100644 index 5f018bc1..00000000 
--- a/rbi/lib/openai/resources/completions.rbi +++ /dev/null @@ -1,115 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Completions - sig do - params( - model: T.any(String, Symbol), - prompt: T.nilable( - T.any( - String, - T::Array[String], - T::Array[Integer], - T::Array[T::Array[Integer]] - ) - ), - best_of: T.nilable(Integer), - echo: T.nilable(T::Boolean), - frequency_penalty: T.nilable(Float), - logit_bias: T.nilable(T::Hash[Symbol, Integer]), - logprobs: T.nilable(Integer), - max_tokens: T.nilable(Integer), - n: T.nilable(Integer), - presence_penalty: T.nilable(Float), - seed: T.nilable(Integer), - stop: T.nilable(T.any(String, T::Array[String])), - stream_options: T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions), - suffix: T.nilable(String), - temperature: T.nilable(Float), - top_p: T.nilable(Float), - user: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Completion) - end - def create( - model:, - prompt:, - best_of: nil, - echo: nil, - frequency_penalty: nil, - logit_bias: nil, - logprobs: nil, - max_tokens: nil, - n: nil, - presence_penalty: nil, - seed: nil, - stop: nil, - stream_options: nil, - suffix: nil, - temperature: nil, - top_p: nil, - user: nil, - request_options: {} - ) - end - - sig do - params( - model: T.any(String, Symbol), - prompt: T.nilable( - T.any( - String, - T::Array[String], - T::Array[Integer], - T::Array[T::Array[Integer]] - ) - ), - best_of: T.nilable(Integer), - echo: T.nilable(T::Boolean), - frequency_penalty: T.nilable(Float), - logit_bias: T.nilable(T::Hash[Symbol, Integer]), - logprobs: T.nilable(Integer), - max_tokens: T.nilable(Integer), - n: T.nilable(Integer), - presence_penalty: T.nilable(Float), - seed: T.nilable(Integer), - stop: T.nilable(T.any(String, T::Array[String])), - stream_options: T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions), - suffix: T.nilable(String), - temperature: T.nilable(Float), - top_p: T.nilable(Float), - user: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Stream[OpenAI::Models::Completion]) - end - def create_streaming( - model:, - prompt:, - best_of: nil, - echo: nil, - frequency_penalty: nil, - logit_bias: nil, - logprobs: nil, - max_tokens: nil, - n: nil, - presence_penalty: nil, - seed: nil, - stop: nil, - stream_options: nil, - suffix: nil, - temperature: nil, - top_p: nil, - user: nil, - request_options: {} - ) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end -end diff --git a/rbi/lib/openai/resources/embeddings.rbi b/rbi/lib/openai/resources/embeddings.rbi deleted file mode 100644 index 86d61392..00000000 --- a/rbi/lib/openai/resources/embeddings.rbi +++ /dev/null @@ -1,25 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Embeddings - sig do - params( - input: T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]]), - model: T.any(String, Symbol), - dimensions: Integer, - encoding_format: Symbol, - user: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::CreateEmbeddingResponse) - end - def create(input:, model:, dimensions: nil, encoding_format: nil, user: nil, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end -end diff --git 
a/rbi/lib/openai/resources/files.rbi b/rbi/lib/openai/resources/files.rbi deleted file mode 100644 index 508e9d63..00000000 --- a/rbi/lib/openai/resources/files.rbi +++ /dev/null @@ -1,65 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Files - sig do - params( - file: T.any(IO, StringIO), - purpose: Symbol, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::FileObject) - end - def create(file:, purpose:, request_options: {}) - end - - sig do - params( - file_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::FileObject) - end - def retrieve(file_id, request_options: {}) - end - - sig do - params( - after: String, - limit: Integer, - order: Symbol, - purpose: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::CursorPage[OpenAI::Models::FileObject]) - end - def list(after: nil, limit: nil, order: nil, purpose: nil, request_options: {}) - end - - sig do - params( - file_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::FileDeleted) - end - def delete(file_id, request_options: {}) - end - - sig do - params( - file_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(T.anything) - end - def content(file_id, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end -end diff --git a/rbi/lib/openai/resources/fine_tuning.rbi b/rbi/lib/openai/resources/fine_tuning.rbi deleted file mode 100644 index 0699b0ee..00000000 --- a/rbi/lib/openai/resources/fine_tuning.rbi +++ /dev/null @@ -1,15 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class FineTuning - sig { returns(OpenAI::Resources::FineTuning::Jobs) } - def jobs - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end -end diff --git a/rbi/lib/openai/resources/fine_tuning/jobs.rbi b/rbi/lib/openai/resources/fine_tuning/jobs.rbi deleted file mode 100644 index 8ace3da3..00000000 --- a/rbi/lib/openai/resources/fine_tuning/jobs.rbi +++ /dev/null @@ -1,90 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class FineTuning - class Jobs - sig { returns(OpenAI::Resources::FineTuning::Jobs::Checkpoints) } - def checkpoints - end - - sig do - params( - model: T.any(String, Symbol), - training_file: String, - hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters, - integrations: T.nilable(T::Array[OpenAI::Models::FineTuning::JobCreateParams::Integration]), - metadata: T.nilable(OpenAI::Models::Metadata), - method_: OpenAI::Models::FineTuning::JobCreateParams::Method, - seed: T.nilable(Integer), - suffix: T.nilable(String), - validation_file: T.nilable(String), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::FineTuning::FineTuningJob) - end - def create( - model:, - training_file:, - hyperparameters: nil, - integrations: nil, - metadata: nil, - method_: nil, - seed: nil, - suffix: nil, - validation_file: nil, - request_options: {} - ) - end - - sig do - params( - fine_tuning_job_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - 
.returns(OpenAI::Models::FineTuning::FineTuningJob) - end - def retrieve(fine_tuning_job_id, request_options: {}) - end - - sig do - params( - after: String, - limit: Integer, - metadata: T.nilable(T::Hash[Symbol, String]), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::CursorPage[OpenAI::Models::FineTuning::FineTuningJob]) - end - def list(after: nil, limit: nil, metadata: nil, request_options: {}) - end - - sig do - params( - fine_tuning_job_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::FineTuning::FineTuningJob) - end - def cancel(fine_tuning_job_id, request_options: {}) - end - - sig do - params( - fine_tuning_job_id: String, - after: String, - limit: Integer, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::CursorPage[OpenAI::Models::FineTuning::FineTuningJobEvent]) - end - def list_events(fine_tuning_job_id, after: nil, limit: nil, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end -end diff --git a/rbi/lib/openai/resources/fine_tuning/jobs/checkpoints.rbi b/rbi/lib/openai/resources/fine_tuning/jobs/checkpoints.rbi deleted file mode 100644 index 297b57f1..00000000 --- a/rbi/lib/openai/resources/fine_tuning/jobs/checkpoints.rbi +++ /dev/null @@ -1,27 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class FineTuning - class Jobs - class Checkpoints - sig do - params( - fine_tuning_job_id: String, - after: String, - limit: Integer, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::CursorPage[OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint]) - end - def list(fine_tuning_job_id, after: nil, limit: nil, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end - end -end diff --git a/rbi/lib/openai/resources/images.rbi b/rbi/lib/openai/resources/images.rbi deleted file mode 100644 index 357eccc3..00000000 --- a/rbi/lib/openai/resources/images.rbi +++ /dev/null @@ -1,88 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Images - sig do - params( - image: T.any(IO, StringIO), - model: T.nilable(T.any(String, Symbol)), - n: T.nilable(Integer), - response_format: T.nilable(Symbol), - size: T.nilable(Symbol), - user: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::ImagesResponse) - end - def create_variation( - image:, - model: nil, - n: nil, - response_format: nil, - size: nil, - user: nil, - request_options: {} - ) - end - - sig do - params( - image: T.any(IO, StringIO), - prompt: String, - mask: T.any(IO, StringIO), - model: T.nilable(T.any(String, Symbol)), - n: T.nilable(Integer), - response_format: T.nilable(Symbol), - size: T.nilable(Symbol), - user: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::ImagesResponse) - end - def edit( - image:, - prompt:, - mask: nil, - model: nil, - n: nil, - response_format: nil, - size: nil, - user: nil, - request_options: {} - ) - end - - sig do - params( - prompt: String, - model: T.nilable(T.any(String, Symbol)), - n: T.nilable(Integer), - quality: Symbol, - response_format: T.nilable(Symbol), - size: 
T.nilable(Symbol), - style: T.nilable(Symbol), - user: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::ImagesResponse) - end - def generate( - prompt:, - model: nil, - n: nil, - quality: nil, - response_format: nil, - size: nil, - style: nil, - user: nil, - request_options: {} - ) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end -end diff --git a/rbi/lib/openai/resources/models.rbi b/rbi/lib/openai/resources/models.rbi deleted file mode 100644 index 2e4b916b..00000000 --- a/rbi/lib/openai/resources/models.rbi +++ /dev/null @@ -1,38 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Models - sig do - params( - model: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Model) - end - def retrieve(model, request_options: {}) - end - - sig do - params(request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]))) - .returns(OpenAI::Page[OpenAI::Models::Model]) - end - def list(request_options: {}) - end - - sig do - params( - model: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::ModelDeleted) - end - def delete(model, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end -end diff --git a/rbi/lib/openai/resources/moderations.rbi b/rbi/lib/openai/resources/moderations.rbi deleted file mode 100644 index 8b836716..00000000 --- a/rbi/lib/openai/resources/moderations.rbi +++ /dev/null @@ -1,26 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Moderations - sig do - params( - input: T.any( - String, - T::Array[String], - T::Array[T.any(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)] - ), - model: T.any(String, Symbol), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::ModerationCreateResponse) - end - def create(input:, model: nil, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end -end diff --git a/rbi/lib/openai/resources/responses.rbi b/rbi/lib/openai/resources/responses.rbi deleted file mode 100644 index d225d52c..00000000 --- a/rbi/lib/openai/resources/responses.rbi +++ /dev/null @@ -1,177 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Responses - sig { returns(OpenAI::Resources::Responses::InputItems) } - def input_items - end - - sig do - params( - input: T.any(String, OpenAI::Models::Responses::ResponseInput), - model: T.any(String, Symbol), - include: T.nilable(T::Array[Symbol]), - instructions: T.nilable(String), - max_output_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - parallel_tool_calls: T.nilable(T::Boolean), - previous_response_id: T.nilable(String), - reasoning: T.nilable(OpenAI::Models::Reasoning), - store: T.nilable(T::Boolean), - temperature: T.nilable(Float), - text: OpenAI::Models::Responses::ResponseTextConfig, - tool_choice: T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction), - tools: T::Array[ - T.any( - OpenAI::Models::Responses::FileSearchTool, - OpenAI::Models::Responses::FunctionTool, - OpenAI::Models::Responses::ComputerTool, - 
OpenAI::Models::Responses::WebSearchTool - ) - ], - top_p: T.nilable(Float), - truncation: T.nilable(Symbol), - user: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Responses::Response) - end - def create( - input:, - model:, - include: nil, - instructions: nil, - max_output_tokens: nil, - metadata: nil, - parallel_tool_calls: nil, - previous_response_id: nil, - reasoning: nil, - store: nil, - temperature: nil, - text: nil, - tool_choice: nil, - tools: nil, - top_p: nil, - truncation: nil, - user: nil, - request_options: {} - ) - end - - sig do - params( - input: T.any(String, OpenAI::Models::Responses::ResponseInput), - model: T.any(String, Symbol), - include: T.nilable(T::Array[Symbol]), - instructions: T.nilable(String), - max_output_tokens: T.nilable(Integer), - metadata: T.nilable(OpenAI::Models::Metadata), - parallel_tool_calls: T.nilable(T::Boolean), - previous_response_id: T.nilable(String), - reasoning: T.nilable(OpenAI::Models::Reasoning), - store: T.nilable(T::Boolean), - temperature: T.nilable(Float), - text: OpenAI::Models::Responses::ResponseTextConfig, - tool_choice: T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction), - tools: T::Array[ - T.any( - OpenAI::Models::Responses::FileSearchTool, - OpenAI::Models::Responses::FunctionTool, - OpenAI::Models::Responses::ComputerTool, - OpenAI::Models::Responses::WebSearchTool - ) - ], - top_p: T.nilable(Float), - truncation: T.nilable(Symbol), - user: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns( - OpenAI::Stream[ - T.any( - OpenAI::Models::Responses::ResponseAudioDeltaEvent, - OpenAI::Models::Responses::ResponseAudioDoneEvent, - OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, - OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, - OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, - OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, - OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, - OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, - OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, - OpenAI::Models::Responses::ResponseCompletedEvent, - OpenAI::Models::Responses::ResponseContentPartAddedEvent, - OpenAI::Models::Responses::ResponseContentPartDoneEvent, - OpenAI::Models::Responses::ResponseCreatedEvent, - OpenAI::Models::Responses::ResponseErrorEvent, - OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, - OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, - OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, - OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, - OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, - OpenAI::Models::Responses::ResponseInProgressEvent, - OpenAI::Models::Responses::ResponseFailedEvent, - OpenAI::Models::Responses::ResponseIncompleteEvent, - OpenAI::Models::Responses::ResponseOutputItemAddedEvent, - OpenAI::Models::Responses::ResponseOutputItemDoneEvent, - OpenAI::Models::Responses::ResponseRefusalDeltaEvent, - OpenAI::Models::Responses::ResponseRefusalDoneEvent, - OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent, - OpenAI::Models::Responses::ResponseTextDeltaEvent, - OpenAI::Models::Responses::ResponseTextDoneEvent, - OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, - 
OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, - OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent - ) - ] - ) - end - def create_streaming( - input:, - model:, - include: nil, - instructions: nil, - max_output_tokens: nil, - metadata: nil, - parallel_tool_calls: nil, - previous_response_id: nil, - reasoning: nil, - store: nil, - temperature: nil, - text: nil, - tool_choice: nil, - tools: nil, - top_p: nil, - truncation: nil, - user: nil, - request_options: {} - ) - end - - sig do - params( - response_id: String, - include: T::Array[Symbol], - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Responses::Response) - end - def retrieve(response_id, include: nil, request_options: {}) - end - - sig do - params( - response_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .void - end - def delete(response_id, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end -end diff --git a/rbi/lib/openai/resources/responses/input_items.rbi b/rbi/lib/openai/resources/responses/input_items.rbi deleted file mode 100644 index 5c8d359e..00000000 --- a/rbi/lib/openai/resources/responses/input_items.rbi +++ /dev/null @@ -1,40 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Responses - class InputItems - sig do - params( - response_id: String, - after: String, - before: String, - limit: Integer, - order: Symbol, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns( - OpenAI::CursorPage[ - T.any( - OpenAI::Models::Responses::ResponseItemList::Data::Message, - OpenAI::Models::Responses::ResponseOutputMessage, - OpenAI::Models::Responses::ResponseFileSearchToolCall, - OpenAI::Models::Responses::ResponseComputerToolCall, - OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput, - OpenAI::Models::Responses::ResponseFunctionWebSearch, - OpenAI::Models::Responses::ResponseFunctionToolCall, - OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput - ) - ] - ) - end - def list(response_id, after: nil, before: nil, limit: nil, order: nil, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end -end diff --git a/rbi/lib/openai/resources/uploads.rbi b/rbi/lib/openai/resources/uploads.rbi deleted file mode 100644 index 094b93ea..00000000 --- a/rbi/lib/openai/resources/uploads.rbi +++ /dev/null @@ -1,50 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Uploads - sig { returns(OpenAI::Resources::Uploads::Parts) } - def parts - end - - sig do - params( - bytes: Integer, - filename: String, - mime_type: String, - purpose: Symbol, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Upload) - end - def create(bytes:, filename:, mime_type:, purpose:, request_options: {}) - end - - sig do - params( - upload_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Upload) - end - def cancel(upload_id, request_options: {}) - end - - sig do - params( - upload_id: String, - part_ids: T::Array[String], - md5: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Upload) - end - def 
complete(upload_id, part_ids:, md5: nil, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end -end diff --git a/rbi/lib/openai/resources/uploads/parts.rbi b/rbi/lib/openai/resources/uploads/parts.rbi deleted file mode 100644 index e32900d9..00000000 --- a/rbi/lib/openai/resources/uploads/parts.rbi +++ /dev/null @@ -1,24 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class Uploads - class Parts - sig do - params( - upload_id: String, - data: T.any(IO, StringIO), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::Uploads::UploadPart) - end - def create(upload_id, data:, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end -end diff --git a/rbi/lib/openai/resources/vector_stores.rbi b/rbi/lib/openai/resources/vector_stores.rbi deleted file mode 100644 index fdbb2632..00000000 --- a/rbi/lib/openai/resources/vector_stores.rbi +++ /dev/null @@ -1,112 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class VectorStores - sig { returns(OpenAI::Resources::VectorStores::Files) } - def files - end - - sig { returns(OpenAI::Resources::VectorStores::FileBatches) } - def file_batches - end - - sig do - params( - chunking_strategy: T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ), - expires_after: OpenAI::Models::VectorStoreCreateParams::ExpiresAfter, - file_ids: T::Array[String], - metadata: T.nilable(OpenAI::Models::Metadata), - name: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::VectorStore) - end - def create( - chunking_strategy: nil, - expires_after: nil, - file_ids: nil, - metadata: nil, - name: nil, - request_options: {} - ) - end - - sig do - params( - vector_store_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::VectorStore) - end - def retrieve(vector_store_id, request_options: {}) - end - - sig do - params( - vector_store_id: String, - expires_after: T.nilable(OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter), - metadata: T.nilable(OpenAI::Models::Metadata), - name: T.nilable(String), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::VectorStore) - end - def update(vector_store_id, expires_after: nil, metadata: nil, name: nil, request_options: {}) - end - - sig do - params( - after: String, - before: String, - limit: Integer, - order: Symbol, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::CursorPage[OpenAI::Models::VectorStore]) - end - def list(after: nil, before: nil, limit: nil, order: nil, request_options: {}) - end - - sig do - params( - vector_store_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::VectorStoreDeleted) - end - def delete(vector_store_id, request_options: {}) - end - - sig do - params( - vector_store_id: String, - query: T.any(String, T::Array[String]), - filters: T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter), - max_num_results: Integer, - ranking_options: OpenAI::Models::VectorStoreSearchParams::RankingOptions, - rewrite_query: 
T::Boolean, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Page[OpenAI::Models::VectorStoreSearchResponse]) - end - def search( - vector_store_id, - query:, - filters: nil, - max_num_results: nil, - ranking_options: nil, - rewrite_query: nil, - request_options: {} - ) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end -end diff --git a/rbi/lib/openai/resources/vector_stores/file_batches.rbi b/rbi/lib/openai/resources/vector_stores/file_batches.rbi deleted file mode 100644 index bcec19cb..00000000 --- a/rbi/lib/openai/resources/vector_stores/file_batches.rbi +++ /dev/null @@ -1,76 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class VectorStores - class FileBatches - sig do - params( - vector_store_id: String, - file_ids: T::Array[String], - attributes: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)]), - chunking_strategy: T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::VectorStores::VectorStoreFileBatch) - end - def create(vector_store_id, file_ids:, attributes: nil, chunking_strategy: nil, request_options: {}) - end - - sig do - params( - batch_id: String, - vector_store_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::VectorStores::VectorStoreFileBatch) - end - def retrieve(batch_id, vector_store_id:, request_options: {}) - end - - sig do - params( - batch_id: String, - vector_store_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::VectorStores::VectorStoreFileBatch) - end - def cancel(batch_id, vector_store_id:, request_options: {}) - end - - sig do - params( - batch_id: String, - vector_store_id: String, - after: String, - before: String, - filter: Symbol, - limit: Integer, - order: Symbol, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::CursorPage[OpenAI::Models::VectorStores::VectorStoreFile]) - end - def list_files( - batch_id, - vector_store_id:, - after: nil, - before: nil, - filter: nil, - limit: nil, - order: nil, - request_options: {} - ) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end -end diff --git a/rbi/lib/openai/resources/vector_stores/files.rbi b/rbi/lib/openai/resources/vector_stores/files.rbi deleted file mode 100644 index 85c53733..00000000 --- a/rbi/lib/openai/resources/vector_stores/files.rbi +++ /dev/null @@ -1,97 +0,0 @@ -# typed: strong - -module OpenAI - module Resources - class VectorStores - class Files - sig do - params( - vector_store_id: String, - file_id: String, - attributes: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)]), - chunking_strategy: T.any( - OpenAI::Models::AutoFileChunkingStrategyParam, - OpenAI::Models::StaticFileChunkingStrategyObjectParam - ), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::VectorStores::VectorStoreFile) - end - def create(vector_store_id, file_id:, attributes: nil, chunking_strategy: nil, request_options: {}) - end - - sig do - params( - file_id: String, - vector_store_id: String, - request_options: 
T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::VectorStores::VectorStoreFile) - end - def retrieve(file_id, vector_store_id:, request_options: {}) - end - - sig do - params( - file_id: String, - vector_store_id: String, - attributes: T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)]), - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::VectorStores::VectorStoreFile) - end - def update(file_id, vector_store_id:, attributes:, request_options: {}) - end - - sig do - params( - vector_store_id: String, - after: String, - before: String, - filter: Symbol, - limit: Integer, - order: Symbol, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::CursorPage[OpenAI::Models::VectorStores::VectorStoreFile]) - end - def list( - vector_store_id, - after: nil, - before: nil, - filter: nil, - limit: nil, - order: nil, - request_options: {} - ) - end - - sig do - params( - file_id: String, - vector_store_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Models::VectorStores::VectorStoreFileDeleted) - end - def delete(file_id, vector_store_id:, request_options: {}) - end - - sig do - params( - file_id: String, - vector_store_id: String, - request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) - ) - .returns(OpenAI::Page[OpenAI::Models::VectorStores::FileContentResponse]) - end - def content(file_id, vector_store_id:, request_options: {}) - end - - sig { params(client: OpenAI::Client).returns(T.attached_class) } - def self.new(client:) - end - end - end - end -end diff --git a/rbi/lib/openai/stream.rbi b/rbi/lib/openai/stream.rbi deleted file mode 100644 index f4bf6fa9..00000000 --- a/rbi/lib/openai/stream.rbi +++ /dev/null @@ -1,11 +0,0 @@ -# typed: strong - -module OpenAI - class Stream < OpenAI::BaseStream - Elem = type_member(:out) - - sig { override.returns(T::Enumerable[Elem]) } - private def iterator - end - end -end diff --git a/rbi/lib/openai/util.rbi b/rbi/lib/openai/util.rbi deleted file mode 100644 index 8b354b0d..00000000 --- a/rbi/lib/openai/util.rbi +++ /dev/null @@ -1,190 +0,0 @@ -# typed: strong - -module OpenAI - module Util - sig { returns(Float) } - def self.monotonic_secs - end - - class << self - sig { returns(String) } - def arch - end - - sig { returns(String) } - def os - end - end - - class << self - sig { params(input: T.anything).returns(T.any(T::Boolean, T.anything)) } - def primitive?(input) - end - - sig { params(input: T.anything).returns(T.any(T::Boolean, T.anything)) } - def coerce_boolean(input) - end - - sig { params(input: T.anything).returns(T.nilable(T::Boolean)) } - def coerce_boolean!(input) - end - - sig { params(input: T.anything).returns(T.any(Integer, T.anything)) } - def coerce_integer(input) - end - - sig { params(input: T.anything).returns(T.any(Float, T.anything)) } - def coerce_float(input) - end - - sig { params(input: T.anything).returns(T.any(T::Hash[T.anything, T.anything], T.anything)) } - def coerce_hash(input) - end - end - - OMIT = T.let(T.anything, T.anything) - - class << self - sig { params(lhs: T.anything, rhs: T.anything, concat: T::Boolean).returns(T.anything) } - private def deep_merge_lr(lhs, rhs, concat: false) - end - - sig do - params(values: T::Array[T.anything], sentinel: T.nilable(T.anything), concat: T::Boolean) - .returns(T.anything) - end - def 
deep_merge(*values, sentinel: nil, concat: false) - end - - sig do - params( - data: T.any(T::Hash[Symbol, T.anything], T::Array[T.anything], T.anything), - pick: T.nilable(T.any(Symbol, Integer, T::Array[T.any(Symbol, Integer)])), - sentinel: T.nilable(T.anything), - blk: T.nilable(T.proc.returns(T.anything)) - ) - .returns(T.nilable(T.anything)) - end - def dig(data, pick, sentinel = nil, &blk) - end - end - - class << self - sig { params(uri: URI::Generic).returns(String) } - def uri_origin(uri) - end - - sig { params(path: T.any(String, T::Array[String])).returns(String) } - def interpolate_path(path) - end - end - - class << self - sig { params(query: T.nilable(String)).returns(T::Hash[String, T::Array[String]]) } - def decode_query(query) - end - - sig do - params(query: T.nilable(T::Hash[String, T.nilable(T.any(T::Array[String], String))])) - .returns(T.nilable(String)) - end - def encode_query(query) - end - end - - ParsedUriShape = T.type_alias do - { - scheme: T.nilable(String), - host: T.nilable(String), - port: T.nilable(Integer), - path: T.nilable(String), - query: T::Hash[String, T::Array[String]] - } - end - - class << self - sig { params(url: T.any(URI::Generic, String)).returns(OpenAI::Util::ParsedUriShape) } - def parse_uri(url) - end - - sig { params(parsed: OpenAI::Util::ParsedUriShape).returns(URI::Generic) } - def unparse_uri(parsed) - end - - sig do - params(lhs: OpenAI::Util::ParsedUriShape, rhs: OpenAI::Util::ParsedUriShape).returns(URI::Generic) - end - def join_parsed_uri(lhs, rhs) - end - end - - class << self - sig do - params( - headers: T::Hash[String, - T.nilable(T.any(String, Integer, T::Array[T.nilable(T.any(String, Integer))]))] - ) - .returns(T::Hash[String, String]) - end - def normalized_headers(*headers) - end - end - - class << self - sig { params(io: StringIO, boundary: String, key: T.any(Symbol, String), val: T.anything).void } - private def encode_multipart_formdata(io, boundary:, key:, val:) - end - - sig { params(headers: T::Hash[String, String], body: T.anything).returns(T.anything) } - def encode_content(headers, body) - end - - sig do - params( - headers: T.any(T::Hash[String, String], Net::HTTPHeader), - stream: T::Enumerable[String], - suppress_error: T::Boolean - ) - .returns(T.anything) - end - def decode_content(headers, stream:, suppress_error: false) - end - end - - class << self - sig do - params(enum: T::Enumerable[T.anything], external: T::Boolean, close: T.proc.void) - .returns(T::Enumerable[T.anything]) - end - def fused_enum(enum, external: false, &close) - end - - sig { params(enum: T.nilable(T::Enumerable[T.anything])).void } - def close_fused!(enum) - end - - sig do - params( - enum: T.nilable(T::Enumerable[T.anything]), - blk: T.proc.params(arg0: Enumerator::Yielder).void - ).void - end - def chain_fused(enum, &blk) - end - end - - SSEMessage = T.type_alias do - {event: T.nilable(String), data: T.nilable(String), id: T.nilable(String), retry: T.nilable(Integer)} - end - - class << self - sig { params(enum: T::Enumerable[String]).returns(T::Enumerable[String]) } - def decode_lines(enum) - end - - sig { params(lines: T::Enumerable[String]).returns(OpenAI::Util::SSEMessage) } - def decode_sse(lines) - end - end - end -end diff --git a/rbi/lib/openai/version.rbi b/rbi/lib/openai/version.rbi deleted file mode 100644 index 4cab3ef4..00000000 --- a/rbi/lib/openai/version.rbi +++ /dev/null @@ -1,5 +0,0 @@ -# typed: strong - -module OpenAI - VERSION = "0.0.1-alpha.0" -end diff --git a/rbi/openai/client.rbi b/rbi/openai/client.rbi new 
file mode 100644 index 00000000..260d8bb8 --- /dev/null +++ b/rbi/openai/client.rbi @@ -0,0 +1,115 @@ +# typed: strong + +module OpenAI + class Client < OpenAI::Internal::Transport::BaseClient + DEFAULT_MAX_RETRIES = 2 + + DEFAULT_TIMEOUT_IN_SECONDS = T.let(600.0, Float) + + DEFAULT_INITIAL_RETRY_DELAY = T.let(0.5, Float) + + DEFAULT_MAX_RETRY_DELAY = T.let(8.0, Float) + + sig { returns(String) } + attr_reader :api_key + + sig { returns(T.nilable(String)) } + attr_reader :organization + + sig { returns(T.nilable(String)) } + attr_reader :project + + sig { returns(OpenAI::Resources::Completions) } + attr_reader :completions + + sig { returns(OpenAI::Resources::Chat) } + attr_reader :chat + + sig { returns(OpenAI::Resources::Embeddings) } + attr_reader :embeddings + + sig { returns(OpenAI::Resources::Files) } + attr_reader :files + + sig { returns(OpenAI::Resources::Images) } + attr_reader :images + + sig { returns(OpenAI::Resources::Audio) } + attr_reader :audio + + sig { returns(OpenAI::Resources::Moderations) } + attr_reader :moderations + + sig { returns(OpenAI::Resources::Models) } + attr_reader :models + + sig { returns(OpenAI::Resources::FineTuning) } + attr_reader :fine_tuning + + sig { returns(OpenAI::Resources::Graders) } + attr_reader :graders + + sig { returns(OpenAI::Resources::VectorStores) } + attr_reader :vector_stores + + sig { returns(OpenAI::Resources::Webhooks) } + attr_reader :webhooks + + sig { returns(OpenAI::Resources::Beta) } + attr_reader :beta + + sig { returns(OpenAI::Resources::Batches) } + attr_reader :batches + + sig { returns(OpenAI::Resources::Uploads) } + attr_reader :uploads + + sig { returns(OpenAI::Resources::Responses) } + attr_reader :responses + + sig { returns(OpenAI::Resources::Conversations) } + attr_reader :conversations + + sig { returns(OpenAI::Resources::Evals) } + attr_reader :evals + + sig { returns(OpenAI::Resources::Containers) } + attr_reader :containers + + # @api private + sig { override.returns(T::Hash[String, String]) } + private def auth_headers + end + + # Creates and returns a new client for interacting with the API. + sig do + params( + api_key: T.nilable(String), + organization: T.nilable(String), + project: T.nilable(String), + base_url: T.nilable(String), + max_retries: Integer, + timeout: Float, + initial_retry_delay: Float, + max_retry_delay: Float + ).returns(T.attached_class) + end + def self.new( + # Defaults to `ENV["OPENAI_API_KEY"]` + api_key: ENV["OPENAI_API_KEY"], + # Defaults to `ENV["OPENAI_ORG_ID"]` + organization: ENV["OPENAI_ORG_ID"], + # Defaults to `ENV["OPENAI_PROJECT_ID"]` + project: ENV["OPENAI_PROJECT_ID"], + # Override the default base URL for the API, e.g., + # `"https://api.example.com/v2/"`. Defaults to `ENV["OPENAI_BASE_URL"]` + base_url: ENV["OPENAI_BASE_URL"], + # Max number of retries to attempt after a failed retryable request. 
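In use, the constructor typed in this hunk reads roughly as follows. The overrides are illustrative; every argument falls back to the documented ENV variable or constant default, so a bare `OpenAI::Client.new` also works:

```ruby
require "openai"

client = OpenAI::Client.new(
  api_key: ENV["OPENAI_API_KEY"],     # the default, shown for clarity
  organization: ENV["OPENAI_ORG_ID"], # optional
  project: ENV["OPENAI_PROJECT_ID"],  # optional
  max_retries: OpenAI::Client::DEFAULT_MAX_RETRIES, # 2
  timeout: 30.0 # seconds; the default is DEFAULT_TIMEOUT_IN_SECONDS (600.0)
)

# Each attr_reader above exposes one typed resource, e.g.:
client.models.list.auto_paging_each { |model| puts model.id }
```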
+ max_retries: OpenAI::Client::DEFAULT_MAX_RETRIES, + timeout: OpenAI::Client::DEFAULT_TIMEOUT_IN_SECONDS, + initial_retry_delay: OpenAI::Client::DEFAULT_INITIAL_RETRY_DELAY, + max_retry_delay: OpenAI::Client::DEFAULT_MAX_RETRY_DELAY + ) + end + end +end diff --git a/rbi/openai/errors.rbi b/rbi/openai/errors.rbi new file mode 100644 index 00000000..b3cfeb33 --- /dev/null +++ b/rbi/openai/errors.rbi @@ -0,0 +1,205 @@ +# typed: strong + +module OpenAI + module Errors + class Error < StandardError + sig { returns(T.nilable(StandardError)) } + attr_accessor :cause + end + + class ConversionError < OpenAI::Errors::Error + sig { returns(T.nilable(StandardError)) } + def cause + end + + # @api private + sig do + params( + on: T::Class[StandardError], + method: Symbol, + target: T.anything, + value: T.anything, + cause: T.nilable(StandardError) + ).returns(T.attached_class) + end + def self.new(on:, method:, target:, value:, cause: nil) + end + end + + class APIError < OpenAI::Errors::Error + sig { returns(URI::Generic) } + attr_accessor :url + + sig { returns(T.nilable(Integer)) } + attr_accessor :status + + sig { returns(T.nilable(T.anything)) } + attr_accessor :body + + sig { returns(T.nilable(String)) } + attr_accessor :code + + sig { returns(T.nilable(String)) } + attr_accessor :param + + sig { returns(T.nilable(String)) } + attr_accessor :type + + # @api private + sig do + params( + url: URI::Generic, + status: T.nilable(Integer), + body: T.nilable(Object), + request: NilClass, + response: NilClass, + message: T.nilable(String) + ).returns(T.attached_class) + end + def self.new( + url:, + status: nil, + body: nil, + request: nil, + response: nil, + message: nil + ) + end + end + + class APIConnectionError < OpenAI::Errors::APIError + sig { returns(NilClass) } + attr_accessor :status + + sig { returns(NilClass) } + attr_accessor :body + + sig { returns(NilClass) } + attr_accessor :code + + sig { returns(NilClass) } + attr_accessor :param + + sig { returns(NilClass) } + attr_accessor :type + + # @api private + sig do + params( + url: URI::Generic, + status: NilClass, + body: NilClass, + request: NilClass, + response: NilClass, + message: T.nilable(String) + ).returns(T.attached_class) + end + def self.new( + url:, + status: nil, + body: nil, + request: nil, + response: nil, + message: "Connection error." + ) + end + end + + class APITimeoutError < OpenAI::Errors::APIConnectionError + # @api private + sig do + params( + url: URI::Generic, + status: NilClass, + body: NilClass, + request: NilClass, + response: NilClass, + message: T.nilable(String) + ).returns(T.attached_class) + end + def self.new( + url:, + status: nil, + body: nil, + request: nil, + response: nil, + message: "Request timed out." 
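The error hierarchy typed in this hunk nests as `Error` > `APIError` > (`APIConnectionError` > `APITimeoutError`, and `APIStatusError` with its HTTP-status subclasses declared just below). A hedged handling sketch; the request mirrors the `Responses#create` stub deleted earlier in this diff, and the model name is an assumption:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

begin
  client.responses.create(input: "Hello", model: "gpt-4o") # model name assumed
rescue OpenAI::Errors::APITimeoutError
  warn "request timed out; safe to retry"
rescue OpenAI::Errors::APIStatusError => e
  # Covers the status-specific subclasses (RateLimitError, NotFoundError, ...)
  # declared below with their HTTP_STATUS constants.
  warn "HTTP #{e.status}: code=#{e.code} type=#{e.type}"
rescue OpenAI::Errors::APIConnectionError => e
  warn "connection error: #{e.message}"
end
```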
+ ) + end + end + + class APIStatusError < OpenAI::Errors::APIError + # @api private + sig do + params( + url: URI::Generic, + status: Integer, + body: T.nilable(Object), + request: NilClass, + response: NilClass, + message: T.nilable(String) + ).returns(T.attached_class) + end + def self.for(url:, status:, body:, request:, response:, message: nil) + end + + sig { returns(Integer) } + attr_accessor :status + + sig { returns(T.nilable(String)) } + attr_accessor :code + + sig { returns(T.nilable(String)) } + attr_accessor :param + + sig { returns(T.nilable(String)) } + attr_accessor :type + + # @api private + sig do + params( + url: URI::Generic, + status: Integer, + body: T.nilable(Object), + request: NilClass, + response: NilClass, + message: T.nilable(String) + ).returns(T.attached_class) + end + def self.new(url:, status:, body:, request:, response:, message: nil) + end + end + + class BadRequestError < OpenAI::Errors::APIStatusError + HTTP_STATUS = 400 + end + + class AuthenticationError < OpenAI::Errors::APIStatusError + HTTP_STATUS = 401 + end + + class PermissionDeniedError < OpenAI::Errors::APIStatusError + HTTP_STATUS = 403 + end + + class NotFoundError < OpenAI::Errors::APIStatusError + HTTP_STATUS = 404 + end + + class ConflictError < OpenAI::Errors::APIStatusError + HTTP_STATUS = 409 + end + + class UnprocessableEntityError < OpenAI::Errors::APIStatusError + HTTP_STATUS = 422 + end + + class RateLimitError < OpenAI::Errors::APIStatusError + HTTP_STATUS = 429 + end + + class InternalServerError < OpenAI::Errors::APIStatusError + HTTP_STATUS = T.let((500..), T::Range[Integer]) + end + end +end diff --git a/rbi/openai/file_part.rbi b/rbi/openai/file_part.rbi new file mode 100644 index 00000000..5df7359c --- /dev/null +++ b/rbi/openai/file_part.rbi @@ -0,0 +1,37 @@ +# typed: strong + +module OpenAI + class FilePart + sig { returns(T.any(Pathname, StringIO, IO, String)) } + attr_reader :content + + sig { returns(T.nilable(String)) } + attr_reader :content_type + + sig { returns(T.nilable(String)) } + attr_reader :filename + + # @api private + sig { returns(String) } + private def read + end + + sig { params(a: T.anything).returns(String) } + def to_json(*a) + end + + sig { params(a: T.anything).returns(String) } + def to_yaml(*a) + end + + sig do + params( + content: T.any(Pathname, StringIO, IO, String), + filename: T.nilable(String), + content_type: T.nilable(String) + ).returns(T.attached_class) + end + def self.new(content, filename: nil, content_type: nil) + end + end +end diff --git a/rbi/openai/internal.rbi b/rbi/openai/internal.rbi new file mode 100644 index 00000000..eeddce6e --- /dev/null +++ b/rbi/openai/internal.rbi @@ -0,0 +1,16 @@ +# typed: strong + +module OpenAI + module Internal + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + # Due to the current WIP status of Shapes support in Sorbet, types referencing + # this alias might be refined in the future. 
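`FilePart`, typed above, wraps raw content with an explicit filename and content type for multipart requests, and the `FileInput` alias defined just below admits it alongside `Pathname`, `StringIO`, `IO`, and `String`. A hedged sketch, assuming the re-added resource stubs accept `FileInput` for upload parameters; the purpose symbol and content type are illustrative:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

part = OpenAI::FilePart.new(
  StringIO.new("{\"prompt\": \"hi\"}\n"),
  filename: "data.jsonl",
  content_type: "application/jsonl" # illustrative
)

client.files.create(file: part, purpose: :"fine-tune") # purpose value assumed
```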
+ AnyHash = T.type_alias { T::Hash[Symbol, T.anything] } + + FileInput = + T.type_alias { T.any(Pathname, StringIO, IO, String, OpenAI::FilePart) } + + OMIT = T.let(Object.new.freeze, T.anything) + end +end diff --git a/rbi/openai/internal/conversation_cursor_page.rbi b/rbi/openai/internal/conversation_cursor_page.rbi new file mode 100644 index 00000000..122b3c11 --- /dev/null +++ b/rbi/openai/internal/conversation_cursor_page.rbi @@ -0,0 +1,25 @@ +# typed: strong + +module OpenAI + module Internal + class ConversationCursorPage + include OpenAI::Internal::Type::BasePage + + Elem = type_member + + sig { returns(T.nilable(T::Array[Elem])) } + attr_accessor :data + + sig { returns(T::Boolean) } + attr_accessor :has_more + + sig { returns(String) } + attr_accessor :last_id + + # @api private + sig { returns(String) } + def inspect + end + end + end +end diff --git a/rbi/openai/internal/cursor_page.rbi b/rbi/openai/internal/cursor_page.rbi new file mode 100644 index 00000000..4eb4d3b1 --- /dev/null +++ b/rbi/openai/internal/cursor_page.rbi @@ -0,0 +1,22 @@ +# typed: strong + +module OpenAI + module Internal + class CursorPage + include OpenAI::Internal::Type::BasePage + + Elem = type_member + + sig { returns(T.nilable(T::Array[Elem])) } + attr_accessor :data + + sig { returns(T::Boolean) } + attr_accessor :has_more + + # @api private + sig { returns(String) } + def inspect + end + end + end +end diff --git a/rbi/openai/internal/page.rbi b/rbi/openai/internal/page.rbi new file mode 100644 index 00000000..6f9f6551 --- /dev/null +++ b/rbi/openai/internal/page.rbi @@ -0,0 +1,22 @@ +# typed: strong + +module OpenAI + module Internal + class Page + include OpenAI::Internal::Type::BasePage + + Elem = type_member + + sig { returns(T.nilable(T::Array[Elem])) } + attr_accessor :data + + sig { returns(String) } + attr_accessor :object + + # @api private + sig { returns(String) } + def inspect + end + end + end +end diff --git a/rbi/openai/internal/stream.rbi b/rbi/openai/internal/stream.rbi new file mode 100644 index 00000000..ffcb1934 --- /dev/null +++ b/rbi/openai/internal/stream.rbi @@ -0,0 +1,18 @@ +# typed: strong + +module OpenAI + module Internal + class Stream + Message = + type_member(:in) { { fixed: OpenAI::Internal::Util::ServerSentEvent } } + Elem = type_member(:out) + + include OpenAI::Internal::Type::BaseStream + + # @api private + sig { override.returns(T::Enumerable[Elem]) } + private def iterator + end + end + end +end diff --git a/rbi/openai/internal/transport/base_client.rbi b/rbi/openai/internal/transport/base_client.rbi new file mode 100644 index 00000000..095d4476 --- /dev/null +++ b/rbi/openai/internal/transport/base_client.rbi @@ -0,0 +1,309 @@ +# typed: strong + +module OpenAI + module Internal + module Transport + # @api private + class BaseClient + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + abstract! 
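The page classes above declare only their typed payload fields; the iteration protocol comes from `Internal::Type::BasePage`, typed later in this diff. A usage sketch against the `VectorStores#list` stub deleted earlier:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

page = client.vector_stores.list(limit: 20) # a CursorPage[VectorStore]

# Manual pagination via the BasePage interface:
loop do
  page.data&.each { |vs| puts vs.id }
  break unless page.next_page?
  page = page.next_page
end

# Or equivalently, let auto_paging_each drive the extra requests:
client.vector_stores.list(limit: 20).auto_paging_each { |vs| puts vs.id }
```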
+ + RequestComponents = + T.type_alias do + { + method: Symbol, + path: T.any(String, T::Array[String]), + query: + T.nilable( + T::Hash[String, T.nilable(T.any(T::Array[String], String))] + ), + headers: + T.nilable( + T::Hash[ + String, + T.nilable( + T.any( + String, + Integer, + T::Array[T.nilable(T.any(String, Integer))] + ) + ) + ] + ), + body: T.nilable(T.anything), + unwrap: + T.nilable( + T.any( + Symbol, + Integer, + T::Array[T.any(Symbol, Integer)], + T.proc.params(arg0: T.anything).returns(T.anything) + ) + ), + page: + T.nilable( + T::Class[ + OpenAI::Internal::Type::BasePage[ + OpenAI::Internal::Type::BaseModel + ] + ] + ), + stream: + T.nilable( + T::Class[ + OpenAI::Internal::Type::BaseStream[ + T.anything, + OpenAI::Internal::Type::BaseModel + ] + ] + ), + model: T.nilable(OpenAI::Internal::Type::Converter::Input), + options: T.nilable(OpenAI::RequestOptions::OrHash) + } + end + + RequestInput = + T.type_alias do + { + method: Symbol, + url: URI::Generic, + headers: T::Hash[String, String], + body: T.anything, + max_retries: Integer, + timeout: Float + } + end + + # from whatwg fetch spec + MAX_REDIRECTS = 20 + + PLATFORM_HEADERS = T::Hash[String, String] + + class << self + # @api private + sig do + params( + req: OpenAI::Internal::Transport::BaseClient::RequestComponents + ).void + end + def validate!(req) + end + + # @api private + sig do + params( + status: Integer, + headers: T.any(T::Hash[String, String], Net::HTTPHeader) + ).returns(T::Boolean) + end + def should_retry?(status, headers:) + end + + # @api private + sig do + params( + request: OpenAI::Internal::Transport::BaseClient::RequestInput, + status: Integer, + response_headers: T.any(T::Hash[String, String], Net::HTTPHeader) + ).returns(OpenAI::Internal::Transport::BaseClient::RequestInput) + end + def follow_redirect(request, status:, response_headers:) + end + + # @api private + sig do + params( + status: T.any(Integer, OpenAI::Errors::APIConnectionError), + stream: T.nilable(T::Enumerable[String]) + ).void + end + def reap_connection!(status, stream:) + end + end + + sig { returns(URI::Generic) } + attr_reader :base_url + + sig { returns(Float) } + attr_reader :timeout + + sig { returns(Integer) } + attr_reader :max_retries + + sig { returns(Float) } + attr_reader :initial_retry_delay + + sig { returns(Float) } + attr_reader :max_retry_delay + + sig { returns(T::Hash[String, String]) } + attr_reader :headers + + sig { returns(T.nilable(String)) } + attr_reader :idempotency_header + + # @api private + sig { returns(OpenAI::Internal::Transport::PooledNetRequester) } + attr_reader :requester + + # @api private + sig do + params( + base_url: String, + timeout: Float, + max_retries: Integer, + initial_retry_delay: Float, + max_retry_delay: Float, + headers: + T::Hash[ + String, + T.nilable( + T.any( + String, + Integer, + T::Array[T.nilable(T.any(String, Integer))] + ) + ) + ], + idempotency_header: T.nilable(String) + ).returns(T.attached_class) + end + def self.new( + base_url:, + timeout: 0.0, + max_retries: 0, + initial_retry_delay: 0.0, + max_retry_delay: 0.0, + headers: {}, + idempotency_header: nil + ) + end + + # @api private + sig { overridable.returns(T::Hash[String, String]) } + private def auth_headers + end + + # @api private + sig { returns(String) } + private def generate_idempotency_key + end + + # @api private + sig do + overridable + .params( + req: OpenAI::Internal::Transport::BaseClient::RequestComponents, + opts: OpenAI::Internal::AnyHash + ) + 
.returns(OpenAI::Internal::Transport::BaseClient::RequestInput) + end + private def build_request(req, opts) + end + + # @api private + sig do + params( + headers: T::Hash[String, String], + retry_count: Integer + ).returns(Float) + end + private def retry_delay(headers, retry_count:) + end + + # @api private + sig do + params( + request: OpenAI::Internal::Transport::BaseClient::RequestInput, + redirect_count: Integer, + retry_count: Integer, + send_retry_header: T::Boolean + ).returns([Integer, Net::HTTPResponse, T::Enumerable[String]]) + end + def send_request( + request, + redirect_count:, + retry_count:, + send_retry_header: + ) + end + + # Execute the request specified by `req`. This is the method that all resource + # methods call into. + # + # @overload request(method, path, query: {}, headers: {}, body: nil, unwrap: nil, page: nil, stream: nil, model: OpenAI::Internal::Type::Unknown, options: {}) + sig do + params( + method: Symbol, + path: T.any(String, T::Array[String]), + query: + T.nilable( + T::Hash[String, T.nilable(T.any(T::Array[String], String))] + ), + headers: + T.nilable( + T::Hash[ + String, + T.nilable( + T.any( + String, + Integer, + T::Array[T.nilable(T.any(String, Integer))] + ) + ) + ] + ), + body: T.nilable(T.anything), + unwrap: + T.nilable( + T.any( + Symbol, + Integer, + T::Array[T.any(Symbol, Integer)], + T.proc.params(arg0: T.anything).returns(T.anything) + ) + ), + page: + T.nilable( + T::Class[ + OpenAI::Internal::Type::BasePage[ + OpenAI::Internal::Type::BaseModel + ] + ] + ), + stream: + T.nilable( + T::Class[ + OpenAI::Internal::Type::BaseStream[ + T.anything, + OpenAI::Internal::Type::BaseModel + ] + ] + ), + model: T.nilable(OpenAI::Internal::Type::Converter::Input), + options: T.nilable(OpenAI::RequestOptions::OrHash) + ).returns(T.anything) + end + def request( + method, + path, + query: {}, + headers: {}, + body: nil, + unwrap: nil, + page: nil, + stream: nil, + model: OpenAI::Internal::Type::Unknown, + options: {} + ) + end + + # @api private + sig { returns(String) } + def inspect + end + end + end + end +end diff --git a/rbi/openai/internal/transport/pooled_net_requester.rbi b/rbi/openai/internal/transport/pooled_net_requester.rbi new file mode 100644 index 00000000..245308c5 --- /dev/null +++ b/rbi/openai/internal/transport/pooled_net_requester.rbi @@ -0,0 +1,78 @@ +# typed: strong + +module OpenAI + module Internal + module Transport + # @api private + class PooledNetRequester + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + Request = + T.type_alias do + { + method: Symbol, + url: URI::Generic, + headers: T::Hash[String, String], + body: T.anything, + deadline: Float + } + end + + # from the golang stdlib + # https://github.com/golang/go/blob/c8eced8580028328fde7c03cbfcb720ce15b2358/src/net/http/transport.go#L49 + KEEP_ALIVE_TIMEOUT = 30 + + DEFAULT_MAX_CONNECTIONS = T.let(T.unsafe(nil), Integer) + + class << self + # @api private + sig { params(url: URI::Generic).returns(Net::HTTP) } + def connect(url) + end + + # @api private + sig { params(conn: Net::HTTP, deadline: Float).void } + def calibrate_socket_timeout(conn, deadline) + end + + # @api private + sig do + params( + request: OpenAI::Internal::Transport::PooledNetRequester::Request, + blk: T.proc.params(arg0: String).void + ).returns([Net::HTTPGenericRequest, T.proc.void]) + end + def build_request(request, &blk) + end + end + + # @api private + sig do + params( + url: URI::Generic, + deadline: Float, + blk: T.proc.params(arg0: Net::HTTP).void + ).void + end + private def 
with_pool(url, deadline:, &blk) + end + + # @api private + sig do + params( + request: OpenAI::Internal::Transport::PooledNetRequester::Request + ).returns([Integer, Net::HTTPResponse, T::Enumerable[String]]) + end + def execute(request) + end + + # @api private + sig { params(size: Integer).returns(T.attached_class) } + def self.new( + size: OpenAI::Internal::Transport::PooledNetRequester::DEFAULT_MAX_CONNECTIONS + ) + end + end + end + end +end diff --git a/rbi/openai/internal/type/array_of.rbi b/rbi/openai/internal/type/array_of.rbi new file mode 100644 index 00000000..28eaab6f --- /dev/null +++ b/rbi/openai/internal/type/array_of.rbi @@ -0,0 +1,104 @@ +# typed: strong + +module OpenAI + module Internal + module Type + # @api private + # + # Array of items of a given type. + class ArrayOf + include OpenAI::Internal::Type::Converter + include OpenAI::Internal::Util::SorbetRuntimeSupport + + abstract! + + Elem = type_member(:out) + + sig do + params( + type_info: + T.any( + OpenAI::Internal::AnyHash, + T.proc.returns(OpenAI::Internal::Type::Converter::Input), + OpenAI::Internal::Type::Converter::Input + ), + spec: OpenAI::Internal::AnyHash + ).returns(T.attached_class) + end + def self.[](type_info, spec = {}) + end + + sig { params(other: T.anything).returns(T::Boolean) } + def ===(other) + end + + sig { params(other: T.anything).returns(T::Boolean) } + def ==(other) + end + + sig { returns(Integer) } + def hash + end + + # @api private + sig do + override + .params( + value: T.any(T::Array[T.anything], T.anything), + state: OpenAI::Internal::Type::Converter::CoerceState + ) + .returns(T.any(T::Array[T.anything], T.anything)) + end + def coerce(value, state:) + end + + # @api private + sig do + override + .params( + value: T.any(T::Array[T.anything], T.anything), + state: OpenAI::Internal::Type::Converter::DumpState + ) + .returns(T.any(T::Array[T.anything], T.anything)) + end + def dump(value, state:) + end + + # @api private + sig { returns(T.anything) } + def to_sorbet_type + end + + # @api private + sig { returns(Elem) } + protected def item_type + end + + # @api private + sig { returns(T::Boolean) } + protected def nilable? + end + + # @api private + sig do + params( + type_info: + T.any( + OpenAI::Internal::AnyHash, + T.proc.returns(OpenAI::Internal::Type::Converter::Input), + OpenAI::Internal::Type::Converter::Input + ), + spec: OpenAI::Internal::AnyHash + ).void + end + def initialize(type_info, spec = {}) + end + + # @api private + sig { params(depth: Integer).returns(String) } + def inspect(depth: 0) + end + end + end + end +end diff --git a/rbi/openai/internal/type/base_model.rbi b/rbi/openai/internal/type/base_model.rbi new file mode 100644 index 00000000..df16ad79 --- /dev/null +++ b/rbi/openai/internal/type/base_model.rbi @@ -0,0 +1,295 @@ +# typed: strong + +module OpenAI + module Internal + module Type + class BaseModel + extend OpenAI::Internal::Type::Converter + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + abstract! + + KnownField = + T.type_alias do + { + mode: T.nilable(Symbol), + required: T::Boolean, + nilable: T::Boolean + } + end + + OrHash = + T.type_alias do + T.any(OpenAI::Internal::Type::BaseModel, OpenAI::Internal::AnyHash) + end + + class << self + # @api private + # + # Assumes superclass fields are totally defined before fields are accessed / + # defined on subclasses. 
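`ArrayOf`, typed above, is the runtime converter behind array-valued fields; combined with the `required`/`optional` field DSL typed just below, generated models read roughly like this hypothetical sketch (the model name and fields are invented for illustration):

```ruby
require "openai"

class ExampleModel < OpenAI::Internal::Type::BaseModel
  # Each declaration registers a converter, per the add_field/required/optional
  # signatures below.
  required :id, String
  required :tags, OpenAI::Internal::Type::ArrayOf[String]
  optional :labels, OpenAI::Internal::Type::HashOf[String], nil?: true
end

ex = ExampleModel.new(id: "x_123", tags: %w[alpha beta])
ex.tags   # => ["alpha", "beta"]
ex[:tags] # raw lookup, per BaseModel#[] typed below
```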
+ sig { params(child: T.self_type).void } + def inherited(child) + end + + # @api private + sig do + returns( + T::Hash[ + Symbol, + T.all( + OpenAI::Internal::Type::BaseModel::KnownField, + { + type_fn: + T.proc.returns(OpenAI::Internal::Type::Converter::Input) + } + ) + ] + ) + end + def known_fields + end + + # @api private + sig do + returns( + T::Hash[ + Symbol, + T.all( + OpenAI::Internal::Type::BaseModel::KnownField, + { type: OpenAI::Internal::Type::Converter::Input } + ) + ] + ) + end + def fields + end + + # @api private + sig do + params( + name_sym: Symbol, + required: T::Boolean, + type_info: + T.any( + { + const: + T.nilable( + T.any(NilClass, T::Boolean, Integer, Float, Symbol) + ), + enum: + T.nilable( + T.proc.returns(OpenAI::Internal::Type::Converter::Input) + ), + union: + T.nilable( + T.proc.returns(OpenAI::Internal::Type::Converter::Input) + ), + api_name: Symbol, + nil?: T::Boolean + }, + T.proc.returns(OpenAI::Internal::Type::Converter::Input), + OpenAI::Internal::Type::Converter::Input + ), + spec: OpenAI::Internal::AnyHash + ).void + end + private def add_field(name_sym, required:, type_info:, spec:) + end + + # @api private + sig do + params( + name_sym: Symbol, + type_info: + T.any( + OpenAI::Internal::AnyHash, + T.proc.returns(OpenAI::Internal::Type::Converter::Input), + OpenAI::Internal::Type::Converter::Input + ), + spec: OpenAI::Internal::AnyHash + ).void + end + def required(name_sym, type_info, spec = {}) + end + + # @api private + sig do + params( + name_sym: Symbol, + type_info: + T.any( + OpenAI::Internal::AnyHash, + T.proc.returns(OpenAI::Internal::Type::Converter::Input), + OpenAI::Internal::Type::Converter::Input + ), + spec: OpenAI::Internal::AnyHash + ).void + end + def optional(name_sym, type_info, spec = {}) + end + + # @api private + # + # `request_only` attributes are not excluded from `.#coerce` when receiving responses, + # even if well-behaved servers should not send them + sig { params(blk: T.proc.void).void } + private def request_only(&blk) + end + + # @api private + # + # `response_only` attributes are omitted from `.#dump` when making requests + sig { params(blk: T.proc.void).void } + private def response_only(&blk) + end + + sig { params(other: T.anything).returns(T::Boolean) } + def ==(other) + end + + sig { returns(Integer) } + def hash + end + end + + sig { params(other: T.anything).returns(T::Boolean) } + def ==(other) + end + + sig { returns(Integer) } + def hash + end + + class << self + # @api private + sig do + override + .params( + value: + T.any( + OpenAI::Internal::Type::BaseModel, + T::Hash[T.anything, T.anything], + T.anything + ), + state: OpenAI::Internal::Type::Converter::CoerceState + ) + .returns(T.any(T.attached_class, T.anything)) + end + def coerce(value, state:) + end + + # @api private + sig do + override + .params( + value: T.any(T.attached_class, T.anything), + state: OpenAI::Internal::Type::Converter::DumpState + ) + .returns(T.any(T::Hash[T.anything, T.anything], T.anything)) + end + def dump(value, state:) + end + + # @api private + sig { returns(T.anything) } + def to_sorbet_type + end + end + + class << self + # @api private + sig do + params( + model: OpenAI::Internal::Type::BaseModel, + convert: T::Boolean + ).returns(OpenAI::Internal::AnyHash) + end + def recursively_to_h(model, convert:) + end + end + + # Returns the raw value associated with the given key, if found. Otherwise, nil is + # returned. 
+ # + # It is valid to look up keys that are not in the API spec, for example to access + # undocumented features. This method does not parse response data into + # higher-level types. Looking up by anything other than a Symbol raises an ArgumentError. + sig { params(key: Symbol).returns(T.nilable(T.anything)) } + def [](key) + end + + # Returns a Hash of the data underlying this object. O(1) + # + # Keys are Symbols and values are the raw values from the response. The return + # value indicates which values were ever set on the object, i.e. there will be a + # key in this hash if a value was ever set, even if that value was nil. + # + # This method is not recursive. The returned value is shared by the object, so it + # should not be mutated. + sig { overridable.returns(OpenAI::Internal::AnyHash) } + def to_h + end + + # Returns a Hash of the data underlying this object. O(1) + # + # Keys are Symbols and values are the raw values from the response. The return + # value indicates which values were ever set on the object, i.e. there will be a + # key in this hash if a value was ever set, even if that value was nil. + # + # This method is not recursive. The returned value is shared by the object, so it + # should not be mutated. + sig { overridable.returns(OpenAI::Internal::AnyHash) } + def to_hash + end + + # In addition to the behaviour of `#to_h`, this method will recursively call + # `#to_h` on nested models. + sig { overridable.returns(OpenAI::Internal::AnyHash) } + def deep_to_h + end + + sig do + params(keys: T.nilable(T::Array[Symbol])).returns( + OpenAI::Internal::AnyHash + ) + end + def deconstruct_keys(keys) + end + + sig { params(a: T.anything).returns(String) } + def to_json(*a) + end + + sig { params(a: T.anything).returns(String) } + def to_yaml(*a) + end + + # Create a new instance of a model. + sig do + params(data: T.any(T::Hash[Symbol, T.anything], T.self_type)).returns( + T.attached_class + ) + end + def self.new(data = {}) + end + + class << self + # @api private + sig { params(depth: Integer).returns(String) } + def inspect(depth: 0) + end + end + + sig { returns(String) } + def to_s + end + + # @api private + sig { returns(String) } + def inspect + end + end + end + end +end diff --git a/rbi/openai/internal/type/base_page.rbi b/rbi/openai/internal/type/base_page.rbi new file mode 100644 index 00000000..c097c095 --- /dev/null +++ b/rbi/openai/internal/type/base_page.rbi @@ -0,0 +1,42 @@ +# typed: strong + +module OpenAI + module Internal + module Type + # @api private + # + # This module provides a base implementation for paginated responses in the SDK. + module BasePage + Elem = type_member(:out) + + sig { overridable.returns(T::Boolean) } + def next_page? 
+ end + + sig { overridable.returns(T.self_type) } + def next_page + end + + sig { overridable.params(blk: T.proc.params(arg0: Elem).void).void } + def auto_paging_each(&blk) + end + + sig { returns(T::Enumerable[Elem]) } + def to_enum + end + + # @api private + sig do + params( + client: OpenAI::Internal::Transport::BaseClient, + req: OpenAI::Internal::Transport::BaseClient::RequestComponents, + headers: T.any(T::Hash[String, String], Net::HTTPHeader), + page_data: T.anything + ).void + end + def initialize(client:, req:, headers:, page_data:) + end + end + end + end +end diff --git a/rbi/openai/internal/type/base_stream.rbi b/rbi/openai/internal/type/base_stream.rbi new file mode 100644 index 00000000..e1155943 --- /dev/null +++ b/rbi/openai/internal/type/base_stream.rbi @@ -0,0 +1,75 @@ +# typed: strong + +module OpenAI + module Internal + module Type + # @api private + # + # This module provides a base implementation for streaming responses in the SDK. + module BaseStream + include Enumerable + + Message = type_member(:in) + Elem = type_member(:out) + + class << self + # Attempt to close the underlying transport when the stream itself is garbage + # collected. + # + # This should not be relied upon for resource clean up, as the garbage collector + # is not guaranteed to run. + sig do + params(stream: T::Enumerable[T.anything]).returns( + T.proc.params(arg0: Integer).void + ) + end + def defer_closing(stream) + end + end + + sig { void } + def close + end + + # @api private + sig { overridable.returns(T::Enumerable[Elem]) } + private def iterator + end + + sig { params(blk: T.proc.params(arg0: Elem).void).void } + def each(&blk) + end + + sig { returns(T::Enumerator[Elem]) } + def to_enum + end + + # @api private + sig do + params( + model: + T.any(T::Class[T.anything], OpenAI::Internal::Type::Converter), + url: URI::Generic, + status: Integer, + response: Net::HTTPResponse, + unwrap: + T.any( + Symbol, + Integer, + T::Array[T.any(Symbol, Integer)], + T.proc.params(arg0: T.anything).returns(T.anything) + ), + stream: T::Enumerable[Message] + ).void + end + def initialize(model:, url:, status:, response:, unwrap:, stream:) + end + + # @api private + sig { returns(String) } + def inspect + end + end + end + end +end diff --git a/rbi/openai/internal/type/boolean.rbi b/rbi/openai/internal/type/boolean.rbi new file mode 100644 index 00000000..8324ff24 --- /dev/null +++ b/rbi/openai/internal/type/boolean.rbi @@ -0,0 +1,58 @@ +# typed: strong + +module OpenAI + module Internal + module Type + # @api private + # + # Ruby has no Boolean class; this is something for models to refer to. + class Boolean + extend OpenAI::Internal::Type::Converter + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + abstract! + + sig { params(other: T.anything).returns(T::Boolean) } + def self.===(other) + end + + sig { params(other: T.anything).returns(T::Boolean) } + def self.==(other) + end + + class << self + # @api private + # + # Coerce value to Boolean if possible, otherwise return the original value. 
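`BaseStream` above supplies `#each`, `#close`, and the GC-time `defer_closing` fallback. A hedged streaming sketch, reusing the `Responses#create_streaming` stub deleted earlier in this diff; the model name is an assumption:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

stream = client.responses.create_streaming(
  input: "Write a haiku about type checking.",
  model: "gpt-4o" # assumed
)

begin
  # Each yielded event is one of the typed ResponseStreamEvent variants.
  stream.each do |event|
    print event.delta if event.respond_to?(:delta)
  end
ensure
  stream.close # defer_closing only runs at GC time, so close deterministically
end
```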
+ sig do
+ override
+ .params(
+ value: T.any(T::Boolean, T.anything),
+ state: OpenAI::Internal::Type::Converter::CoerceState
+ )
+ .returns(T.any(T::Boolean, T.anything))
+ end
+ def coerce(value, state:)
+ end
+
+ # @api private
+ sig do
+ override
+ .params(
+ value: T.any(T::Boolean, T.anything),
+ state: OpenAI::Internal::Type::Converter::DumpState
+ )
+ .returns(T.any(T::Boolean, T.anything))
+ end
+ def dump(value, state:)
+ end
+
+ # @api private
+ sig { returns(T.anything) }
+ def to_sorbet_type
+ end
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/rbi/openai/internal/type/converter.rbi b/rbi/openai/internal/type/converter.rbi
new file mode 100644
index 00000000..4c746081
--- /dev/null
+++ b/rbi/openai/internal/type/converter.rbi
@@ -0,0 +1,204 @@
+# typed: strong
+
+module OpenAI
+ module Internal
+ module Type
+ # @api private
+ module Converter
+ extend OpenAI::Internal::Util::SorbetRuntimeSupport
+
+ Input =
+ T.type_alias do
+ T.any(OpenAI::Internal::Type::Converter, T::Class[T.anything])
+ end
+
+ CoerceState =
+ T.type_alias do
+ {
+ translate_names: T::Boolean,
+ strictness: T::Boolean,
+ exactness: {
+ yes: Integer,
+ no: Integer,
+ maybe: Integer
+ },
+ error: T::Class[StandardError],
+ branched: Integer
+ }
+ end
+
+ DumpState = T.type_alias { { can_retry: T::Boolean } }
+
+ # @api private
+ sig do
+ overridable
+ .params(
+ value: T.anything,
+ state: OpenAI::Internal::Type::Converter::CoerceState
+ )
+ .returns(T.anything)
+ end
+ def coerce(value, state:)
+ end
+
+ # @api private
+ sig do
+ overridable
+ .params(
+ value: T.anything,
+ state: OpenAI::Internal::Type::Converter::DumpState
+ )
+ .returns(T.anything)
+ end
+ def dump(value, state:)
+ end
+
+ # @api private
+ sig { params(depth: Integer).returns(String) }
+ def inspect(depth: 0)
+ end
+
+ class << self
+ # @api private
+ sig do
+ params(
+ spec:
+ T.any(
+ {
+ const:
+ T.nilable(
+ T.any(NilClass, T::Boolean, Integer, Float, Symbol)
+ ),
+ enum:
+ T.nilable(
+ T.proc.returns(OpenAI::Internal::Type::Converter::Input)
+ ),
+ union:
+ T.nilable(
+ T.proc.returns(OpenAI::Internal::Type::Converter::Input)
+ )
+ },
+ T.proc.returns(OpenAI::Internal::Type::Converter::Input),
+ OpenAI::Internal::Type::Converter::Input
+ )
+ ).returns(T.proc.returns(T.anything))
+ end
+ def type_info(spec)
+ end
+
+ # @api private
+ sig do
+ params(
+ type_info:
+ T.any(
+ {
+ const:
+ T.nilable(
+ T.any(NilClass, T::Boolean, Integer, Float, Symbol)
+ ),
+ enum:
+ T.nilable(
+ T.proc.returns(OpenAI::Internal::Type::Converter::Input)
+ ),
+ union:
+ T.nilable(
+ T.proc.returns(OpenAI::Internal::Type::Converter::Input)
+ )
+ },
+ T.proc.returns(OpenAI::Internal::Type::Converter::Input),
+ OpenAI::Internal::Type::Converter::Input
+ ),
+ spec:
+ T.any(
+ {
+ const:
+ T.nilable(
+ T.any(NilClass, T::Boolean, Integer, Float, Symbol)
+ ),
+ enum:
+ T.nilable(
+ T.proc.returns(OpenAI::Internal::Type::Converter::Input)
+ ),
+ union:
+ T.nilable(
+ T.proc.returns(OpenAI::Internal::Type::Converter::Input)
+ )
+ },
+ T.proc.returns(OpenAI::Internal::Type::Converter::Input),
+ OpenAI::Internal::Type::Converter::Input
+ )
+ ).returns(OpenAI::Internal::AnyHash)
+ end
+ def meta_info(type_info, spec)
+ end
+
+ # @api private
+ sig do
+ params(translate_names: T::Boolean).returns(
+ OpenAI::Internal::Type::Converter::CoerceState
+ )
+ end
+ def new_coerce_state(translate_names: true)
+ end
+
+ # @api private
+ #
+ # Based on `target`, transform `value` into `target`, to the extent possible:
+ #
+ # 1. if the given `value` conforms to `target` already, return the given `value`
+ # 2. if it's possible and safe to convert the given `value` to `target`, then
+ # return the converted value
+ # 3. otherwise, return the given `value` unaltered
+ #
+ # The coercion process is subject to improvement between minor release versions.
+ # See https://docs.pydantic.dev/latest/concepts/unions/#smart-mode
+ sig do
+ params(
+ target: OpenAI::Internal::Type::Converter::Input,
+ value: T.anything,
+ state: OpenAI::Internal::Type::Converter::CoerceState
+ ).returns(T.anything)
+ end
+ def coerce(
+ target,
+ value,
+ # The `strictness` is one of `true` or `false`. This informs the coercion
+ # strategy when we have to decide between multiple possible conversion targets:
+ #
+ # - `true`: the conversion must be exact, with minimum coercion.
+ # - `false`: the conversion can be approximate, with some coercion.
+ #
+ # The `exactness` is a `Hash` with keys being one of `yes`, `no`, or `maybe`.
+ # For any given conversion attempt, the exactness will be updated based on how
+ # closely the value recursively matches the target type:
+ #
+ # - `yes`: the value can be converted to the target type with minimum coercion.
+ # - `maybe`: the value can be converted to the target type with some reasonable
+ # coercion.
+ # - `no`: the value cannot be converted to the target type.
+ #
+ # See implementation below for more details.
+ state: OpenAI::Internal::Type::Converter.new_coerce_state
+ )
+ end
+
+ # @api private
+ sig do
+ params(
+ target: OpenAI::Internal::Type::Converter::Input,
+ value: T.anything,
+ state: OpenAI::Internal::Type::Converter::DumpState
+ ).returns(T.anything)
+ end
+ def dump(target, value, state: { can_retry: true })
+ end
+
+ # @api private
+ sig { params(target: T.anything, depth: Integer).returns(String) }
+ def inspect(target, depth:)
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/rbi/openai/internal/type/enum.rbi b/rbi/openai/internal/type/enum.rbi
new file mode 100644
index 00000000..fa242fb4
--- /dev/null
+++ b/rbi/openai/internal/type/enum.rbi
@@ -0,0 +1,82 @@
+# typed: strong
+
+module OpenAI
+ module Internal
+ module Type
+ # @api private
+ #
+ # A value from among a specified list of options. OpenAPI enum values map to Ruby
+ # values in the SDK as follows:
+ #
+ # 1. boolean => true | false
+ # 2. integer => Integer
+ # 3. float => Float
+ # 4. string => Symbol
+ #
+ # We can therefore convert string values to Symbols, but can't convert other
+ # values safely.
+ module Enum
+ include OpenAI::Internal::Type::Converter
+ include OpenAI::Internal::Util::SorbetRuntimeSupport
+
+ # All of the valid Symbol values for this enum.
+ sig do
+ overridable.returns(
+ T::Array[T.any(NilClass, T::Boolean, Integer, Float, Symbol)]
+ )
+ end
+ def values
+ end
+
+ sig { params(other: T.anything).returns(T::Boolean) }
+ def ===(other)
+ end
+
+ sig { params(other: T.anything).returns(T::Boolean) }
+ def ==(other)
+ end
+
+ sig { returns(Integer) }
+ def hash
+ end
+
+ # @api private
+ #
+ # Unlike with primitives, `Enum` additionally validates that the value is a member
+ # of the enum.
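+ #
+ # A hedged sketch (`OpenAI::ChatModel` stands in for any generated enum;
+ # results shown follow the mapping rules documented above):
+ #
+ # @example
+ #   state = OpenAI::Internal::Type::Converter.new_coerce_state
+ #   OpenAI::ChatModel.coerce("gpt-4o", state: state) # => :"gpt-4o", a known member
+ #   OpenAI::ChatModel.coerce(42, state: state)       # => 42, not coercible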
+ sig do
+ override
+ .params(
+ value: T.any(String, Symbol, T.anything),
+ state: OpenAI::Internal::Type::Converter::CoerceState
+ )
+ .returns(T.any(Symbol, T.anything))
+ end
+ def coerce(value, state:)
+ end
+
+ # @api private
+ sig do
+ override
+ .params(
+ value: T.any(Symbol, T.anything),
+ state: OpenAI::Internal::Type::Converter::DumpState
+ )
+ .returns(T.any(Symbol, T.anything))
+ end
+ def dump(value, state:)
+ end
+
+ # @api private
+ sig { returns(T.anything) }
+ def to_sorbet_type
+ end
+
+ # @api private
+ sig { params(depth: Integer).returns(String) }
+ def inspect(depth: 0)
+ end
+ end
+ end
+ end
+end
diff --git a/rbi/openai/internal/type/file_input.rbi b/rbi/openai/internal/type/file_input.rbi
new file mode 100644
index 00000000..09dde1b1
--- /dev/null
+++ b/rbi/openai/internal/type/file_input.rbi
@@ -0,0 +1,59 @@
+# typed: strong
+
+module OpenAI
+ module Internal
+ module Type
+ # @api private
+ #
+ # Either `Pathname` or `StringIO`, or `IO`, or
+ # `OpenAI::Internal::Type::FileInput`.
+ #
+ # Note: when `IO` is used, all retries are disabled, since many `IO` streams are
+ # not rewindable.
+ class FileInput
+ extend OpenAI::Internal::Type::Converter
+
+ abstract!
+
+ sig { params(other: T.anything).returns(T::Boolean) }
+ def self.===(other)
+ end
+
+ sig { params(other: T.anything).returns(T::Boolean) }
+ def self.==(other)
+ end
+
+ class << self
+ # @api private
+ sig do
+ override
+ .params(
+ value: T.any(StringIO, String, T.anything),
+ state: OpenAI::Internal::Type::Converter::CoerceState
+ )
+ .returns(T.any(StringIO, T.anything))
+ end
+ def coerce(value, state:)
+ end
+
+ # @api private
+ sig do
+ override
+ .params(
+ value: T.any(Pathname, StringIO, IO, String, T.anything),
+ state: OpenAI::Internal::Type::Converter::DumpState
+ )
+ .returns(T.any(Pathname, StringIO, IO, String, T.anything))
+ end
+ def dump(value, state:)
+ end
+
+ # @api private
+ sig { returns(T.anything) }
+ def to_sorbet_type
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/rbi/openai/internal/type/hash_of.rbi b/rbi/openai/internal/type/hash_of.rbi
new file mode 100644
index 00000000..d9f9f9ec
--- /dev/null
+++ b/rbi/openai/internal/type/hash_of.rbi
@@ -0,0 +1,104 @@
+# typed: strong
+
+module OpenAI
+ module Internal
+ module Type
+ # @api private
+ #
+ # Hash of items of a given type.
+ class HashOf
+ include OpenAI::Internal::Type::Converter
+ include OpenAI::Internal::Util::SorbetRuntimeSupport
+
+ abstract!
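+
+ # A hedged construction sketch: `HashOf[...]` is how generated models declare
+ # hash-valued fields, and `#===` is assumed to check each value against the
+ # item type.
+ #
+ # @example
+ #   map = OpenAI::Internal::Type::HashOf[Integer]
+ #   map === {usage: 1} # => true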
+ + Elem = type_member(:out) + + sig do + params( + type_info: + T.any( + OpenAI::Internal::AnyHash, + T.proc.returns(OpenAI::Internal::Type::Converter::Input), + OpenAI::Internal::Type::Converter::Input + ), + spec: OpenAI::Internal::AnyHash + ).returns(T.attached_class) + end + def self.[](type_info, spec = {}) + end + + sig { params(other: T.anything).returns(T::Boolean) } + def ===(other) + end + + sig { params(other: T.anything).returns(T::Boolean) } + def ==(other) + end + + sig { returns(Integer) } + def hash + end + + # @api private + sig do + override + .params( + value: T.any(T::Hash[T.anything, T.anything], T.anything), + state: OpenAI::Internal::Type::Converter::CoerceState + ) + .returns(T.any(OpenAI::Internal::AnyHash, T.anything)) + end + def coerce(value, state:) + end + + # @api private + sig do + override + .params( + value: T.any(T::Hash[T.anything, T.anything], T.anything), + state: OpenAI::Internal::Type::Converter::DumpState + ) + .returns(T.any(OpenAI::Internal::AnyHash, T.anything)) + end + def dump(value, state:) + end + + # @api private + sig { returns(T.anything) } + def to_sorbet_type + end + + # @api private + sig { returns(Elem) } + protected def item_type + end + + # @api private + sig { returns(T::Boolean) } + protected def nilable? + end + + # @api private + sig do + params( + type_info: + T.any( + OpenAI::Internal::AnyHash, + T.proc.returns(OpenAI::Internal::Type::Converter::Input), + OpenAI::Internal::Type::Converter::Input + ), + spec: OpenAI::Internal::AnyHash + ).void + end + def initialize(type_info, spec = {}) + end + + # @api private + sig { params(depth: Integer).returns(String) } + def inspect(depth: 0) + end + end + end + end +end diff --git a/rbi/openai/internal/type/request_parameters.rbi b/rbi/openai/internal/type/request_parameters.rbi new file mode 100644 index 00000000..45ab8328 --- /dev/null +++ b/rbi/openai/internal/type/request_parameters.rbi @@ -0,0 +1,29 @@ +# typed: strong + +module OpenAI + module Internal + module Type + # @api private + module RequestParameters + # Options to specify HTTP behaviour for this request. + sig { returns(OpenAI::RequestOptions) } + attr_reader :request_options + + sig { params(request_options: OpenAI::RequestOptions::OrHash).void } + attr_writer :request_options + + # @api private + module Converter + # @api private + sig do + params(params: T.anything).returns( + [T.anything, OpenAI::Internal::AnyHash] + ) + end + def dump_request(params) + end + end + end + end + end +end diff --git a/rbi/openai/internal/type/union.rbi b/rbi/openai/internal/type/union.rbi new file mode 100644 index 00000000..4aa6836b --- /dev/null +++ b/rbi/openai/internal/type/union.rbi @@ -0,0 +1,126 @@ +# typed: strong + +module OpenAI + module Internal + module Type + # @api private + module Union + include OpenAI::Internal::Type::Converter + include OpenAI::Internal::Util::SorbetRuntimeSupport + + # @api private + # + # All of the specified variant info for this union. + sig do + returns( + T::Array[ + [ + T.nilable(Symbol), + T.proc.returns(OpenAI::Internal::Type::Converter::Input), + OpenAI::Internal::AnyHash + ] + ] + ) + end + private def known_variants + end + + # @api private + sig do + returns( + T::Array[[T.nilable(Symbol), T.anything, OpenAI::Internal::AnyHash]] + ) + end + protected def derefed_variants + end + + # All of the specified variants for this union. 
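+ #
+ # A hedged illustration (`OpenAI::AllModels` is one such generated union):
+ #
+ # @example
+ #   OpenAI::AllModels.variants
+ #   # => e.g. [String, OpenAI::ChatModel, OpenAI::AllModels::ResponsesOnlyModel]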
+ sig { overridable.returns(T::Array[T.anything]) } + def variants + end + + # @api private + sig { params(property: Symbol).void } + private def discriminator(property) + end + + # @api private + sig do + params( + key: + T.any( + Symbol, + OpenAI::Internal::AnyHash, + T.proc.returns(T.anything), + T.anything + ), + spec: + T.any( + OpenAI::Internal::AnyHash, + T.proc.returns(T.anything), + T.anything + ) + ).void + end + private def variant(key, spec = nil) + end + + # @api private + sig { params(value: T.anything).returns(T.nilable(T.anything)) } + private def resolve_variant(value) + end + + sig { params(other: T.anything).returns(T::Boolean) } + def ===(other) + end + + sig { params(other: T.anything).returns(T::Boolean) } + def ==(other) + end + + sig { returns(Integer) } + def hash + end + + # @api private + # + # Tries to efficiently coerce the given value to one of the known variants. + # + # If the value cannot match any of the known variants, the coercion is considered + # non-viable and returns the original value. + sig do + override + .params( + value: T.anything, + state: OpenAI::Internal::Type::Converter::CoerceState + ) + .returns(T.anything) + end + def coerce(value, state:) + end + + # @api private + sig do + override + .params( + value: T.anything, + state: OpenAI::Internal::Type::Converter::DumpState + ) + .returns(T.anything) + end + def dump(value, state:) + end + + # @api private + sig { returns(T.anything) } + def to_sorbet_type + end + + # @api private + sig { params(depth: Integer).returns(String) } + def inspect(depth: 0) + end + end + end + end +end diff --git a/rbi/openai/internal/type/unknown.rbi b/rbi/openai/internal/type/unknown.rbi new file mode 100644 index 00000000..9f4045b3 --- /dev/null +++ b/rbi/openai/internal/type/unknown.rbi @@ -0,0 +1,58 @@ +# typed: strong + +module OpenAI + module Internal + module Type + # @api private + # + # When we don't know what to expect for the value. + class Unknown + extend OpenAI::Internal::Type::Converter + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + abstract! + + sig { params(other: T.anything).returns(T::Boolean) } + def self.===(other) + end + + sig { params(other: T.anything).returns(T::Boolean) } + def self.==(other) + end + + class << self + # @api private + # + # No coercion needed for Unknown type. 
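+ #
+ # A hedged sketch (per the comment above, the input is expected back
+ # unchanged):
+ #
+ # @example
+ #   state = OpenAI::Internal::Type::Converter.new_coerce_state
+ #   OpenAI::Internal::Type::Unknown.coerce([1, "two"], state: state)
+ #   # => [1, "two"]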
+ sig do + override + .params( + value: T.anything, + state: OpenAI::Internal::Type::Converter::CoerceState + ) + .returns(T.anything) + end + def coerce(value, state:) + end + + # @api private + sig do + override + .params( + value: T.anything, + state: OpenAI::Internal::Type::Converter::DumpState + ) + .returns(T.anything) + end + def dump(value, state:) + end + + # @api private + sig { returns(T.anything) } + def to_sorbet_type + end + end + end + end + end +end diff --git a/rbi/openai/internal/util.rbi b/rbi/openai/internal/util.rbi new file mode 100644 index 00000000..69ba15c5 --- /dev/null +++ b/rbi/openai/internal/util.rbi @@ -0,0 +1,484 @@ +# typed: strong + +module OpenAI + module Internal + # @api private + module Util + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + # @api private + sig { returns(Float) } + def self.monotonic_secs + end + + # @api private + sig do + params(ns: T.any(Module, T::Class[T.anything])).returns( + T::Enumerable[T.any(Module, T::Class[T.anything])] + ) + end + def self.walk_namespaces(ns) + end + + class << self + # @api private + sig { returns(String) } + def arch + end + + # @api private + sig { returns(String) } + def os + end + end + + class << self + # @api private + sig { params(input: T.anything).returns(T::Boolean) } + def primitive?(input) + end + + # @api private + sig do + params(input: T.any(String, T::Boolean)).returns( + T.any(T::Boolean, T.anything) + ) + end + def coerce_boolean(input) + end + + # @api private + sig do + params(input: T.any(String, T::Boolean)).returns( + T.nilable(T::Boolean) + ) + end + def coerce_boolean!(input) + end + + # @api private + sig do + params(input: T.any(String, Integer)).returns( + T.any(Integer, T.anything) + ) + end + def coerce_integer(input) + end + + # @api private + sig do + params(input: T.any(String, Integer, Float)).returns( + T.any(Float, T.anything) + ) + end + def coerce_float(input) + end + + # @api private + sig do + params(input: T.anything).returns( + T.any(T::Hash[T.anything, T.anything], T.anything) + ) + end + def coerce_hash(input) + end + + # @api private + sig do + params(input: T.anything).returns( + T.nilable(T::Hash[T.anything, T.anything]) + ) + end + def coerce_hash!(input) + end + end + + class << self + # @api private + sig do + params(lhs: T.anything, rhs: T.anything, concat: T::Boolean).returns( + T.anything + ) + end + private def deep_merge_lr(lhs, rhs, concat: false) + end + + # @api private + # + # Recursively merge one hash with another. If the values at a given key are not + # both hashes, just take the new value. + sig do + params( + values: T::Array[T.anything], + sentinel: T.nilable(T.anything), + concat: T::Boolean + ).returns(T.anything) + end + def deep_merge( + *values, + # the value to return if no values are provided. + sentinel: nil, + # whether to merge sequences by concatenation. 
+ concat: false + ) + end + + # @api private + sig do + params( + data: + T.any( + OpenAI::Internal::AnyHash, + T::Array[T.anything], + T.anything + ), + pick: + T.nilable( + T.any( + Symbol, + Integer, + T::Array[T.any(Symbol, Integer)], + T.proc.params(arg0: T.anything).returns(T.anything) + ) + ), + blk: T.nilable(T.proc.returns(T.anything)) + ).returns(T.nilable(T.anything)) + end + def dig(data, pick, &blk) + end + end + + class << self + # @api private + sig { params(uri: URI::Generic).returns(String) } + def uri_origin(uri) + end + + # @api private + sig { params(path: T.any(String, T::Array[String])).returns(String) } + def interpolate_path(path) + end + end + + class << self + # @api private + sig do + params(query: T.nilable(String)).returns( + T::Hash[String, T::Array[String]] + ) + end + def decode_query(query) + end + + # @api private + sig do + params( + query: + T.nilable( + T::Hash[String, T.nilable(T.any(T::Array[String], String))] + ) + ).returns(T.nilable(String)) + end + def encode_query(query) + end + end + + ParsedUri = + T.type_alias do + { + scheme: T.nilable(String), + host: T.nilable(String), + port: T.nilable(Integer), + path: T.nilable(String), + query: T::Hash[String, T::Array[String]] + } + end + + class << self + # @api private + sig do + params(url: T.any(URI::Generic, String)).returns( + OpenAI::Internal::Util::ParsedUri + ) + end + def parse_uri(url) + end + + # @api private + sig do + params(parsed: OpenAI::Internal::Util::ParsedUri).returns( + URI::Generic + ) + end + def unparse_uri(parsed) + end + + # @api private + sig do + params( + lhs: OpenAI::Internal::Util::ParsedUri, + rhs: OpenAI::Internal::Util::ParsedUri + ).returns(URI::Generic) + end + def join_parsed_uri(lhs, rhs) + end + end + + class << self + # @api private + sig do + params( + headers: + T::Hash[ + String, + T.nilable( + T.any( + String, + Integer, + T::Array[T.nilable(T.any(String, Integer))] + ) + ) + ] + ).returns(T::Hash[String, String]) + end + def normalized_headers(*headers) + end + end + + # @api private + # + # An adapter that satisfies the IO interface required by `::IO.copy_stream` + class ReadIOAdapter + # @api private + sig { returns(T.nilable(T::Boolean)) } + def close? 
+ end + + # @api private + sig { void } + def close + end + + # @api private + sig { params(max_len: T.nilable(Integer)).returns(String) } + private def read_enum(max_len) + end + + # @api private + sig do + params( + max_len: T.nilable(Integer), + out_string: T.nilable(String) + ).returns(T.nilable(String)) + end + def read(max_len = nil, out_string = nil) + end + + # @api private + sig do + params( + src: T.any(String, Pathname, StringIO, T::Enumerable[String]), + blk: T.proc.params(arg0: String).void + ).returns(T.attached_class) + end + def self.new(src, &blk) + end + end + + class << self + sig do + params(blk: T.proc.params(y: Enumerator::Yielder).void).returns( + T::Enumerable[String] + ) + end + def writable_enum(&blk) + end + end + + JSON_CONTENT = + T.let(%r{^application/(?:vnd(?:\.[^.]+)*\+)?json(?!l)}, Regexp) + JSONL_CONTENT = + T.let(%r{^application/(:?x-(?:n|l)djson)|(:?(?:x-)?jsonl)}, Regexp) + + class << self + # @api private + sig do + params( + y: Enumerator::Yielder, + val: T.anything, + closing: T::Array[T.proc.void], + content_type: T.nilable(String) + ).void + end + private def write_multipart_content( + y, + val:, + closing:, + content_type: nil + ) + end + + # @api private + sig do + params( + y: Enumerator::Yielder, + boundary: String, + key: T.any(Symbol, String), + val: T.anything, + closing: T::Array[T.proc.void] + ).void + end + private def write_multipart_chunk(y, boundary:, key:, val:, closing:) + end + + # @api private + # + # https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.1.md#special-considerations-for-multipart-content + sig do + params(body: T.anything).returns([String, T::Enumerable[String]]) + end + private def encode_multipart_streaming(body) + end + + # @api private + sig do + params(headers: T::Hash[String, String], body: T.anything).returns( + T.anything + ) + end + def encode_content(headers, body) + end + + # @api private + # + # https://www.iana.org/assignments/character-sets/character-sets.xhtml + sig { params(content_type: String, text: String).void } + def force_charset!(content_type, text:) + end + + # @api private + # + # Assumes each chunk in stream has `Encoding::BINARY`. + sig do + params( + headers: T.any(T::Hash[String, String], Net::HTTPHeader), + stream: T::Enumerable[String], + suppress_error: T::Boolean + ).returns(T.anything) + end + def decode_content(headers, stream:, suppress_error: false) + end + end + + class << self + # @api private + # + # https://doc.rust-lang.org/std/iter/trait.FusedIterator.html + sig do + params( + enum: T::Enumerable[T.anything], + external: T::Boolean, + close: T.proc.void + ).returns(T::Enumerable[T.anything]) + end + def fused_enum(enum, external: false, &close) + end + + # @api private + sig { params(enum: T.nilable(T::Enumerable[T.anything])).void } + def close_fused!(enum) + end + + # @api private + sig do + params( + enum: T.nilable(T::Enumerable[T.anything]), + blk: T.proc.params(arg0: Enumerator::Yielder).void + ).returns(T::Enumerable[T.anything]) + end + def chain_fused(enum, &blk) + end + end + + ServerSentEvent = + T.type_alias do + { + event: T.nilable(String), + data: T.nilable(String), + id: T.nilable(String), + retry: T.nilable(Integer) + } + end + + class << self + # @api private + # + # Assumes Strings have been forced into having `Encoding::BINARY`. + # + # This decoder is responsible for reassembling lines split across multiple + # fragments. 
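+ #
+ # A hedged behaviour sketch (the output shown is the assumed reassembly of
+ # fragments at newline boundaries):
+ #
+ # @example
+ #   OpenAI::Internal::Util.decode_lines(["ab\ncd", "ef\n"]).to_a
+ #   # => ["ab\n", "cdef\n"]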
+ sig do + params(enum: T::Enumerable[String]).returns(T::Enumerable[String]) + end + def decode_lines(enum) + end + + # @api private + # + # https://html.spec.whatwg.org/multipage/server-sent-events.html#parsing-an-event-stream + # + # Assumes that `lines` has been decoded with `#decode_lines`. + sig do + params(lines: T::Enumerable[String]).returns( + T::Enumerable[OpenAI::Internal::Util::ServerSentEvent] + ) + end + def decode_sse(lines) + end + end + + # @api private + module SorbetRuntimeSupport + class MissingSorbetRuntimeError < ::RuntimeError + end + + # @api private + sig { returns(T::Hash[Symbol, T.anything]) } + private def sorbet_runtime_constants + end + + # @api private + sig { params(name: Symbol).void } + def const_missing(name) + end + + # @api private + sig { params(name: Symbol).returns(T::Boolean) } + def sorbet_constant_defined?(name) + end + + # @api private + sig { params(name: Symbol, blk: T.proc.returns(T.anything)).void } + def define_sorbet_constant!(name, &blk) + end + + # @api private + sig { returns(T.anything) } + def to_sorbet_type + end + + class << self + # @api private + sig do + params( + type: + T.any(OpenAI::Internal::Util::SorbetRuntimeSupport, T.anything) + ).returns(T.anything) + end + def to_sorbet_type(type) + end + end + end + end + end +end diff --git a/rbi/openai/models.rbi b/rbi/openai/models.rbi new file mode 100644 index 00000000..54cd5582 --- /dev/null +++ b/rbi/openai/models.rbi @@ -0,0 +1,224 @@ +# typed: strong + +module OpenAI + AllModels = OpenAI::Models::AllModels + + Audio = OpenAI::Models::Audio + + AudioModel = OpenAI::Models::AudioModel + + AudioResponseFormat = OpenAI::Models::AudioResponseFormat + + AutoFileChunkingStrategyParam = OpenAI::Models::AutoFileChunkingStrategyParam + + Batch = OpenAI::Models::Batch + + BatchCancelParams = OpenAI::Models::BatchCancelParams + + BatchCreateParams = OpenAI::Models::BatchCreateParams + + BatchError = OpenAI::Models::BatchError + + BatchListParams = OpenAI::Models::BatchListParams + + BatchRequestCounts = OpenAI::Models::BatchRequestCounts + + BatchRetrieveParams = OpenAI::Models::BatchRetrieveParams + + Beta = OpenAI::Models::Beta + + Chat = OpenAI::Models::Chat + + ChatModel = OpenAI::Models::ChatModel + + ComparisonFilter = OpenAI::Models::ComparisonFilter + + Completion = OpenAI::Models::Completion + + CompletionChoice = OpenAI::Models::CompletionChoice + + CompletionCreateParams = OpenAI::Models::CompletionCreateParams + + CompletionUsage = OpenAI::Models::CompletionUsage + + CompoundFilter = OpenAI::Models::CompoundFilter + + ContainerCreateParams = OpenAI::Models::ContainerCreateParams + + ContainerDeleteParams = OpenAI::Models::ContainerDeleteParams + + ContainerListParams = OpenAI::Models::ContainerListParams + + ContainerRetrieveParams = OpenAI::Models::ContainerRetrieveParams + + Containers = OpenAI::Models::Containers + + Conversations = OpenAI::Models::Conversations + + CreateEmbeddingResponse = OpenAI::Models::CreateEmbeddingResponse + + CustomToolInputFormat = OpenAI::Models::CustomToolInputFormat + + Embedding = OpenAI::Models::Embedding + + EmbeddingCreateParams = OpenAI::Models::EmbeddingCreateParams + + EmbeddingModel = OpenAI::Models::EmbeddingModel + + ErrorObject = OpenAI::Models::ErrorObject + + EvalCreateParams = OpenAI::Models::EvalCreateParams + + EvalCustomDataSourceConfig = OpenAI::Models::EvalCustomDataSourceConfig + + EvalDeleteParams = OpenAI::Models::EvalDeleteParams + + EvalListParams = OpenAI::Models::EvalListParams + + EvalRetrieveParams = 
OpenAI::Models::EvalRetrieveParams + + Evals = OpenAI::Models::Evals + + EvalStoredCompletionsDataSourceConfig = + OpenAI::Models::EvalStoredCompletionsDataSourceConfig + + EvalUpdateParams = OpenAI::Models::EvalUpdateParams + + FileChunkingStrategy = OpenAI::Models::FileChunkingStrategy + + FileChunkingStrategyParam = OpenAI::Models::FileChunkingStrategyParam + + FileContent = OpenAI::Models::FileContent + + FileContentParams = OpenAI::Models::FileContentParams + + FileCreateParams = OpenAI::Models::FileCreateParams + + FileDeleted = OpenAI::Models::FileDeleted + + FileDeleteParams = OpenAI::Models::FileDeleteParams + + FileListParams = OpenAI::Models::FileListParams + + FileObject = OpenAI::Models::FileObject + + FilePurpose = OpenAI::Models::FilePurpose + + FileRetrieveParams = OpenAI::Models::FileRetrieveParams + + FineTuning = OpenAI::Models::FineTuning + + FunctionDefinition = OpenAI::Models::FunctionDefinition + + FunctionParameters = + T.let(OpenAI::Models::FunctionParameters, OpenAI::Internal::Type::Converter) + + Graders = OpenAI::Models::Graders + + Image = OpenAI::Models::Image + + ImageCreateVariationParams = OpenAI::Models::ImageCreateVariationParams + + ImageEditCompletedEvent = OpenAI::Models::ImageEditCompletedEvent + + ImageEditParams = OpenAI::Models::ImageEditParams + + ImageEditPartialImageEvent = OpenAI::Models::ImageEditPartialImageEvent + + ImageEditStreamEvent = OpenAI::Models::ImageEditStreamEvent + + ImageGenCompletedEvent = OpenAI::Models::ImageGenCompletedEvent + + ImageGenerateParams = OpenAI::Models::ImageGenerateParams + + ImageGenPartialImageEvent = OpenAI::Models::ImageGenPartialImageEvent + + ImageGenStreamEvent = OpenAI::Models::ImageGenStreamEvent + + ImageModel = OpenAI::Models::ImageModel + + ImagesResponse = OpenAI::Models::ImagesResponse + + Metadata = T.let(OpenAI::Models::Metadata, OpenAI::Internal::Type::Converter) + + Model = OpenAI::Models::Model + + ModelDeleted = OpenAI::Models::ModelDeleted + + ModelDeleteParams = OpenAI::Models::ModelDeleteParams + + ModelListParams = OpenAI::Models::ModelListParams + + ModelRetrieveParams = OpenAI::Models::ModelRetrieveParams + + Moderation = OpenAI::Models::Moderation + + ModerationCreateParams = OpenAI::Models::ModerationCreateParams + + ModerationImageURLInput = OpenAI::Models::ModerationImageURLInput + + ModerationModel = OpenAI::Models::ModerationModel + + ModerationMultiModalInput = OpenAI::Models::ModerationMultiModalInput + + ModerationTextInput = OpenAI::Models::ModerationTextInput + + OtherFileChunkingStrategyObject = + OpenAI::Models::OtherFileChunkingStrategyObject + + Reasoning = OpenAI::Models::Reasoning + + ReasoningEffort = OpenAI::Models::ReasoningEffort + + ResponseFormatJSONObject = OpenAI::Models::ResponseFormatJSONObject + + ResponseFormatJSONSchema = OpenAI::Models::ResponseFormatJSONSchema + + ResponseFormatText = OpenAI::Models::ResponseFormatText + + ResponseFormatTextGrammar = OpenAI::Models::ResponseFormatTextGrammar + + ResponseFormatTextPython = OpenAI::Models::ResponseFormatTextPython + + Responses = OpenAI::Models::Responses + + ResponsesModel = OpenAI::Models::ResponsesModel + + StaticFileChunkingStrategy = OpenAI::Models::StaticFileChunkingStrategy + + StaticFileChunkingStrategyObject = + OpenAI::Models::StaticFileChunkingStrategyObject + + StaticFileChunkingStrategyObjectParam = + OpenAI::Models::StaticFileChunkingStrategyObjectParam + + Upload = OpenAI::Models::Upload + + UploadCancelParams = OpenAI::Models::UploadCancelParams + + UploadCompleteParams = 
OpenAI::Models::UploadCompleteParams + + UploadCreateParams = OpenAI::Models::UploadCreateParams + + Uploads = OpenAI::Models::Uploads + + VectorStore = OpenAI::Models::VectorStore + + VectorStoreCreateParams = OpenAI::Models::VectorStoreCreateParams + + VectorStoreDeleted = OpenAI::Models::VectorStoreDeleted + + VectorStoreDeleteParams = OpenAI::Models::VectorStoreDeleteParams + + VectorStoreListParams = OpenAI::Models::VectorStoreListParams + + VectorStoreRetrieveParams = OpenAI::Models::VectorStoreRetrieveParams + + VectorStores = OpenAI::Models::VectorStores + + VectorStoreSearchParams = OpenAI::Models::VectorStoreSearchParams + + VectorStoreUpdateParams = OpenAI::Models::VectorStoreUpdateParams + + Webhooks = OpenAI::Models::Webhooks +end diff --git a/rbi/openai/models/all_models.rbi b/rbi/openai/models/all_models.rbi new file mode 100644 index 00000000..34231363 --- /dev/null +++ b/rbi/openai/models/all_models.rbi @@ -0,0 +1,83 @@ +# typed: strong + +module OpenAI + module Models + module AllModels + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + OpenAI::ChatModel::TaggedSymbol, + OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol + ) + end + + module ResponsesOnlyModel + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::AllModels::ResponsesOnlyModel) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + O1_PRO = + T.let(:"o1-pro", OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol) + O1_PRO_2025_03_19 = + T.let( + :"o1-pro-2025-03-19", + OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol + ) + O3_PRO = + T.let(:"o3-pro", OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol) + O3_PRO_2025_06_10 = + T.let( + :"o3-pro-2025-06-10", + OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol + ) + O3_DEEP_RESEARCH = + T.let( + :"o3-deep-research", + OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol + ) + O3_DEEP_RESEARCH_2025_06_26 = + T.let( + :"o3-deep-research-2025-06-26", + OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol + ) + O4_MINI_DEEP_RESEARCH = + T.let( + :"o4-mini-deep-research", + OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol + ) + O4_MINI_DEEP_RESEARCH_2025_06_26 = + T.let( + :"o4-mini-deep-research-2025-06-26", + OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol + ) + COMPUTER_USE_PREVIEW = + T.let( + :"computer-use-preview", + OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol + ) + COMPUTER_USE_PREVIEW_2025_03_11 = + T.let( + :"computer-use-preview-2025-03-11", + OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol] + ) + end + def self.values + end + end + + sig { override.returns(T::Array[OpenAI::AllModels::Variants]) } + def self.variants + end + end + end +end diff --git a/rbi/openai/models/audio/speech_create_params.rbi b/rbi/openai/models/audio/speech_create_params.rbi new file mode 100644 index 00000000..41949313 --- /dev/null +++ b/rbi/openai/models/audio/speech_create_params.rbi @@ -0,0 +1,321 @@ +# typed: strong + +module OpenAI + module Models + module Audio + class SpeechCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::Audio::SpeechCreateParams, OpenAI::Internal::AnyHash) + end + + # The text to generate audio for. The maximum length is 4096 characters. 
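+ #
+ # A hedged construction sketch (values are illustrative, not defaults):
+ #
+ # @example
+ #   OpenAI::Audio::SpeechCreateParams.new(
+ #     input: "Hello!",
+ #     model: :"gpt-4o-mini-tts",
+ #     voice: :alloy
+ #   )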
+ sig { returns(String) } + attr_accessor :input + + # One of the available [TTS models](https://platform.openai.com/docs/models#tts): + # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. + sig { returns(T.any(String, OpenAI::Audio::SpeechModel::OrSymbol)) } + attr_accessor :model + + # The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + # `verse`. Previews of the voices are available in the + # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). + sig do + returns( + T.any(String, OpenAI::Audio::SpeechCreateParams::Voice::OrSymbol) + ) + end + attr_accessor :voice + + # Control the voice of your generated audio with additional instructions. Does not + # work with `tts-1` or `tts-1-hd`. + sig { returns(T.nilable(String)) } + attr_reader :instructions + + sig { params(instructions: String).void } + attr_writer :instructions + + # The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, + # `wav`, and `pcm`. + sig do + returns( + T.nilable( + OpenAI::Audio::SpeechCreateParams::ResponseFormat::OrSymbol + ) + ) + end + attr_reader :response_format + + sig do + params( + response_format: + OpenAI::Audio::SpeechCreateParams::ResponseFormat::OrSymbol + ).void + end + attr_writer :response_format + + # The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is + # the default. + sig { returns(T.nilable(Float)) } + attr_reader :speed + + sig { params(speed: Float).void } + attr_writer :speed + + # The format to stream the audio in. Supported formats are `sse` and `audio`. + # `sse` is not supported for `tts-1` or `tts-1-hd`. + sig do + returns( + T.nilable(OpenAI::Audio::SpeechCreateParams::StreamFormat::OrSymbol) + ) + end + attr_reader :stream_format + + sig do + params( + stream_format: + OpenAI::Audio::SpeechCreateParams::StreamFormat::OrSymbol + ).void + end + attr_writer :stream_format + + sig do + params( + input: String, + model: T.any(String, OpenAI::Audio::SpeechModel::OrSymbol), + voice: + T.any(String, OpenAI::Audio::SpeechCreateParams::Voice::OrSymbol), + instructions: String, + response_format: + OpenAI::Audio::SpeechCreateParams::ResponseFormat::OrSymbol, + speed: Float, + stream_format: + OpenAI::Audio::SpeechCreateParams::StreamFormat::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The text to generate audio for. The maximum length is 4096 characters. + input:, + # One of the available [TTS models](https://platform.openai.com/docs/models#tts): + # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. + model:, + # The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + # `verse`. Previews of the voices are available in the + # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). + voice:, + # Control the voice of your generated audio with additional instructions. Does not + # work with `tts-1` or `tts-1-hd`. + instructions: nil, + # The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, + # `wav`, and `pcm`. + response_format: nil, + # The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is + # the default. + speed: nil, + # The format to stream the audio in. Supported formats are `sse` and `audio`. + # `sse` is not supported for `tts-1` or `tts-1-hd`. 
+ stream_format: nil,
+ request_options: {}
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ input: String,
+ model: T.any(String, OpenAI::Audio::SpeechModel::OrSymbol),
+ voice:
+ T.any(
+ String,
+ OpenAI::Audio::SpeechCreateParams::Voice::OrSymbol
+ ),
+ instructions: String,
+ response_format:
+ OpenAI::Audio::SpeechCreateParams::ResponseFormat::OrSymbol,
+ speed: Float,
+ stream_format:
+ OpenAI::Audio::SpeechCreateParams::StreamFormat::OrSymbol,
+ request_options: OpenAI::RequestOptions
+ }
+ )
+ end
+ def to_hash
+ end
+
+ # One of the available [TTS models](https://platform.openai.com/docs/models#tts):
+ # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+ module Model
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(String, OpenAI::Audio::SpeechModel::TaggedSymbol)
+ end
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::Audio::SpeechCreateParams::Model::Variants]
+ )
+ end
+ def self.variants
+ end
+ end
+
+ # The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
+ # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
+ # `verse`. Previews of the voices are available in the
+ # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
+ module Voice
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ String,
+ OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol
+ )
+ end
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::Audio::SpeechCreateParams::Voice::Variants]
+ )
+ end
+ def self.variants
+ end
+
+ TaggedSymbol =
+ T.type_alias do
+ T.all(Symbol, OpenAI::Audio::SpeechCreateParams::Voice)
+ end
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ ALLOY =
+ T.let(
+ :alloy,
+ OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol
+ )
+ ASH =
+ T.let(:ash, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
+ BALLAD =
+ T.let(
+ :ballad,
+ OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol
+ )
+ CORAL =
+ T.let(
+ :coral,
+ OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol
+ )
+ ECHO =
+ T.let(:echo, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
+ FABLE =
+ T.let(
+ :fable,
+ OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol
+ )
+ ONYX =
+ T.let(:onyx, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
+ NOVA =
+ T.let(:nova, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
+ SAGE =
+ T.let(:sage, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
+ SHIMMER =
+ T.let(
+ :shimmer,
+ OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol
+ )
+ VERSE =
+ T.let(
+ :verse,
+ OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol
+ )
+ end
+
+ # The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
+ # `wav`, and `pcm`.
+ module ResponseFormat
+ extend OpenAI::Internal::Type::Enum
+
+ TaggedSymbol =
+ T.type_alias do
+ T.all(Symbol, OpenAI::Audio::SpeechCreateParams::ResponseFormat)
+ end
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ MP3 =
+ T.let(
+ :mp3,
+ OpenAI::Audio::SpeechCreateParams::ResponseFormat::TaggedSymbol
+ )
+ OPUS =
+ T.let(
+ :opus,
+ OpenAI::Audio::SpeechCreateParams::ResponseFormat::TaggedSymbol
+ )
+ AAC =
+ T.let(
+ :aac,
+ OpenAI::Audio::SpeechCreateParams::ResponseFormat::TaggedSymbol
+ )
+ FLAC =
+ T.let(
+ :flac,
+ OpenAI::Audio::SpeechCreateParams::ResponseFormat::TaggedSymbol
+ )
+ WAV =
+ T.let(
+ :wav,
+ OpenAI::Audio::SpeechCreateParams::ResponseFormat::TaggedSymbol
+ )
+ PCM =
+ T.let(
+ :pcm,
+ OpenAI::Audio::SpeechCreateParams::ResponseFormat::TaggedSymbol
+ )
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::Audio::SpeechCreateParams::ResponseFormat::TaggedSymbol
+ ]
+ )
+ end
+ def self.values
+ end
+ end
+
+ # The format to stream the audio in. 
Supported formats are `sse` and `audio`. + # `sse` is not supported for `tts-1` or `tts-1-hd`. + module StreamFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Audio::SpeechCreateParams::StreamFormat) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SSE = + T.let( + :sse, + OpenAI::Audio::SpeechCreateParams::StreamFormat::TaggedSymbol + ) + AUDIO = + T.let( + :audio, + OpenAI::Audio::SpeechCreateParams::StreamFormat::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Audio::SpeechCreateParams::StreamFormat::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/audio/speech_model.rbi b/rbi/openai/models/audio/speech_model.rbi new file mode 100644 index 00000000..882f990f --- /dev/null +++ b/rbi/openai/models/audio/speech_model.rbi @@ -0,0 +1,26 @@ +# typed: strong + +module OpenAI + module Models + module Audio + module SpeechModel + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::Audio::SpeechModel) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TTS_1 = T.let(:"tts-1", OpenAI::Audio::SpeechModel::TaggedSymbol) + TTS_1_HD = T.let(:"tts-1-hd", OpenAI::Audio::SpeechModel::TaggedSymbol) + GPT_4O_MINI_TTS = + T.let(:"gpt-4o-mini-tts", OpenAI::Audio::SpeechModel::TaggedSymbol) + + sig do + override.returns(T::Array[OpenAI::Audio::SpeechModel::TaggedSymbol]) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/audio/transcription.rbi b/rbi/openai/models/audio/transcription.rbi new file mode 100644 index 00000000..09191541 --- /dev/null +++ b/rbi/openai/models/audio/transcription.rbi @@ -0,0 +1,328 @@ +# typed: strong + +module OpenAI + module Models + module Audio + class Transcription < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Audio::Transcription, OpenAI::Internal::AnyHash) + end + + # The transcribed text. + sig { returns(String) } + attr_accessor :text + + # The log probabilities of the tokens in the transcription. Only returned with the + # models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added + # to the `include` array. + sig do + returns(T.nilable(T::Array[OpenAI::Audio::Transcription::Logprob])) + end + attr_reader :logprobs + + sig do + params( + logprobs: T::Array[OpenAI::Audio::Transcription::Logprob::OrHash] + ).void + end + attr_writer :logprobs + + # Token usage statistics for the request. + sig do + returns(T.nilable(OpenAI::Audio::Transcription::Usage::Variants)) + end + attr_reader :usage + + sig do + params( + usage: + T.any( + OpenAI::Audio::Transcription::Usage::Tokens::OrHash, + OpenAI::Audio::Transcription::Usage::Duration::OrHash + ) + ).void + end + attr_writer :usage + + # Represents a transcription response returned by model, based on the provided + # input. + sig do + params( + text: String, + logprobs: T::Array[OpenAI::Audio::Transcription::Logprob::OrHash], + usage: + T.any( + OpenAI::Audio::Transcription::Usage::Tokens::OrHash, + OpenAI::Audio::Transcription::Usage::Duration::OrHash + ) + ).returns(T.attached_class) + end + def self.new( + # The transcribed text. + text:, + # The log probabilities of the tokens in the transcription. Only returned with the + # models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added + # to the `include` array. + logprobs: nil, + # Token usage statistics for the request. 
+ usage: nil + ) + end + + sig do + override.returns( + { + text: String, + logprobs: T::Array[OpenAI::Audio::Transcription::Logprob], + usage: OpenAI::Audio::Transcription::Usage::Variants + } + ) + end + def to_hash + end + + class Logprob < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Audio::Transcription::Logprob, + OpenAI::Internal::AnyHash + ) + end + + # The token in the transcription. + sig { returns(T.nilable(String)) } + attr_reader :token + + sig { params(token: String).void } + attr_writer :token + + # The bytes of the token. + sig { returns(T.nilable(T::Array[Float])) } + attr_reader :bytes + + sig { params(bytes: T::Array[Float]).void } + attr_writer :bytes + + # The log probability of the token. + sig { returns(T.nilable(Float)) } + attr_reader :logprob + + sig { params(logprob: Float).void } + attr_writer :logprob + + sig do + params( + token: String, + bytes: T::Array[Float], + logprob: Float + ).returns(T.attached_class) + end + def self.new( + # The token in the transcription. + token: nil, + # The bytes of the token. + bytes: nil, + # The log probability of the token. + logprob: nil + ) + end + + sig do + override.returns( + { token: String, bytes: T::Array[Float], logprob: Float } + ) + end + def to_hash + end + end + + # Token usage statistics for the request. + module Usage + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Audio::Transcription::Usage::Tokens, + OpenAI::Audio::Transcription::Usage::Duration + ) + end + + class Tokens < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Audio::Transcription::Usage::Tokens, + OpenAI::Internal::AnyHash + ) + end + + # Number of input tokens billed for this request. + sig { returns(Integer) } + attr_accessor :input_tokens + + # Number of output tokens generated. + sig { returns(Integer) } + attr_accessor :output_tokens + + # Total number of tokens used (input + output). + sig { returns(Integer) } + attr_accessor :total_tokens + + # The type of the usage object. Always `tokens` for this variant. + sig { returns(Symbol) } + attr_accessor :type + + # Details about the input tokens billed for this request. + sig do + returns( + T.nilable( + OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails + ) + ) + end + attr_reader :input_token_details + + sig do + params( + input_token_details: + OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails::OrHash + ).void + end + attr_writer :input_token_details + + # Usage statistics for models billed by token usage. + sig do + params( + input_tokens: Integer, + output_tokens: Integer, + total_tokens: Integer, + input_token_details: + OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Number of input tokens billed for this request. + input_tokens:, + # Number of output tokens generated. + output_tokens:, + # Total number of tokens used (input + output). + total_tokens:, + # Details about the input tokens billed for this request. + input_token_details: nil, + # The type of the usage object. Always `tokens` for this variant. 
+ type: :tokens + ) + end + + sig do + override.returns( + { + input_tokens: Integer, + output_tokens: Integer, + total_tokens: Integer, + type: Symbol, + input_token_details: + OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails + } + ) + end + def to_hash + end + + class InputTokenDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails, + OpenAI::Internal::AnyHash + ) + end + + # Number of audio tokens billed for this request. + sig { returns(T.nilable(Integer)) } + attr_reader :audio_tokens + + sig { params(audio_tokens: Integer).void } + attr_writer :audio_tokens + + # Number of text tokens billed for this request. + sig { returns(T.nilable(Integer)) } + attr_reader :text_tokens + + sig { params(text_tokens: Integer).void } + attr_writer :text_tokens + + # Details about the input tokens billed for this request. + sig do + params(audio_tokens: Integer, text_tokens: Integer).returns( + T.attached_class + ) + end + def self.new( + # Number of audio tokens billed for this request. + audio_tokens: nil, + # Number of text tokens billed for this request. + text_tokens: nil + ) + end + + sig do + override.returns( + { audio_tokens: Integer, text_tokens: Integer } + ) + end + def to_hash + end + end + end + + class Duration < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Audio::Transcription::Usage::Duration, + OpenAI::Internal::AnyHash + ) + end + + # Duration of the input audio in seconds. + sig { returns(Float) } + attr_accessor :seconds + + # The type of the usage object. Always `duration` for this variant. + sig { returns(Symbol) } + attr_accessor :type + + # Usage statistics for models billed by audio input duration. + sig do + params(seconds: Float, type: Symbol).returns(T.attached_class) + end + def self.new( + # Duration of the input audio in seconds. + seconds:, + # The type of the usage object. Always `duration` for this variant. + type: :duration + ) + end + + sig { override.returns({ seconds: Float, type: Symbol }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[OpenAI::Audio::Transcription::Usage::Variants] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/audio/transcription_create_params.rbi b/rbi/openai/models/audio/transcription_create_params.rbi new file mode 100644 index 00000000..c3dc13df --- /dev/null +++ b/rbi/openai/models/audio/transcription_create_params.rbi @@ -0,0 +1,424 @@ +# typed: strong + +module OpenAI + module Models + module Audio + class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Audio::TranscriptionCreateParams, + OpenAI::Internal::AnyHash + ) + end + + # The audio file object (not file name) to transcribe, in one of these formats: + # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + sig { returns(OpenAI::Internal::FileInput) } + attr_accessor :file + + # ID of the model to use. The options are `gpt-4o-transcribe`, + # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + # Whisper V2 model). + sig { returns(T.any(String, OpenAI::AudioModel::OrSymbol)) } + attr_accessor :model + + # Controls how the audio is cut into chunks. 
When set to `"auto"`, the server + # first normalizes loudness and then uses voice activity detection (VAD) to choose + # boundaries. `server_vad` object can be provided to tweak VAD detection + # parameters manually. If unset, the audio is transcribed as a single block. + sig do + returns( + T.nilable( + T.any( + Symbol, + OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig + ) + ) + ) + end + attr_accessor :chunking_strategy + + # Additional information to include in the transcription response. `logprobs` will + # return the log probabilities of the tokens in the response to understand the + # model's confidence in the transcription. `logprobs` only works with + # response_format set to `json` and only with the models `gpt-4o-transcribe` and + # `gpt-4o-mini-transcribe`. + sig do + returns( + T.nilable(T::Array[OpenAI::Audio::TranscriptionInclude::OrSymbol]) + ) + end + attr_reader :include + + sig do + params( + include: T::Array[OpenAI::Audio::TranscriptionInclude::OrSymbol] + ).void + end + attr_writer :include + + # The language of the input audio. Supplying the input language in + # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + # format will improve accuracy and latency. + sig { returns(T.nilable(String)) } + attr_reader :language + + sig { params(language: String).void } + attr_writer :language + + # An optional text to guide the model's style or continue a previous audio + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should match the audio language. + sig { returns(T.nilable(String)) } + attr_reader :prompt + + sig { params(prompt: String).void } + attr_writer :prompt + + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + # the only supported format is `json`. + sig { returns(T.nilable(OpenAI::AudioResponseFormat::OrSymbol)) } + attr_reader :response_format + + sig do + params(response_format: OpenAI::AudioResponseFormat::OrSymbol).void + end + attr_writer :response_format + + # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. + sig { returns(T.nilable(Float)) } + attr_reader :temperature + + sig { params(temperature: Float).void } + attr_writer :temperature + + # The timestamp granularities to populate for this transcription. + # `response_format` must be set `verbose_json` to use timestamp granularities. + # Either or both of these options are supported: `word`, or `segment`. Note: There + # is no additional latency for segment timestamps, but generating word timestamps + # incurs additional latency. 
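+ #
+ # A hedged request sketch (file path and values are illustrative):
+ #
+ # @example
+ #   OpenAI::Audio::TranscriptionCreateParams.new(
+ #     file: Pathname("speech.mp3"),
+ #     model: :"whisper-1",
+ #     response_format: :verbose_json,
+ #     timestamp_granularities: [:word, :segment]
+ #   )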
+ sig do + returns( + T.nilable( + T::Array[ + OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity::OrSymbol + ] + ) + ) + end + attr_reader :timestamp_granularities + + sig do + params( + timestamp_granularities: + T::Array[ + OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity::OrSymbol + ] + ).void + end + attr_writer :timestamp_granularities + + sig do + params( + file: OpenAI::Internal::FileInput, + model: T.any(String, OpenAI::AudioModel::OrSymbol), + chunking_strategy: + T.nilable( + T.any( + Symbol, + OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::OrHash + ) + ), + include: T::Array[OpenAI::Audio::TranscriptionInclude::OrSymbol], + language: String, + prompt: String, + response_format: OpenAI::AudioResponseFormat::OrSymbol, + temperature: Float, + timestamp_granularities: + T::Array[ + OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity::OrSymbol + ], + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The audio file object (not file name) to transcribe, in one of these formats: + # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + file:, + # ID of the model to use. The options are `gpt-4o-transcribe`, + # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + # Whisper V2 model). + model:, + # Controls how the audio is cut into chunks. When set to `"auto"`, the server + # first normalizes loudness and then uses voice activity detection (VAD) to choose + # boundaries. `server_vad` object can be provided to tweak VAD detection + # parameters manually. If unset, the audio is transcribed as a single block. + chunking_strategy: nil, + # Additional information to include in the transcription response. `logprobs` will + # return the log probabilities of the tokens in the response to understand the + # model's confidence in the transcription. `logprobs` only works with + # response_format set to `json` and only with the models `gpt-4o-transcribe` and + # `gpt-4o-mini-transcribe`. + include: nil, + # The language of the input audio. Supplying the input language in + # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + # format will improve accuracy and latency. + language: nil, + # An optional text to guide the model's style or continue a previous audio + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should match the audio language. + prompt: nil, + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + # the only supported format is `json`. + response_format: nil, + # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. + temperature: nil, + # The timestamp granularities to populate for this transcription. + # `response_format` must be set `verbose_json` to use timestamp granularities. + # Either or both of these options are supported: `word`, or `segment`. Note: There + # is no additional latency for segment timestamps, but generating word timestamps + # incurs additional latency. 
+ timestamp_granularities: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + file: OpenAI::Internal::FileInput, + model: T.any(String, OpenAI::AudioModel::OrSymbol), + chunking_strategy: + T.nilable( + T.any( + Symbol, + OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig + ) + ), + include: T::Array[OpenAI::Audio::TranscriptionInclude::OrSymbol], + language: String, + prompt: String, + response_format: OpenAI::AudioResponseFormat::OrSymbol, + temperature: Float, + timestamp_granularities: + T::Array[ + OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity::OrSymbol + ], + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # ID of the model to use. The options are `gpt-4o-transcribe`, + # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + # Whisper V2 model). + module Model + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias { T.any(String, OpenAI::AudioModel::TaggedSymbol) } + + sig do + override.returns( + T::Array[ + OpenAI::Audio::TranscriptionCreateParams::Model::Variants + ] + ) + end + def self.variants + end + end + + # Controls how the audio is cut into chunks. When set to `"auto"`, the server + # first normalizes loudness and then uses voice activity detection (VAD) to choose + # boundaries. `server_vad` object can be provided to tweak VAD detection + # parameters manually. If unset, the audio is transcribed as a single block. + module ChunkingStrategy + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + Symbol, + OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig + ) + end + + class VadConfig < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, + OpenAI::Internal::AnyHash + ) + end + + # Must be set to `server_vad` to enable manual chunking using server side VAD. + sig do + returns( + OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type::OrSymbol + ) + end + attr_accessor :type + + # Amount of audio to include before the VAD detected speech (in milliseconds). + sig { returns(T.nilable(Integer)) } + attr_reader :prefix_padding_ms + + sig { params(prefix_padding_ms: Integer).void } + attr_writer :prefix_padding_ms + + # Duration of silence to detect speech stop (in milliseconds). With shorter values + # the model will respond more quickly, but may jump in on short pauses from the + # user. + sig { returns(T.nilable(Integer)) } + attr_reader :silence_duration_ms + + sig { params(silence_duration_ms: Integer).void } + attr_writer :silence_duration_ms + + # Sensitivity threshold (0.0 to 1.0) for voice activity detection. A higher + # threshold will require louder audio to activate the model, and thus might + # perform better in noisy environments. + sig { returns(T.nilable(Float)) } + attr_reader :threshold + + sig { params(threshold: Float).void } + attr_writer :threshold + + sig do + params( + type: + OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type::OrSymbol, + prefix_padding_ms: Integer, + silence_duration_ms: Integer, + threshold: Float + ).returns(T.attached_class) + end + def self.new( + # Must be set to `server_vad` to enable manual chunking using server side VAD. + type:, + # Amount of audio to include before the VAD detected speech (in milliseconds). + prefix_padding_ms: nil, + # Duration of silence to detect speech stop (in milliseconds). 
+              # With shorter values the model will respond more quickly, but may jump in on
+              # short pauses from the user.
+              silence_duration_ms: nil,
+              # Sensitivity threshold (0.0 to 1.0) for voice activity detection. A higher
+              # threshold will require louder audio to activate the model, and thus might
+              # perform better in noisy environments.
+              threshold: nil
+            )
+            end
+
+            sig do
+              override.returns(
+                {
+                  type:
+                    OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type::OrSymbol,
+                  prefix_padding_ms: Integer,
+                  silence_duration_ms: Integer,
+                  threshold: Float
+                }
+              )
+            end
+            def to_hash
+            end
+
+            # Must be set to `server_vad` to enable manual chunking using server side VAD.
+            module Type
+              extend OpenAI::Internal::Type::Enum
+
+              TaggedSymbol =
+                T.type_alias do
+                  T.all(
+                    Symbol,
+                    OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type
+                  )
+                end
+              OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+              SERVER_VAD =
+                T.let(
+                  :server_vad,
+                  OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type::TaggedSymbol
+                )
+
+              sig do
+                override.returns(
+                  T::Array[
+                    OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type::TaggedSymbol
+                  ]
+                )
+              end
+              def self.values
+              end
+            end
+          end
+
+          sig do
+            override.returns(
+              T::Array[
+                OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::Variants
+              ]
+            )
+          end
+          def self.variants
+          end
+        end
+
+        module TimestampGranularity
+          extend OpenAI::Internal::Type::Enum
+
+          TaggedSymbol =
+            T.type_alias do
+              T.all(
+                Symbol,
+                OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity
+              )
+            end
+          OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+          WORD =
+            T.let(
+              :word,
+              OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity::TaggedSymbol
+            )
+          SEGMENT =
+            T.let(
+              :segment,
+              OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity::TaggedSymbol
+            )
+
+          sig do
+            override.returns(
+              T::Array[
+                OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity::TaggedSymbol
+              ]
+            )
+          end
+          def self.values
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/audio/transcription_create_response.rbi b/rbi/openai/models/audio/transcription_create_response.rbi
new file mode 100644
index 00000000..a96f3c65
--- /dev/null
+++ b/rbi/openai/models/audio/transcription_create_response.rbi
@@ -0,0 +1,31 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Audio
+      # Represents a transcription response returned by the model, based on the
+      # provided input.
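+      #
+      # A sketch of how a caller might discriminate the two variants, assuming a
+      # `response` value already returned by the transcription endpoint:
+      #
+      #   case response
+      #   in OpenAI::Audio::TranscriptionVerbose
+      #     response.segments&.each { |segment| puts(segment.text) }
+      #   in OpenAI::Audio::Transcription
+      #     puts(response.text)
+      #   end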
+ module TranscriptionCreateResponse + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Audio::Transcription, + OpenAI::Audio::TranscriptionVerbose + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::Audio::TranscriptionCreateResponse::Variants + ] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/audio/transcription_include.rbi b/rbi/openai/models/audio/transcription_include.rbi new file mode 100644 index 00000000..a9400086 --- /dev/null +++ b/rbi/openai/models/audio/transcription_include.rbi @@ -0,0 +1,26 @@ +# typed: strong + +module OpenAI + module Models + module Audio + module TranscriptionInclude + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::Audio::TranscriptionInclude) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOGPROBS = + T.let(:logprobs, OpenAI::Audio::TranscriptionInclude::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::Audio::TranscriptionInclude::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/audio/transcription_segment.rbi b/rbi/openai/models/audio/transcription_segment.rbi new file mode 100644 index 00000000..a86ed570 --- /dev/null +++ b/rbi/openai/models/audio/transcription_segment.rbi @@ -0,0 +1,120 @@ +# typed: strong + +module OpenAI + module Models + module Audio + class TranscriptionSegment < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Audio::TranscriptionSegment, + OpenAI::Internal::AnyHash + ) + end + + # Unique identifier of the segment. + sig { returns(Integer) } + attr_accessor :id + + # Average logprob of the segment. If the value is lower than -1, consider the + # logprobs failed. + sig { returns(Float) } + attr_accessor :avg_logprob + + # Compression ratio of the segment. If the value is greater than 2.4, consider the + # compression failed. + sig { returns(Float) } + attr_accessor :compression_ratio + + # End time of the segment in seconds. + sig { returns(Float) } + attr_accessor :end_ + + # Probability of no speech in the segment. If the value is higher than 1.0 and the + # `avg_logprob` is below -1, consider this segment silent. + sig { returns(Float) } + attr_accessor :no_speech_prob + + # Seek offset of the segment. + sig { returns(Integer) } + attr_accessor :seek + + # Start time of the segment in seconds. + sig { returns(Float) } + attr_accessor :start + + # Temperature parameter used for generating the segment. + sig { returns(Float) } + attr_accessor :temperature + + # Text content of the segment. + sig { returns(String) } + attr_accessor :text + + # Array of token IDs for the text content. + sig { returns(T::Array[Integer]) } + attr_accessor :tokens + + sig do + params( + id: Integer, + avg_logprob: Float, + compression_ratio: Float, + end_: Float, + no_speech_prob: Float, + seek: Integer, + start: Float, + temperature: Float, + text: String, + tokens: T::Array[Integer] + ).returns(T.attached_class) + end + def self.new( + # Unique identifier of the segment. + id:, + # Average logprob of the segment. If the value is lower than -1, consider the + # logprobs failed. + avg_logprob:, + # Compression ratio of the segment. If the value is greater than 2.4, consider the + # compression failed. + compression_ratio:, + # End time of the segment in seconds. + end_:, + # Probability of no speech in the segment. 
+          # If the value is higher than 1.0 and the `avg_logprob` is below -1, consider
+          # this segment silent.
+          no_speech_prob:,
+          # Seek offset of the segment.
+          seek:,
+          # Start time of the segment in seconds.
+          start:,
+          # Temperature parameter used for generating the segment.
+          temperature:,
+          # Text content of the segment.
+          text:,
+          # Array of token IDs for the text content.
+          tokens:
+        )
+        end
+
+        sig do
+          override.returns(
+            {
+              id: Integer,
+              avg_logprob: Float,
+              compression_ratio: Float,
+              end_: Float,
+              no_speech_prob: Float,
+              seek: Integer,
+              start: Float,
+              temperature: Float,
+              text: String,
+              tokens: T::Array[Integer]
+            }
+          )
+        end
+        def to_hash
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/audio/transcription_stream_event.rbi b/rbi/openai/models/audio/transcription_stream_event.rbi
new file mode 100644
index 00000000..7c5989b6
--- /dev/null
+++ b/rbi/openai/models/audio/transcription_stream_event.rbi
@@ -0,0 +1,31 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Audio
+      # Emitted when there is an additional text delta. This is also the first event
+      # emitted when the transcription starts. Only emitted when you
+      # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
+      # with the `stream` parameter set to `true`.
+      module TranscriptionStreamEvent
+        extend OpenAI::Internal::Type::Union
+
+        Variants =
+          T.type_alias do
+            T.any(
+              OpenAI::Audio::TranscriptionTextDeltaEvent,
+              OpenAI::Audio::TranscriptionTextDoneEvent
+            )
+          end
+
+        sig do
+          override.returns(
+            T::Array[OpenAI::Audio::TranscriptionStreamEvent::Variants]
+          )
+        end
+        def self.variants
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/audio/transcription_text_delta_event.rbi b/rbi/openai/models/audio/transcription_text_delta_event.rbi
new file mode 100644
index 00000000..d8707c86
--- /dev/null
+++ b/rbi/openai/models/audio/transcription_text_delta_event.rbi
@@ -0,0 +1,142 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Audio
+      class TranscriptionTextDeltaEvent < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::Audio::TranscriptionTextDeltaEvent,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The text delta that was additionally transcribed.
+        sig { returns(String) }
+        attr_accessor :delta
+
+        # The type of the event. Always `transcript.text.delta`.
+        sig { returns(Symbol) }
+        attr_accessor :type
+
+        # The log probabilities of the delta. Only included if you
+        # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
+        # with the `include[]` parameter set to `logprobs`.
+        sig do
+          returns(
+            T.nilable(
+              T::Array[OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob]
+            )
+          )
+        end
+        attr_reader :logprobs
+
+        sig do
+          params(
+            logprobs:
+              T::Array[
+                OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob::OrHash
+              ]
+          ).void
+        end
+        attr_writer :logprobs
+
+        # Emitted when there is an additional text delta. This is also the first event
+        # emitted when the transcription starts. Only emitted when you
+        # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
+        # with the `stream` parameter set to `true`.
+        sig do
+          params(
+            delta: String,
+            logprobs:
+              T::Array[
+                OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob::OrHash
+              ],
+            type: Symbol
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # The text delta that was additionally transcribed.
+          delta:,
+          # The log probabilities of the delta. Only included if you
+          # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
+          # with the `include[]` parameter set to `logprobs`.
+          logprobs: nil,
+          # The type of the event. Always `transcript.text.delta`.
+          type: :"transcript.text.delta"
+        )
+        end
+
+        sig do
+          override.returns(
+            {
+              delta: String,
+              type: Symbol,
+              logprobs:
+                T::Array[OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob]
+            }
+          )
+        end
+        def to_hash
+        end
+
+        class Logprob < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          # The token that was used to generate the log probability.
+          sig { returns(T.nilable(String)) }
+          attr_reader :token
+
+          sig { params(token: String).void }
+          attr_writer :token
+
+          # The bytes that were used to generate the log probability.
+          sig { returns(T.nilable(T::Array[Integer])) }
+          attr_reader :bytes
+
+          sig { params(bytes: T::Array[Integer]).void }
+          attr_writer :bytes
+
+          # The log probability of the token.
+          sig { returns(T.nilable(Float)) }
+          attr_reader :logprob
+
+          sig { params(logprob: Float).void }
+          attr_writer :logprob
+
+          sig do
+            params(
+              token: String,
+              bytes: T::Array[Integer],
+              logprob: Float
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # The token that was used to generate the log probability.
+            token: nil,
+            # The bytes that were used to generate the log probability.
+            bytes: nil,
+            # The log probability of the token.
+            logprob: nil
+          )
+          end
+
+          sig do
+            override.returns(
+              { token: String, bytes: T::Array[Integer], logprob: Float }
+            )
+          end
+          def to_hash
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/audio/transcription_text_done_event.rbi b/rbi/openai/models/audio/transcription_text_done_event.rbi
new file mode 100644
index 00000000..c1135e31
--- /dev/null
+++ b/rbi/openai/models/audio/transcription_text_done_event.rbi
@@ -0,0 +1,289 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Audio
+      class TranscriptionTextDoneEvent < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::Audio::TranscriptionTextDoneEvent,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The text that was transcribed.
+        sig { returns(String) }
+        attr_accessor :text
+
+        # The type of the event. Always `transcript.text.done`.
+        sig { returns(Symbol) }
+        attr_accessor :type
+
+        # The log probabilities of the individual tokens in the transcription. Only
+        # included if you
+        # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
+        # with the `include[]` parameter set to `logprobs`.
+        sig do
+          returns(
+            T.nilable(
+              T::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob]
+            )
+          )
+        end
+        attr_reader :logprobs
+
+        sig do
+          params(
+            logprobs:
+              T::Array[
+                OpenAI::Audio::TranscriptionTextDoneEvent::Logprob::OrHash
+              ]
+          ).void
+        end
+        attr_writer :logprobs
+
+        # Usage statistics for models billed by token usage.
+        sig do
+          returns(T.nilable(OpenAI::Audio::TranscriptionTextDoneEvent::Usage))
+        end
+        attr_reader :usage
+
+        sig do
+          params(
+            usage: OpenAI::Audio::TranscriptionTextDoneEvent::Usage::OrHash
+          ).void
+        end
+        attr_writer :usage
+
+        # Emitted when the transcription is complete. Contains the complete transcription
+        # text. Only emitted when you
+        # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
+        # with the `stream` parameter set to `true`.
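+        #
+        # A hypothetical consumption loop (the `stream` variable and enumeration
+        # style are illustrative assumptions, not taken from this diff):
+        #
+        #   stream.each do |event|
+        #     case event
+        #     in OpenAI::Audio::TranscriptionTextDeltaEvent
+        #       print(event.delta)
+        #     in OpenAI::Audio::TranscriptionTextDoneEvent
+        #       puts("\n#{event.text}")
+        #     end
+        #   end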
+ sig do + params( + text: String, + logprobs: + T::Array[ + OpenAI::Audio::TranscriptionTextDoneEvent::Logprob::OrHash + ], + usage: OpenAI::Audio::TranscriptionTextDoneEvent::Usage::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The text that was transcribed. + text:, + # The log probabilities of the individual tokens in the transcription. Only + # included if you + # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + # with the `include[]` parameter set to `logprobs`. + logprobs: nil, + # Usage statistics for models billed by token usage. + usage: nil, + # The type of the event. Always `transcript.text.done`. + type: :"transcript.text.done" + ) + end + + sig do + override.returns( + { + text: String, + type: Symbol, + logprobs: + T::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob], + usage: OpenAI::Audio::TranscriptionTextDoneEvent::Usage + } + ) + end + def to_hash + end + + class Logprob < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Audio::TranscriptionTextDoneEvent::Logprob, + OpenAI::Internal::AnyHash + ) + end + + # The token that was used to generate the log probability. + sig { returns(T.nilable(String)) } + attr_reader :token + + sig { params(token: String).void } + attr_writer :token + + # The bytes that were used to generate the log probability. + sig { returns(T.nilable(T::Array[Integer])) } + attr_reader :bytes + + sig { params(bytes: T::Array[Integer]).void } + attr_writer :bytes + + # The log probability of the token. + sig { returns(T.nilable(Float)) } + attr_reader :logprob + + sig { params(logprob: Float).void } + attr_writer :logprob + + sig do + params( + token: String, + bytes: T::Array[Integer], + logprob: Float + ).returns(T.attached_class) + end + def self.new( + # The token that was used to generate the log probability. + token: nil, + # The bytes that were used to generate the log probability. + bytes: nil, + # The log probability of the token. + logprob: nil + ) + end + + sig do + override.returns( + { token: String, bytes: T::Array[Integer], logprob: Float } + ) + end + def to_hash + end + end + + class Usage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Audio::TranscriptionTextDoneEvent::Usage, + OpenAI::Internal::AnyHash + ) + end + + # Number of input tokens billed for this request. + sig { returns(Integer) } + attr_accessor :input_tokens + + # Number of output tokens generated. + sig { returns(Integer) } + attr_accessor :output_tokens + + # Total number of tokens used (input + output). + sig { returns(Integer) } + attr_accessor :total_tokens + + # The type of the usage object. Always `tokens` for this variant. + sig { returns(Symbol) } + attr_accessor :type + + # Details about the input tokens billed for this request. + sig do + returns( + T.nilable( + OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails + ) + ) + end + attr_reader :input_token_details + + sig do + params( + input_token_details: + OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails::OrHash + ).void + end + attr_writer :input_token_details + + # Usage statistics for models billed by token usage. 
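+          #
+          # Per the field docs above, `total_tokens` is the sum of the other two
+          # counts; e.g. 120 input tokens and 35 output tokens imply a
+          # `total_tokens` of 155.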
+ sig do + params( + input_tokens: Integer, + output_tokens: Integer, + total_tokens: Integer, + input_token_details: + OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Number of input tokens billed for this request. + input_tokens:, + # Number of output tokens generated. + output_tokens:, + # Total number of tokens used (input + output). + total_tokens:, + # Details about the input tokens billed for this request. + input_token_details: nil, + # The type of the usage object. Always `tokens` for this variant. + type: :tokens + ) + end + + sig do + override.returns( + { + input_tokens: Integer, + output_tokens: Integer, + total_tokens: Integer, + type: Symbol, + input_token_details: + OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails + } + ) + end + def to_hash + end + + class InputTokenDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails, + OpenAI::Internal::AnyHash + ) + end + + # Number of audio tokens billed for this request. + sig { returns(T.nilable(Integer)) } + attr_reader :audio_tokens + + sig { params(audio_tokens: Integer).void } + attr_writer :audio_tokens + + # Number of text tokens billed for this request. + sig { returns(T.nilable(Integer)) } + attr_reader :text_tokens + + sig { params(text_tokens: Integer).void } + attr_writer :text_tokens + + # Details about the input tokens billed for this request. + sig do + params(audio_tokens: Integer, text_tokens: Integer).returns( + T.attached_class + ) + end + def self.new( + # Number of audio tokens billed for this request. + audio_tokens: nil, + # Number of text tokens billed for this request. + text_tokens: nil + ) + end + + sig do + override.returns({ audio_tokens: Integer, text_tokens: Integer }) + end + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/audio/transcription_verbose.rbi b/rbi/openai/models/audio/transcription_verbose.rbi new file mode 100644 index 00000000..e3351d37 --- /dev/null +++ b/rbi/openai/models/audio/transcription_verbose.rbi @@ -0,0 +1,135 @@ +# typed: strong + +module OpenAI + module Models + module Audio + class TranscriptionVerbose < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Audio::TranscriptionVerbose, + OpenAI::Internal::AnyHash + ) + end + + # The duration of the input audio. + sig { returns(Float) } + attr_accessor :duration + + # The language of the input audio. + sig { returns(String) } + attr_accessor :language + + # The transcribed text. + sig { returns(String) } + attr_accessor :text + + # Segments of the transcribed text and their corresponding details. + sig do + returns(T.nilable(T::Array[OpenAI::Audio::TranscriptionSegment])) + end + attr_reader :segments + + sig do + params( + segments: T::Array[OpenAI::Audio::TranscriptionSegment::OrHash] + ).void + end + attr_writer :segments + + # Usage statistics for models billed by audio input duration. + sig { returns(T.nilable(OpenAI::Audio::TranscriptionVerbose::Usage)) } + attr_reader :usage + + sig do + params(usage: OpenAI::Audio::TranscriptionVerbose::Usage::OrHash).void + end + attr_writer :usage + + # Extracted words and their corresponding timestamps. 
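+        #
+        # A sketch of reading word timings from a populated verbose response
+        # (the variable name is illustrative):
+        #
+        #   transcription.words&.each do |w|
+        #     printf("%.2fs-%.2fs %s\n", w.start, w.end_, w.word)
+        #   end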
+        sig { returns(T.nilable(T::Array[OpenAI::Audio::TranscriptionWord])) }
+        attr_reader :words
+
+        sig do
+          params(words: T::Array[OpenAI::Audio::TranscriptionWord::OrHash]).void
+        end
+        attr_writer :words
+
+        # Represents a verbose JSON transcription response returned by the model, based
+        # on the provided input.
+        sig do
+          params(
+            duration: Float,
+            language: String,
+            text: String,
+            segments: T::Array[OpenAI::Audio::TranscriptionSegment::OrHash],
+            usage: OpenAI::Audio::TranscriptionVerbose::Usage::OrHash,
+            words: T::Array[OpenAI::Audio::TranscriptionWord::OrHash]
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # The duration of the input audio.
+          duration:,
+          # The language of the input audio.
+          language:,
+          # The transcribed text.
+          text:,
+          # Segments of the transcribed text and their corresponding details.
+          segments: nil,
+          # Usage statistics for models billed by audio input duration.
+          usage: nil,
+          # Extracted words and their corresponding timestamps.
+          words: nil
+        )
+        end
+
+        sig do
+          override.returns(
+            {
+              duration: Float,
+              language: String,
+              text: String,
+              segments: T::Array[OpenAI::Audio::TranscriptionSegment],
+              usage: OpenAI::Audio::TranscriptionVerbose::Usage,
+              words: T::Array[OpenAI::Audio::TranscriptionWord]
+            }
+          )
+        end
+        def to_hash
+        end
+
+        class Usage < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::Audio::TranscriptionVerbose::Usage,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          # Duration of the input audio in seconds.
+          sig { returns(Float) }
+          attr_accessor :seconds
+
+          # The type of the usage object. Always `duration` for this variant.
+          sig { returns(Symbol) }
+          attr_accessor :type
+
+          # Usage statistics for models billed by audio input duration.
+          sig { params(seconds: Float, type: Symbol).returns(T.attached_class) }
+          def self.new(
+            # Duration of the input audio in seconds.
+            seconds:,
+            # The type of the usage object. Always `duration` for this variant.
+            type: :duration
+          )
+          end
+
+          sig { override.returns({ seconds: Float, type: Symbol }) }
+          def to_hash
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/audio/transcription_word.rbi b/rbi/openai/models/audio/transcription_word.rbi
new file mode 100644
index 00000000..2bf37011
--- /dev/null
+++ b/rbi/openai/models/audio/transcription_word.rbi
@@ -0,0 +1,45 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Audio
+      class TranscriptionWord < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(OpenAI::Audio::TranscriptionWord, OpenAI::Internal::AnyHash)
+          end
+
+        # End time of the word in seconds.
+        sig { returns(Float) }
+        attr_accessor :end_
+
+        # Start time of the word in seconds.
+        sig { returns(Float) }
+        attr_accessor :start
+
+        # The text content of the word.
+        sig { returns(String) }
+        attr_accessor :word
+
+        sig do
+          params(end_: Float, start: Float, word: String).returns(
+            T.attached_class
+          )
+        end
+        def self.new(
+          # End time of the word in seconds.
+          end_:,
+          # Start time of the word in seconds.
+          start:,
+          # The text content of the word.
+          word:
+        )
+        end
+
+        sig { override.returns({ end_: Float, start: Float, word: String }) }
+        def to_hash
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/audio/translation.rbi b/rbi/openai/models/audio/translation.rbi
new file mode 100644
index 00000000..dd78df7f
--- /dev/null
+++ b/rbi/openai/models/audio/translation.rbi
@@ -0,0 +1,25 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Audio
+      class Translation < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(OpenAI::Audio::Translation, OpenAI::Internal::AnyHash)
+          end
+
+        sig { returns(String) }
+        attr_accessor :text
+
+        sig { params(text: String).returns(T.attached_class) }
+        def self.new(text:)
+        end
+
+        sig { override.returns({ text: String }) }
+        def to_hash
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/audio/translation_create_params.rbi b/rbi/openai/models/audio/translation_create_params.rbi
new file mode 100644
index 00000000..1dc35166
--- /dev/null
+++ b/rbi/openai/models/audio/translation_create_params.rbi
@@ -0,0 +1,190 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Audio
+      class TranslationCreateParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters
+
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::Audio::TranslationCreateParams,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The audio file object (not file name) to translate, in one of these formats:
+        # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+        sig { returns(OpenAI::Internal::FileInput) }
+        attr_accessor :file
+
+        # ID of the model to use. Only `whisper-1` (which is powered by our open source
+        # Whisper V2 model) is currently available.
+        sig { returns(T.any(String, OpenAI::AudioModel::OrSymbol)) }
+        attr_accessor :model
+
+        # An optional text to guide the model's style or continue a previous audio
+        # segment. The
+        # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+        # should be in English.
+        sig { returns(T.nilable(String)) }
+        attr_reader :prompt
+
+        sig { params(prompt: String).void }
+        attr_writer :prompt
+
+        # The format of the output, in one of these options: `json`, `text`, `srt`,
+        # `verbose_json`, or `vtt`.
+        sig do
+          returns(
+            T.nilable(
+              OpenAI::Audio::TranslationCreateParams::ResponseFormat::OrSymbol
+            )
+          )
+        end
+        attr_reader :response_format
+
+        sig do
+          params(
+            response_format:
+              OpenAI::Audio::TranslationCreateParams::ResponseFormat::OrSymbol
+          ).void
+        end
+        attr_writer :response_format
+
+        # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
+        # output more random, while lower values like 0.2 will make it more focused and
+        # deterministic. If set to 0, the model will use
+        # [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+        # automatically increase the temperature until certain thresholds are hit.
+        sig { returns(T.nilable(Float)) }
+        attr_reader :temperature
+
+        sig { params(temperature: Float).void }
+        attr_writer :temperature
+
+        sig do
+          params(
+            file: OpenAI::Internal::FileInput,
+            model: T.any(String, OpenAI::AudioModel::OrSymbol),
+            prompt: String,
+            response_format:
+              OpenAI::Audio::TranslationCreateParams::ResponseFormat::OrSymbol,
+            temperature: Float,
+            request_options: OpenAI::RequestOptions::OrHash
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # The audio file object (not file name) to translate, in one of these formats:
+          # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
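+          #
+          # A hypothetical call (the client construction and method path below are
+          # assumptions for illustration, not part of this file):
+          #
+          #   client.audio.translations.create(
+          #     file: Pathname("speech.m4a"),
+          #     model: :"whisper-1"
+          #   )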
+ file:, + # ID of the model to use. Only `whisper-1` (which is powered by our open source + # Whisper V2 model) is currently available. + model:, + # An optional text to guide the model's style or continue a previous audio + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should be in English. + prompt: nil, + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. + response_format: nil, + # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. + temperature: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + file: OpenAI::Internal::FileInput, + model: T.any(String, OpenAI::AudioModel::OrSymbol), + prompt: String, + response_format: + OpenAI::Audio::TranslationCreateParams::ResponseFormat::OrSymbol, + temperature: Float, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # ID of the model to use. Only `whisper-1` (which is powered by our open source + # Whisper V2 model) is currently available. + module Model + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias { T.any(String, OpenAI::AudioModel::TaggedSymbol) } + + sig do + override.returns( + T::Array[OpenAI::Audio::TranslationCreateParams::Model::Variants] + ) + end + def self.variants + end + end + + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. + module ResponseFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Audio::TranslationCreateParams::ResponseFormat + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + JSON = + T.let( + :json, + OpenAI::Audio::TranslationCreateParams::ResponseFormat::TaggedSymbol + ) + TEXT = + T.let( + :text, + OpenAI::Audio::TranslationCreateParams::ResponseFormat::TaggedSymbol + ) + SRT = + T.let( + :srt, + OpenAI::Audio::TranslationCreateParams::ResponseFormat::TaggedSymbol + ) + VERBOSE_JSON = + T.let( + :verbose_json, + OpenAI::Audio::TranslationCreateParams::ResponseFormat::TaggedSymbol + ) + VTT = + T.let( + :vtt, + OpenAI::Audio::TranslationCreateParams::ResponseFormat::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Audio::TranslationCreateParams::ResponseFormat::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/audio/translation_create_response.rbi b/rbi/openai/models/audio/translation_create_response.rbi new file mode 100644 index 00000000..528526c0 --- /dev/null +++ b/rbi/openai/models/audio/translation_create_response.rbi @@ -0,0 +1,24 @@ +# typed: strong + +module OpenAI + module Models + module Audio + module TranslationCreateResponse + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any(OpenAI::Audio::Translation, OpenAI::Audio::TranslationVerbose) + end + + sig do + override.returns( + T::Array[OpenAI::Models::Audio::TranslationCreateResponse::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/audio/translation_verbose.rbi b/rbi/openai/models/audio/translation_verbose.rbi new file mode 100644 index 00000000..4dfdee59 --- /dev/null +++ 
b/rbi/openai/models/audio/translation_verbose.rbi @@ -0,0 +1,72 @@ +# typed: strong + +module OpenAI + module Models + module Audio + class TranslationVerbose < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Audio::TranslationVerbose, OpenAI::Internal::AnyHash) + end + + # The duration of the input audio. + sig { returns(Float) } + attr_accessor :duration + + # The language of the output translation (always `english`). + sig { returns(String) } + attr_accessor :language + + # The translated text. + sig { returns(String) } + attr_accessor :text + + # Segments of the translated text and their corresponding details. + sig do + returns(T.nilable(T::Array[OpenAI::Audio::TranscriptionSegment])) + end + attr_reader :segments + + sig do + params( + segments: T::Array[OpenAI::Audio::TranscriptionSegment::OrHash] + ).void + end + attr_writer :segments + + sig do + params( + duration: Float, + language: String, + text: String, + segments: T::Array[OpenAI::Audio::TranscriptionSegment::OrHash] + ).returns(T.attached_class) + end + def self.new( + # The duration of the input audio. + duration:, + # The language of the output translation (always `english`). + language:, + # The translated text. + text:, + # Segments of the translated text and their corresponding details. + segments: nil + ) + end + + sig do + override.returns( + { + duration: Float, + language: String, + text: String, + segments: T::Array[OpenAI::Audio::TranscriptionSegment] + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/audio_model.rbi b/rbi/openai/models/audio_model.rbi new file mode 100644 index 00000000..3f22719a --- /dev/null +++ b/rbi/openai/models/audio_model.rbi @@ -0,0 +1,22 @@ +# typed: strong + +module OpenAI + module Models + module AudioModel + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::AudioModel) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + WHISPER_1 = T.let(:"whisper-1", OpenAI::AudioModel::TaggedSymbol) + GPT_4O_TRANSCRIBE = + T.let(:"gpt-4o-transcribe", OpenAI::AudioModel::TaggedSymbol) + GPT_4O_MINI_TRANSCRIBE = + T.let(:"gpt-4o-mini-transcribe", OpenAI::AudioModel::TaggedSymbol) + + sig { override.returns(T::Array[OpenAI::AudioModel::TaggedSymbol]) } + def self.values + end + end + end +end diff --git a/rbi/openai/models/audio_response_format.rbi b/rbi/openai/models/audio_response_format.rbi new file mode 100644 index 00000000..4afcf558 --- /dev/null +++ b/rbi/openai/models/audio_response_format.rbi @@ -0,0 +1,28 @@ +# typed: strong + +module OpenAI + module Models + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + # the only supported format is `json`. 
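+    #
+    # Because `OrSymbol` below permits any Symbol or String, a caller may pass
+    # either the generated constant or a bare literal; e.g. `:verbose_json` and
+    # `OpenAI::AudioResponseFormat::VERBOSE_JSON` resolve to the same value (a
+    # usage note; the equivalence follows from the `T.let` definitions below).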
+ module AudioResponseFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::AudioResponseFormat) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + JSON = T.let(:json, OpenAI::AudioResponseFormat::TaggedSymbol) + TEXT = T.let(:text, OpenAI::AudioResponseFormat::TaggedSymbol) + SRT = T.let(:srt, OpenAI::AudioResponseFormat::TaggedSymbol) + VERBOSE_JSON = + T.let(:verbose_json, OpenAI::AudioResponseFormat::TaggedSymbol) + VTT = T.let(:vtt, OpenAI::AudioResponseFormat::TaggedSymbol) + + sig do + override.returns(T::Array[OpenAI::AudioResponseFormat::TaggedSymbol]) + end + def self.values + end + end + end +end diff --git a/rbi/openai/models/auto_file_chunking_strategy_param.rbi b/rbi/openai/models/auto_file_chunking_strategy_param.rbi new file mode 100644 index 00000000..91995701 --- /dev/null +++ b/rbi/openai/models/auto_file_chunking_strategy_param.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + class AutoFileChunkingStrategyParam < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::AutoFileChunkingStrategyParam, + OpenAI::Internal::AnyHash + ) + end + + # Always `auto`. + sig { returns(Symbol) } + attr_accessor :type + + # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + # `800` and `chunk_overlap_tokens` of `400`. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # Always `auto`. + type: :auto + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/batch.rbi b/rbi/openai/models/batch.rbi new file mode 100644 index 00000000..3d6ce897 --- /dev/null +++ b/rbi/openai/models/batch.rbi @@ -0,0 +1,290 @@ +# typed: strong + +module OpenAI + module Models + class Batch < OpenAI::Internal::Type::BaseModel + OrHash = T.type_alias { T.any(OpenAI::Batch, OpenAI::Internal::AnyHash) } + + sig { returns(String) } + attr_accessor :id + + # The time frame within which the batch should be processed. + sig { returns(String) } + attr_accessor :completion_window + + # The Unix timestamp (in seconds) for when the batch was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The OpenAI API endpoint used by the batch. + sig { returns(String) } + attr_accessor :endpoint + + # The ID of the input file for the batch. + sig { returns(String) } + attr_accessor :input_file_id + + # The object type, which is always `batch`. + sig { returns(Symbol) } + attr_accessor :object + + # The current status of the batch. + sig { returns(OpenAI::Batch::Status::TaggedSymbol) } + attr_accessor :status + + # The Unix timestamp (in seconds) for when the batch was cancelled. + sig { returns(T.nilable(Integer)) } + attr_reader :cancelled_at + + sig { params(cancelled_at: Integer).void } + attr_writer :cancelled_at + + # The Unix timestamp (in seconds) for when the batch started cancelling. + sig { returns(T.nilable(Integer)) } + attr_reader :cancelling_at + + sig { params(cancelling_at: Integer).void } + attr_writer :cancelling_at + + # The Unix timestamp (in seconds) for when the batch was completed. + sig { returns(T.nilable(Integer)) } + attr_reader :completed_at + + sig { params(completed_at: Integer).void } + attr_writer :completed_at + + # The ID of the file containing the outputs of requests with errors. 
+ sig { returns(T.nilable(String)) } + attr_reader :error_file_id + + sig { params(error_file_id: String).void } + attr_writer :error_file_id + + sig { returns(T.nilable(OpenAI::Batch::Errors)) } + attr_reader :errors + + sig { params(errors: OpenAI::Batch::Errors::OrHash).void } + attr_writer :errors + + # The Unix timestamp (in seconds) for when the batch expired. + sig { returns(T.nilable(Integer)) } + attr_reader :expired_at + + sig { params(expired_at: Integer).void } + attr_writer :expired_at + + # The Unix timestamp (in seconds) for when the batch will expire. + sig { returns(T.nilable(Integer)) } + attr_reader :expires_at + + sig { params(expires_at: Integer).void } + attr_writer :expires_at + + # The Unix timestamp (in seconds) for when the batch failed. + sig { returns(T.nilable(Integer)) } + attr_reader :failed_at + + sig { params(failed_at: Integer).void } + attr_writer :failed_at + + # The Unix timestamp (in seconds) for when the batch started finalizing. + sig { returns(T.nilable(Integer)) } + attr_reader :finalizing_at + + sig { params(finalizing_at: Integer).void } + attr_writer :finalizing_at + + # The Unix timestamp (in seconds) for when the batch started processing. + sig { returns(T.nilable(Integer)) } + attr_reader :in_progress_at + + sig { params(in_progress_at: Integer).void } + attr_writer :in_progress_at + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The ID of the file containing the outputs of successfully executed requests. + sig { returns(T.nilable(String)) } + attr_reader :output_file_id + + sig { params(output_file_id: String).void } + attr_writer :output_file_id + + # The request counts for different statuses within the batch. + sig { returns(T.nilable(OpenAI::BatchRequestCounts)) } + attr_reader :request_counts + + sig { params(request_counts: OpenAI::BatchRequestCounts::OrHash).void } + attr_writer :request_counts + + sig do + params( + id: String, + completion_window: String, + created_at: Integer, + endpoint: String, + input_file_id: String, + status: OpenAI::Batch::Status::OrSymbol, + cancelled_at: Integer, + cancelling_at: Integer, + completed_at: Integer, + error_file_id: String, + errors: OpenAI::Batch::Errors::OrHash, + expired_at: Integer, + expires_at: Integer, + failed_at: Integer, + finalizing_at: Integer, + in_progress_at: Integer, + metadata: T.nilable(T::Hash[Symbol, String]), + output_file_id: String, + request_counts: OpenAI::BatchRequestCounts::OrHash, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + id:, + # The time frame within which the batch should be processed. + completion_window:, + # The Unix timestamp (in seconds) for when the batch was created. + created_at:, + # The OpenAI API endpoint used by the batch. + endpoint:, + # The ID of the input file for the batch. + input_file_id:, + # The current status of the batch. + status:, + # The Unix timestamp (in seconds) for when the batch was cancelled. + cancelled_at: nil, + # The Unix timestamp (in seconds) for when the batch started cancelling. + cancelling_at: nil, + # The Unix timestamp (in seconds) for when the batch was completed. 
+ completed_at: nil, + # The ID of the file containing the outputs of requests with errors. + error_file_id: nil, + errors: nil, + # The Unix timestamp (in seconds) for when the batch expired. + expired_at: nil, + # The Unix timestamp (in seconds) for when the batch will expire. + expires_at: nil, + # The Unix timestamp (in seconds) for when the batch failed. + failed_at: nil, + # The Unix timestamp (in seconds) for when the batch started finalizing. + finalizing_at: nil, + # The Unix timestamp (in seconds) for when the batch started processing. + in_progress_at: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The ID of the file containing the outputs of successfully executed requests. + output_file_id: nil, + # The request counts for different statuses within the batch. + request_counts: nil, + # The object type, which is always `batch`. + object: :batch + ) + end + + sig do + override.returns( + { + id: String, + completion_window: String, + created_at: Integer, + endpoint: String, + input_file_id: String, + object: Symbol, + status: OpenAI::Batch::Status::TaggedSymbol, + cancelled_at: Integer, + cancelling_at: Integer, + completed_at: Integer, + error_file_id: String, + errors: OpenAI::Batch::Errors, + expired_at: Integer, + expires_at: Integer, + failed_at: Integer, + finalizing_at: Integer, + in_progress_at: Integer, + metadata: T.nilable(T::Hash[Symbol, String]), + output_file_id: String, + request_counts: OpenAI::BatchRequestCounts + } + ) + end + def to_hash + end + + # The current status of the batch. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::Batch::Status) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + VALIDATING = T.let(:validating, OpenAI::Batch::Status::TaggedSymbol) + FAILED = T.let(:failed, OpenAI::Batch::Status::TaggedSymbol) + IN_PROGRESS = T.let(:in_progress, OpenAI::Batch::Status::TaggedSymbol) + FINALIZING = T.let(:finalizing, OpenAI::Batch::Status::TaggedSymbol) + COMPLETED = T.let(:completed, OpenAI::Batch::Status::TaggedSymbol) + EXPIRED = T.let(:expired, OpenAI::Batch::Status::TaggedSymbol) + CANCELLING = T.let(:cancelling, OpenAI::Batch::Status::TaggedSymbol) + CANCELLED = T.let(:cancelled, OpenAI::Batch::Status::TaggedSymbol) + + sig { override.returns(T::Array[OpenAI::Batch::Status::TaggedSymbol]) } + def self.values + end + end + + class Errors < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Batch::Errors, OpenAI::Internal::AnyHash) + end + + sig { returns(T.nilable(T::Array[OpenAI::BatchError])) } + attr_reader :data + + sig { params(data: T::Array[OpenAI::BatchError::OrHash]).void } + attr_writer :data + + # The object type, which is always `list`. + sig { returns(T.nilable(String)) } + attr_reader :object + + sig { params(object: String).void } + attr_writer :object + + sig do + params( + data: T::Array[OpenAI::BatchError::OrHash], + object: String + ).returns(T.attached_class) + end + def self.new( + data: nil, + # The object type, which is always `list`. 
+          object: nil
+        )
+        end
+
+        sig do
+          override.returns(
+            { data: T::Array[OpenAI::BatchError], object: String }
+          )
+        end
+        def to_hash
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/batch_cancel_params.rbi b/rbi/openai/models/batch_cancel_params.rbi
new file mode 100644
index 00000000..b6a056fc
--- /dev/null
+++ b/rbi/openai/models/batch_cancel_params.rbi
@@ -0,0 +1,27 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    class BatchCancelParams < OpenAI::Internal::Type::BaseModel
+      extend OpenAI::Internal::Type::RequestParameters::Converter
+      include OpenAI::Internal::Type::RequestParameters
+
+      OrHash =
+        T.type_alias do
+          T.any(OpenAI::BatchCancelParams, OpenAI::Internal::AnyHash)
+        end
+
+      sig do
+        params(request_options: OpenAI::RequestOptions::OrHash).returns(
+          T.attached_class
+        )
+      end
+      def self.new(request_options: {})
+      end
+
+      sig { override.returns({ request_options: OpenAI::RequestOptions }) }
+      def to_hash
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/batch_create_params.rbi b/rbi/openai/models/batch_create_params.rbi
new file mode 100644
index 00000000..c15c791a
--- /dev/null
+++ b/rbi/openai/models/batch_create_params.rbi
@@ -0,0 +1,230 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    class BatchCreateParams < OpenAI::Internal::Type::BaseModel
+      extend OpenAI::Internal::Type::RequestParameters::Converter
+      include OpenAI::Internal::Type::RequestParameters
+
+      OrHash =
+        T.type_alias do
+          T.any(OpenAI::BatchCreateParams, OpenAI::Internal::AnyHash)
+        end
+
+      # The time frame within which the batch should be processed. Currently only `24h`
+      # is supported.
+      sig { returns(OpenAI::BatchCreateParams::CompletionWindow::OrSymbol) }
+      attr_accessor :completion_window
+
+      # The endpoint to be used for all requests in the batch. Currently
+      # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+      # are supported. Note that `/v1/embeddings` batches are also restricted to a
+      # maximum of 50,000 embedding inputs across all requests in the batch.
+      sig { returns(OpenAI::BatchCreateParams::Endpoint::OrSymbol) }
+      attr_accessor :endpoint
+
+      # The ID of an uploaded file that contains requests for the new batch.
+      #
+      # See [upload file](https://platform.openai.com/docs/api-reference/files/create)
+      # for how to upload a file.
+      #
+      # Your input file must be formatted as a
+      # [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
+      # and must be uploaded with the purpose `batch`. The file can contain up to 50,000
+      # requests, and can be up to 200 MB in size.
+      sig { returns(String) }
+      attr_accessor :input_file_id
+
+      # Set of 16 key-value pairs that can be attached to an object. This can be useful
+      # for storing additional information about the object in a structured format, and
+      # querying for objects via API or the dashboard.
+      #
+      # Keys are strings with a maximum length of 64 characters. Values are strings with
+      # a maximum length of 512 characters.
+      sig { returns(T.nilable(T::Hash[Symbol, String])) }
+      attr_accessor :metadata
+
+      # The expiration policy for the output and/or error files that are generated for
+      # a batch.
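+      # For instance, a seven-day retention window would be `seconds: 604_800`
+      # (7 * 24 * 60 * 60), which falls inside the 3600..2592000 range documented
+      # on `OutputExpiresAfter` below.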
+      sig { returns(T.nilable(OpenAI::BatchCreateParams::OutputExpiresAfter)) }
+      attr_reader :output_expires_after
+
+      sig do
+        params(
+          output_expires_after:
+            OpenAI::BatchCreateParams::OutputExpiresAfter::OrHash
+        ).void
+      end
+      attr_writer :output_expires_after
+
+      sig do
+        params(
+          completion_window:
+            OpenAI::BatchCreateParams::CompletionWindow::OrSymbol,
+          endpoint: OpenAI::BatchCreateParams::Endpoint::OrSymbol,
+          input_file_id: String,
+          metadata: T.nilable(T::Hash[Symbol, String]),
+          output_expires_after:
+            OpenAI::BatchCreateParams::OutputExpiresAfter::OrHash,
+          request_options: OpenAI::RequestOptions::OrHash
+        ).returns(T.attached_class)
+      end
+      def self.new(
+        # The time frame within which the batch should be processed. Currently only `24h`
+        # is supported.
+        completion_window:,
+        # The endpoint to be used for all requests in the batch. Currently
+        # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+        # are supported. Note that `/v1/embeddings` batches are also restricted to a
+        # maximum of 50,000 embedding inputs across all requests in the batch.
+        endpoint:,
+        # The ID of an uploaded file that contains requests for the new batch.
+        #
+        # See [upload file](https://platform.openai.com/docs/api-reference/files/create)
+        # for how to upload a file.
+        #
+        # Your input file must be formatted as a
+        # [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
+        # and must be uploaded with the purpose `batch`. The file can contain up to 50,000
+        # requests, and can be up to 200 MB in size.
+        input_file_id:,
+        # Set of 16 key-value pairs that can be attached to an object. This can be useful
+        # for storing additional information about the object in a structured format, and
+        # querying for objects via API or the dashboard.
+        #
+        # Keys are strings with a maximum length of 64 characters. Values are strings with
+        # a maximum length of 512 characters.
+        metadata: nil,
+        # The expiration policy for the output and/or error files that are generated for
+        # a batch.
+        output_expires_after: nil,
+        request_options: {}
+      )
+      end
+
+      sig do
+        override.returns(
+          {
+            completion_window:
+              OpenAI::BatchCreateParams::CompletionWindow::OrSymbol,
+            endpoint: OpenAI::BatchCreateParams::Endpoint::OrSymbol,
+            input_file_id: String,
+            metadata: T.nilable(T::Hash[Symbol, String]),
+            output_expires_after: OpenAI::BatchCreateParams::OutputExpiresAfter,
+            request_options: OpenAI::RequestOptions
+          }
+        )
+      end
+      def to_hash
+      end
+
+      # The time frame within which the batch should be processed. Currently only `24h`
+      # is supported.
+      module CompletionWindow
+        extend OpenAI::Internal::Type::Enum
+
+        TaggedSymbol =
+          T.type_alias do
+            T.all(Symbol, OpenAI::BatchCreateParams::CompletionWindow)
+          end
+        OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+        COMPLETION_WINDOW_24H =
+          T.let(
+            :"24h",
+            OpenAI::BatchCreateParams::CompletionWindow::TaggedSymbol
+          )
+
+        sig do
+          override.returns(
+            T::Array[OpenAI::BatchCreateParams::CompletionWindow::TaggedSymbol]
+          )
+        end
+        def self.values
+        end
+      end
+
+      # The endpoint to be used for all requests in the batch. Currently
+      # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+      # are supported. Note that `/v1/embeddings` batches are also restricted to a
+      # maximum of 50,000 embedding inputs across all requests in the batch.
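+      #
+      # As a sketch, one line of the JSONL input file for the
+      # `/v1/chat/completions` endpoint might look like (field values are
+      # illustrative):
+      #
+      #   {"custom_id": "req-1", "method": "POST", "url": "/v1/chat/completions",
+      #    "body": {"model": "gpt-4o-mini", "messages": [{"role": "user", "content": "Hi"}]}}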
+      module Endpoint
+        extend OpenAI::Internal::Type::Enum
+
+        TaggedSymbol =
+          T.type_alias { T.all(Symbol, OpenAI::BatchCreateParams::Endpoint) }
+        OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+        V1_RESPONSES =
+          T.let(
+            :"/v1/responses",
+            OpenAI::BatchCreateParams::Endpoint::TaggedSymbol
+          )
+        V1_CHAT_COMPLETIONS =
+          T.let(
+            :"/v1/chat/completions",
+            OpenAI::BatchCreateParams::Endpoint::TaggedSymbol
+          )
+        V1_EMBEDDINGS =
+          T.let(
+            :"/v1/embeddings",
+            OpenAI::BatchCreateParams::Endpoint::TaggedSymbol
+          )
+        V1_COMPLETIONS =
+          T.let(
+            :"/v1/completions",
+            OpenAI::BatchCreateParams::Endpoint::TaggedSymbol
+          )
+
+        sig do
+          override.returns(
+            T::Array[OpenAI::BatchCreateParams::Endpoint::TaggedSymbol]
+          )
+        end
+        def self.values
+        end
+      end
+
+      class OutputExpiresAfter < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::BatchCreateParams::OutputExpiresAfter,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # Anchor timestamp after which the expiration policy applies. Supported anchors:
+        # `created_at`. Note that the anchor is the file creation time, not the time the
+        # batch is created.
+        sig { returns(Symbol) }
+        attr_accessor :anchor
+
+        # The number of seconds after the anchor time that the file will expire. Must be
+        # between 3600 (1 hour) and 2592000 (30 days).
+        sig { returns(Integer) }
+        attr_accessor :seconds
+
+        # The expiration policy for the output and/or error files that are generated for
+        # a batch.
+        sig do
+          params(seconds: Integer, anchor: Symbol).returns(T.attached_class)
+        end
+        def self.new(
+          # The number of seconds after the anchor time that the file will expire. Must be
+          # between 3600 (1 hour) and 2592000 (30 days).
+          seconds:,
+          # Anchor timestamp after which the expiration policy applies. Supported anchors:
+          # `created_at`. Note that the anchor is the file creation time, not the time the
+          # batch is created.
+          anchor: :created_at
+        )
+        end
+
+        sig { override.returns({ anchor: Symbol, seconds: Integer }) }
+        def to_hash
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/batch_error.rbi b/rbi/openai/models/batch_error.rbi
new file mode 100644
index 00000000..4cade073
--- /dev/null
+++ b/rbi/openai/models/batch_error.rbi
@@ -0,0 +1,65 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    class BatchError < OpenAI::Internal::Type::BaseModel
+      OrHash =
+        T.type_alias { T.any(OpenAI::BatchError, OpenAI::Internal::AnyHash) }
+
+      # An error code identifying the error type.
+      sig { returns(T.nilable(String)) }
+      attr_reader :code
+
+      sig { params(code: String).void }
+      attr_writer :code
+
+      # The line number of the input file where the error occurred, if applicable.
+      sig { returns(T.nilable(Integer)) }
+      attr_accessor :line
+
+      # A human-readable message providing more details about the error.
+      sig { returns(T.nilable(String)) }
+      attr_reader :message
+
+      sig { params(message: String).void }
+      attr_writer :message
+
+      # The name of the parameter that caused the error, if applicable.
+      sig { returns(T.nilable(String)) }
+      attr_accessor :param
+
+      sig do
+        params(
+          code: String,
+          line: T.nilable(Integer),
+          message: String,
+          param: T.nilable(String)
+        ).returns(T.attached_class)
+      end
+      def self.new(
+        # An error code identifying the error type.
+        code: nil,
+        # The line number of the input file where the error occurred, if applicable.
+        line: nil,
+        # A human-readable message providing more details about the error.
+        message: nil,
+        # The name of the parameter that caused the error, if applicable.
+ param: nil + ) + end + + sig do + override.returns( + { + code: String, + line: T.nilable(Integer), + message: String, + param: T.nilable(String) + } + ) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/batch_list_params.rbi b/rbi/openai/models/batch_list_params.rbi new file mode 100644 index 00000000..7000c142 --- /dev/null +++ b/rbi/openai/models/batch_list_params.rbi @@ -0,0 +1,65 @@ +# typed: strong + +module OpenAI + module Models + class BatchListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::BatchListParams, OpenAI::Internal::AnyHash) + end + + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + sig do + params( + after: String, + limit: Integer, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + limit: Integer, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/batch_request_counts.rbi b/rbi/openai/models/batch_request_counts.rbi new file mode 100644 index 00000000..88cf5914 --- /dev/null +++ b/rbi/openai/models/batch_request_counts.rbi @@ -0,0 +1,48 @@ +# typed: strong + +module OpenAI + module Models + class BatchRequestCounts < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::BatchRequestCounts, OpenAI::Internal::AnyHash) + end + + # Number of requests that have been completed successfully. + sig { returns(Integer) } + attr_accessor :completed + + # Number of requests that have failed. + sig { returns(Integer) } + attr_accessor :failed + + # Total number of requests in the batch. + sig { returns(Integer) } + attr_accessor :total + + # The request counts for different statuses within the batch. + sig do + params(completed: Integer, failed: Integer, total: Integer).returns( + T.attached_class + ) + end + def self.new( + # Number of requests that have been completed successfully. + completed:, + # Number of requests that have failed. + failed:, + # Total number of requests in the batch. 
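+        # (Reading these together: while a batch is in progress, `total` minus
+        # `completed + failed` is the number of still-pending requests; once it
+        # finishes, the two sides can be expected to match. This is an inference
+        # from the field descriptions, not a documented guarantee.)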
+        total:
+      )
+      end
+
+      sig do
+        override.returns(
+          { completed: Integer, failed: Integer, total: Integer }
+        )
+      end
+      def to_hash
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/batch_retrieve_params.rbi b/rbi/openai/models/batch_retrieve_params.rbi
new file mode 100644
index 00000000..f749ed91
--- /dev/null
+++ b/rbi/openai/models/batch_retrieve_params.rbi
@@ -0,0 +1,27 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    class BatchRetrieveParams < OpenAI::Internal::Type::BaseModel
+      extend OpenAI::Internal::Type::RequestParameters::Converter
+      include OpenAI::Internal::Type::RequestParameters
+
+      OrHash =
+        T.type_alias do
+          T.any(OpenAI::BatchRetrieveParams, OpenAI::Internal::AnyHash)
+        end
+
+      sig do
+        params(request_options: OpenAI::RequestOptions::OrHash).returns(
+          T.attached_class
+        )
+      end
+      def self.new(request_options: {})
+      end
+
+      sig { override.returns({ request_options: OpenAI::RequestOptions }) }
+      def to_hash
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/beta/assistant.rbi b/rbi/openai/models/beta/assistant.rbi
new file mode 100644
index 00000000..7842b859
--- /dev/null
+++ b/rbi/openai/models/beta/assistant.rbi
@@ -0,0 +1,384 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Beta
+      class Assistant < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(OpenAI::Beta::Assistant, OpenAI::Internal::AnyHash)
+          end
+
+        # The identifier, which can be referenced in API endpoints.
+        sig { returns(String) }
+        attr_accessor :id
+
+        # The Unix timestamp (in seconds) for when the assistant was created.
+        sig { returns(Integer) }
+        attr_accessor :created_at
+
+        # The description of the assistant. The maximum length is 512 characters.
+        sig { returns(T.nilable(String)) }
+        attr_accessor :description
+
+        # The system instructions that the assistant uses. The maximum length is 256,000
+        # characters.
+        sig { returns(T.nilable(String)) }
+        attr_accessor :instructions
+
+        # Set of 16 key-value pairs that can be attached to an object. This can be useful
+        # for storing additional information about the object in a structured format, and
+        # querying for objects via API or the dashboard.
+        #
+        # Keys are strings with a maximum length of 64 characters. Values are strings with
+        # a maximum length of 512 characters.
+        sig { returns(T.nilable(T::Hash[Symbol, String])) }
+        attr_accessor :metadata
+
+        # ID of the model to use. You can use the
+        # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+        # see all of your available models, or see our
+        # [Model overview](https://platform.openai.com/docs/models) for descriptions of
+        # them.
+        sig { returns(String) }
+        attr_accessor :model
+
+        # The name of the assistant. The maximum length is 256 characters.
+        sig { returns(T.nilable(String)) }
+        attr_accessor :name
+
+        # The object type, which is always `assistant`.
+        sig { returns(Symbol) }
+        attr_accessor :object
+
+        # A list of tools enabled on the assistant. There can be a maximum of 128 tools
+        # per assistant. Tools can be of types `code_interpreter`, `file_search`, or
+        # `function`.
+        sig { returns(T::Array[OpenAI::Beta::AssistantTool::Variants]) }
+        attr_accessor :tools
+
+        # Specifies the format that the model must output. Compatible with
+        # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+        # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+        # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + sig do + returns( + T.nilable(OpenAI::Beta::AssistantResponseFormatOption::Variants) + ) + end + attr_accessor :response_format + + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + sig { returns(T.nilable(Float)) } + attr_accessor :temperature + + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. + sig { returns(T.nilable(OpenAI::Beta::Assistant::ToolResources)) } + attr_reader :tool_resources + + sig do + params( + tool_resources: + T.nilable(OpenAI::Beta::Assistant::ToolResources::OrHash) + ).void + end + attr_writer :tool_resources + + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. + sig { returns(T.nilable(Float)) } + attr_accessor :top_p + + # Represents an `assistant` that can call the model and use tools. + sig do + params( + id: String, + created_at: Integer, + description: T.nilable(String), + instructions: T.nilable(String), + metadata: T.nilable(T::Hash[Symbol, String]), + model: String, + name: T.nilable(String), + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::FileSearchTool::OrHash, + OpenAI::Beta::FunctionTool::OrHash + ) + ], + response_format: + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash + ) + ), + temperature: T.nilable(Float), + tool_resources: + T.nilable(OpenAI::Beta::Assistant::ToolResources::OrHash), + top_p: T.nilable(Float), + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The identifier, which can be referenced in API endpoints. + id:, + # The Unix timestamp (in seconds) for when the assistant was created. + created_at:, + # The description of the assistant. The maximum length is 512 characters. + description:, + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. + instructions:, + # Set of 16 key-value pairs that can be attached to an object. 
This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + model:, + # The name of the assistant. The maximum length is 256 characters. + name:, + # A list of tool enabled on the assistant. There can be a maximum of 128 tools per + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. + tools:, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + response_format: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + temperature: nil, + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. + tool_resources: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. + top_p: nil, + # The object type, which is always `assistant`. 
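+ # Callers normally omit this argument; it defaults to `:assistant` below.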
+ object: :assistant + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + description: T.nilable(String), + instructions: T.nilable(String), + metadata: T.nilable(T::Hash[Symbol, String]), + model: String, + name: T.nilable(String), + object: Symbol, + tools: T::Array[OpenAI::Beta::AssistantTool::Variants], + response_format: + T.nilable( + OpenAI::Beta::AssistantResponseFormatOption::Variants + ), + temperature: T.nilable(Float), + tool_resources: T.nilable(OpenAI::Beta::Assistant::ToolResources), + top_p: T.nilable(Float) + } + ) + end + def to_hash + end + + class ToolResources < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Assistant::ToolResources, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + T.nilable(OpenAI::Beta::Assistant::ToolResources::CodeInterpreter) + ) + end + attr_reader :code_interpreter + + sig do + params( + code_interpreter: + OpenAI::Beta::Assistant::ToolResources::CodeInterpreter::OrHash + ).void + end + attr_writer :code_interpreter + + sig do + returns( + T.nilable(OpenAI::Beta::Assistant::ToolResources::FileSearch) + ) + end + attr_reader :file_search + + sig do + params( + file_search: + OpenAI::Beta::Assistant::ToolResources::FileSearch::OrHash + ).void + end + attr_writer :file_search + + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. + sig do + params( + code_interpreter: + OpenAI::Beta::Assistant::ToolResources::CodeInterpreter::OrHash, + file_search: + OpenAI::Beta::Assistant::ToolResources::FileSearch::OrHash + ).returns(T.attached_class) + end + def self.new(code_interpreter: nil, file_search: nil) + end + + sig do + override.returns( + { + code_interpreter: + OpenAI::Beta::Assistant::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::Assistant::ToolResources::FileSearch + } + ) + end + def to_hash + end + + class CodeInterpreter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Assistant::ToolResources::CodeInterpreter, + OpenAI::Internal::AnyHash + ) + end + + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter`` tool. There can be a maximum of 20 files + # associated with the tool. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :file_ids + + sig { params(file_ids: T::Array[String]).void } + attr_writer :file_ids + + sig { params(file_ids: T::Array[String]).returns(T.attached_class) } + def self.new( + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter`` tool. There can be a maximum of 20 files + # associated with the tool. + file_ids: nil + ) + end + + sig { override.returns({ file_ids: T::Array[String] }) } + def to_hash + end + end + + class FileSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Assistant::ToolResources::FileSearch, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. 
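+ #
+ # A minimal read sketch, assuming an `assistant` instance (ID illustrative):
+ #
+ #   assistant.tool_resources&.file_search&.vector_store_ids
+ #   # => ["vs_abc123"]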
+ sig { returns(T.nilable(T::Array[String])) } + attr_reader :vector_store_ids + + sig { params(vector_store_ids: T::Array[String]).void } + attr_writer :vector_store_ids + + sig do + params(vector_store_ids: T::Array[String]).returns( + T.attached_class + ) + end + def self.new( + # The ID of the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. + vector_store_ids: nil + ) + end + + sig { override.returns({ vector_store_ids: T::Array[String] }) } + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/assistant_create_params.rbi b/rbi/openai/models/beta/assistant_create_params.rbi new file mode 100644 index 00000000..f4783b7e --- /dev/null +++ b/rbi/openai/models/beta/assistant_create_params.rbi @@ -0,0 +1,758 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class AssistantCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantCreateParams, + OpenAI::Internal::AnyHash + ) + end + + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + sig { returns(T.any(String, OpenAI::ChatModel::OrSymbol)) } + attr_accessor :model + + # The description of the assistant. The maximum length is 512 characters. + sig { returns(T.nilable(String)) } + attr_accessor :description + + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. + sig { returns(T.nilable(String)) } + attr_accessor :instructions + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The name of the assistant. The maximum length is 256 characters. + sig { returns(T.nilable(String)) } + attr_accessor :name + + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } + attr_accessor :reasoning_effort + + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
+ # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + sig do + returns( + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText, + OpenAI::ResponseFormatJSONObject, + OpenAI::ResponseFormatJSONSchema + ) + ) + ) + end + attr_accessor :response_format + + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + sig { returns(T.nilable(Float)) } + attr_accessor :temperature + + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. + sig do + returns(T.nilable(OpenAI::Beta::AssistantCreateParams::ToolResources)) + end + attr_reader :tool_resources + + sig do + params( + tool_resources: + T.nilable( + OpenAI::Beta::AssistantCreateParams::ToolResources::OrHash + ) + ).void + end + attr_writer :tool_resources + + # A list of tool enabled on the assistant. There can be a maximum of 128 tools per + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. + sig do + returns( + T.nilable( + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::FileSearchTool, + OpenAI::Beta::FunctionTool + ) + ] + ) + ) + end + attr_reader :tools + + sig do + params( + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::FileSearchTool::OrHash, + OpenAI::Beta::FunctionTool::OrHash + ) + ] + ).void + end + attr_writer :tools + + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. 
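+ #
+ # For example (illustrative), prefer passing `top_p: 0.1` and leaving
+ # `temperature` at its default, rather than setting both together.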
+ sig { returns(T.nilable(Float)) } + attr_accessor :top_p + + sig do + params( + model: T.any(String, OpenAI::ChatModel::OrSymbol), + description: T.nilable(String), + instructions: T.nilable(String), + metadata: T.nilable(T::Hash[Symbol, String]), + name: T.nilable(String), + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + response_format: + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash + ) + ), + temperature: T.nilable(Float), + tool_resources: + T.nilable( + OpenAI::Beta::AssistantCreateParams::ToolResources::OrHash + ), + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::FileSearchTool::OrHash, + OpenAI::Beta::FunctionTool::OrHash + ) + ], + top_p: T.nilable(Float), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + model:, + # The description of the assistant. The maximum length is 512 characters. + description: nil, + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. + instructions: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The name of the assistant. The maximum length is 256 characters. + name: nil, + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + response_format: nil, + # What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + temperature: nil, + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. + tool_resources: nil, + # A list of tool enabled on the assistant. There can be a maximum of 128 tools per + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. + tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. + top_p: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + model: T.any(String, OpenAI::ChatModel::OrSymbol), + description: T.nilable(String), + instructions: T.nilable(String), + metadata: T.nilable(T::Hash[Symbol, String]), + name: T.nilable(String), + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + response_format: + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText, + OpenAI::ResponseFormatJSONObject, + OpenAI::ResponseFormatJSONSchema + ) + ), + temperature: T.nilable(Float), + tool_resources: + T.nilable(OpenAI::Beta::AssistantCreateParams::ToolResources), + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::FileSearchTool, + OpenAI::Beta::FunctionTool + ) + ], + top_p: T.nilable(Float), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + module Model + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias { T.any(String, OpenAI::ChatModel::TaggedSymbol) } + + sig do + override.returns( + T::Array[OpenAI::Beta::AssistantCreateParams::Model::Variants] + ) + end + def self.variants + end + end + + class ToolResources < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantCreateParams::ToolResources, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + T.nilable( + OpenAI::Beta::AssistantCreateParams::ToolResources::CodeInterpreter + ) + ) + end + attr_reader :code_interpreter + + sig do + params( + code_interpreter: + OpenAI::Beta::AssistantCreateParams::ToolResources::CodeInterpreter::OrHash + ).void + end + attr_writer :code_interpreter + + sig do + returns( + T.nilable( + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch + ) + ) + end + attr_reader :file_search + + sig do + params( + file_search: + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::OrHash + ).void + end + attr_writer :file_search + + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. 
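+ #
+ # A hedged sketch of the accepted hash shape (IDs illustrative):
+ #
+ #   tool_resources: {
+ #     code_interpreter: { file_ids: ["file-abc123"] },
+ #     file_search: { vector_store_ids: ["vs_abc123"] }
+ #   }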
+ sig do + params( + code_interpreter: + OpenAI::Beta::AssistantCreateParams::ToolResources::CodeInterpreter::OrHash, + file_search: + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::OrHash + ).returns(T.attached_class) + end + def self.new(code_interpreter: nil, file_search: nil) + end + + sig do + override.returns( + { + code_interpreter: + OpenAI::Beta::AssistantCreateParams::ToolResources::CodeInterpreter, + file_search: + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch + } + ) + end + def to_hash + end + + class CodeInterpreter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantCreateParams::ToolResources::CodeInterpreter, + OpenAI::Internal::AnyHash + ) + end + + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :file_ids + + sig { params(file_ids: T::Array[String]).void } + attr_writer :file_ids + + sig { params(file_ids: T::Array[String]).returns(T.attached_class) } + def self.new( + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. + file_ids: nil + ) + end + + sig { override.returns({ file_ids: T::Array[String] }) } + def to_hash + end + end + + class FileSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch, + OpenAI::Internal::AnyHash + ) + end + + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :vector_store_ids + + sig { params(vector_store_ids: T::Array[String]).void } + attr_writer :vector_store_ids + + # A helper to create a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this assistant. There can be a maximum of 1 + # vector store attached to the assistant. + sig do + returns( + T.nilable( + T::Array[ + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore + ] + ) + ) + end + attr_reader :vector_stores + + sig do + params( + vector_stores: + T::Array[ + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::OrHash + ] + ).void + end + attr_writer :vector_stores + + sig do + params( + vector_store_ids: T::Array[String], + vector_stores: + T::Array[ + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::OrHash + ] + ).returns(T.attached_class) + end + def self.new( + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. + vector_store_ids: nil, + # A helper to create a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this assistant. There can be a maximum of 1 + # vector store attached to the assistant. 
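+ # For example (illustrative): `vector_stores: [{ file_ids: ["file-abc123"] }]`.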
+ vector_stores: nil + ) + end + + sig do + override.returns( + { + vector_store_ids: T::Array[String], + vector_stores: + T::Array[ + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore + ] + } + ) + end + def to_hash + end + + class VectorStore < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore, + OpenAI::Internal::AnyHash + ) + end + + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. + sig do + returns( + T.nilable( + T.any( + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + ) + ) + ) + end + attr_reader :chunking_strategy + + sig do + params( + chunking_strategy: + T.any( + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto::OrHash, + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::OrHash + ) + ).void + end + attr_writer :chunking_strategy + + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :file_ids + + sig { params(file_ids: T::Array[String]).void } + attr_writer :file_ids + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + sig do + params( + chunking_strategy: + T.any( + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto::OrHash, + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::OrHash + ), + file_ids: T::Array[String], + metadata: T.nilable(T::Hash[Symbol, String]) + ).returns(T.attached_class) + end + def self.new( + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. + chunking_strategy: nil, + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. + file_ids: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil + ) + end + + sig do + override.returns( + { + chunking_strategy: + T.any( + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + ), + file_ids: T::Array[String], + metadata: T.nilable(T::Hash[Symbol, String]) + } + ) + end + def to_hash + end + + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. 
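+ #
+ # A hedged example of a static strategy (values shown are the documented
+ # defaults):
+ #
+ #   chunking_strategy: {
+ #     type: :static,
+ #     static: { max_chunk_size_tokens: 800, chunk_overlap_tokens: 400 }
+ #   }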
+ module ChunkingStrategy + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + ) + end + + class Auto < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, + OpenAI::Internal::AnyHash + ) + end + + # Always `auto`. + sig { returns(Symbol) } + attr_accessor :type + + # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + # `800` and `chunk_overlap_tokens` of `400`. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # Always `auto`. + type: :auto + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + class Static < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static + ) + end + attr_reader :static + + sig do + params( + static: + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static::OrHash + ).void + end + attr_writer :static + + # Always `static`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + static: + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + static:, + # Always `static`. + type: :static + ) + end + + sig do + override.returns( + { + static: + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, + type: Symbol + } + ) + end + def to_hash + end + + class Static < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, + OpenAI::Internal::AnyHash + ) + end + + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + sig { returns(Integer) } + attr_accessor :chunk_overlap_tokens + + # The maximum number of tokens in each chunk. The default value is `800`. The + # minimum value is `100` and the maximum value is `4096`. + sig { returns(Integer) } + attr_accessor :max_chunk_size_tokens + + sig do + params( + chunk_overlap_tokens: Integer, + max_chunk_size_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + chunk_overlap_tokens:, + # The maximum number of tokens in each chunk. The default value is `800`. The + # minimum value is `100` and the maximum value is `4096`. 
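+ # (E.g. with `max_chunk_size_tokens: 800`, `chunk_overlap_tokens` may be at
+ # most 400.)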
+ max_chunk_size_tokens: + ) + end + + sig do + override.returns( + { + chunk_overlap_tokens: Integer, + max_chunk_size_tokens: Integer + } + ) + end + def to_hash + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Variants + ] + ) + end + def self.variants + end + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/assistant_delete_params.rbi b/rbi/openai/models/beta/assistant_delete_params.rbi new file mode 100644 index 00000000..d9efdc8d --- /dev/null +++ b/rbi/openai/models/beta/assistant_delete_params.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class AssistantDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantDeleteParams, + OpenAI::Internal::AnyHash + ) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/beta/assistant_deleted.rbi b/rbi/openai/models/beta/assistant_deleted.rbi new file mode 100644 index 00000000..6925eb3b --- /dev/null +++ b/rbi/openai/models/beta/assistant_deleted.rbi @@ -0,0 +1,37 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class AssistantDeleted < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Beta::AssistantDeleted, OpenAI::Internal::AnyHash) + end + + sig { returns(String) } + attr_accessor :id + + sig { returns(T::Boolean) } + attr_accessor :deleted + + sig { returns(Symbol) } + attr_accessor :object + + sig do + params(id: String, deleted: T::Boolean, object: Symbol).returns( + T.attached_class + ) + end + def self.new(id:, deleted:, object: :"assistant.deleted") + end + + sig do + override.returns({ id: String, deleted: T::Boolean, object: Symbol }) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/beta/assistant_list_params.rbi b/rbi/openai/models/beta/assistant_list_params.rbi new file mode 100644 index 00000000..0d1099af --- /dev/null +++ b/rbi/openai/models/beta/assistant_list_params.rbi @@ -0,0 +1,126 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class AssistantListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::Beta::AssistantListParams, OpenAI::Internal::AnyHash) + end + + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. 
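+ #
+ # A hedged paging sketch (IDs illustrative): list with `limit: 20`, then
+ # pass the last returned ID as `after` (or the first as `before`) on the
+ # next call to walk forward or backward through the collection.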
+ sig { returns(T.nilable(String)) } + attr_reader :before + + sig { params(before: String).void } + attr_writer :before + + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + sig do + returns(T.nilable(OpenAI::Beta::AssistantListParams::Order::OrSymbol)) + end + attr_reader :order + + sig do + params(order: OpenAI::Beta::AssistantListParams::Order::OrSymbol).void + end + attr_writer :order + + sig do + params( + after: String, + before: String, + limit: Integer, + order: OpenAI::Beta::AssistantListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + before: String, + limit: Integer, + order: OpenAI::Beta::AssistantListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::AssistantListParams::Order) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = + T.let(:asc, OpenAI::Beta::AssistantListParams::Order::TaggedSymbol) + DESC = + T.let(:desc, OpenAI::Beta::AssistantListParams::Order::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::Beta::AssistantListParams::Order::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/assistant_response_format_option.rbi b/rbi/openai/models/beta/assistant_response_format_option.rbi new file mode 100644 index 00000000..744863b5 --- /dev/null +++ b/rbi/openai/models/beta/assistant_response_format_option.rbi @@ -0,0 +1,49 @@ +# typed: strong + +module OpenAI + module Models + module Beta + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. 
Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + module AssistantResponseFormatOption + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + Symbol, + OpenAI::ResponseFormatText, + OpenAI::ResponseFormatJSONObject, + OpenAI::ResponseFormatJSONSchema + ) + end + + sig do + override.returns( + T::Array[OpenAI::Beta::AssistantResponseFormatOption::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/beta/assistant_retrieve_params.rbi b/rbi/openai/models/beta/assistant_retrieve_params.rbi new file mode 100644 index 00000000..4f006925 --- /dev/null +++ b/rbi/openai/models/beta/assistant_retrieve_params.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class AssistantRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantRetrieveParams, + OpenAI::Internal::AnyHash + ) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/beta/assistant_stream_event.rbi b/rbi/openai/models/beta/assistant_stream_event.rbi new file mode 100644 index 00000000..fc236a1b --- /dev/null +++ b/rbi/openai/models/beta/assistant_stream_event.rbi @@ -0,0 +1,1170 @@ +# typed: strong + +module OpenAI + module Models + module Beta + # Represents an event emitted when streaming a Run. + # + # Each event in a server-sent events stream has an `event` and `data` property: + # + # ``` + # event: thread.created + # data: {"id": "thread_123", "object": "thread", ...} + # ``` + # + # We emit events whenever a new object is created, transitions to a new state, or + # is being streamed in parts (deltas). For example, we emit `thread.run.created` + # when a new run is created, `thread.run.completed` when a run completes, and so + # on. When an Assistant chooses to create a message during a run, we emit a + # `thread.message.created event`, a `thread.message.in_progress` event, many + # `thread.message.delta` events, and finally a `thread.message.completed` event. + # + # We may add additional events over time, so we recommend handling unknown events + # gracefully in your code. See the + # [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview) + # to learn how to integrate the Assistants API with streaming. 
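+ #
+ # A hedged consumption sketch (`stream` and `handle_delta` are assumed,
+ # illustrative names); unmatched events fall through so unknown event
+ # types degrade gracefully:
+ #
+ #   stream.each do |event|
+ #     case event
+ #     when OpenAI::Beta::AssistantStreamEvent::ThreadMessageDelta
+ #       handle_delta(event.data)
+ #     when OpenAI::Beta::AssistantStreamEvent::ThreadRunCompleted
+ #       break
+ #     end
+ #   end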
+ module AssistantStreamEvent + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadCreated, + OpenAI::Beta::AssistantStreamEvent::ThreadRunCreated, + OpenAI::Beta::AssistantStreamEvent::ThreadRunQueued, + OpenAI::Beta::AssistantStreamEvent::ThreadRunInProgress, + OpenAI::Beta::AssistantStreamEvent::ThreadRunRequiresAction, + OpenAI::Beta::AssistantStreamEvent::ThreadRunCompleted, + OpenAI::Beta::AssistantStreamEvent::ThreadRunIncomplete, + OpenAI::Beta::AssistantStreamEvent::ThreadRunFailed, + OpenAI::Beta::AssistantStreamEvent::ThreadRunCancelling, + OpenAI::Beta::AssistantStreamEvent::ThreadRunCancelled, + OpenAI::Beta::AssistantStreamEvent::ThreadRunExpired, + OpenAI::Beta::AssistantStreamEvent::ThreadRunStepCreated, + OpenAI::Beta::AssistantStreamEvent::ThreadRunStepInProgress, + OpenAI::Beta::AssistantStreamEvent::ThreadRunStepDelta, + OpenAI::Beta::AssistantStreamEvent::ThreadRunStepCompleted, + OpenAI::Beta::AssistantStreamEvent::ThreadRunStepFailed, + OpenAI::Beta::AssistantStreamEvent::ThreadRunStepCancelled, + OpenAI::Beta::AssistantStreamEvent::ThreadRunStepExpired, + OpenAI::Beta::AssistantStreamEvent::ThreadMessageCreated, + OpenAI::Beta::AssistantStreamEvent::ThreadMessageInProgress, + OpenAI::Beta::AssistantStreamEvent::ThreadMessageDelta, + OpenAI::Beta::AssistantStreamEvent::ThreadMessageCompleted, + OpenAI::Beta::AssistantStreamEvent::ThreadMessageIncomplete, + OpenAI::Beta::AssistantStreamEvent::ErrorEvent + ) + end + + class ThreadCreated < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadCreated, + OpenAI::Internal::AnyHash + ) + end + + # Represents a thread that contains + # [messages](https://platform.openai.com/docs/api-reference/messages). + sig { returns(OpenAI::Beta::Thread) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Thread::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Whether to enable input audio transcription. + sig { returns(T.nilable(T::Boolean)) } + attr_reader :enabled + + sig { params(enabled: T::Boolean).void } + attr_writer :enabled + + # Occurs when a new + # [thread](https://platform.openai.com/docs/api-reference/threads/object) is + # created. + sig do + params( + data: OpenAI::Beta::Thread::OrHash, + enabled: T::Boolean, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a thread that contains + # [messages](https://platform.openai.com/docs/api-reference/messages). + data:, + # Whether to enable input audio transcription. + enabled: nil, + event: :"thread.created" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Thread, event: Symbol, enabled: T::Boolean } + ) + end + def to_hash + end + end + + class ThreadRunCreated < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunCreated, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a new + # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. 
+ sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.created" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunQueued < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunQueued, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `queued` status. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.queued" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunInProgress, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to an `in_progress` status. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.in_progress" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunRequiresAction, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `requires_action` status. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). 
+ data:, + event: :"thread.run.requires_action" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunCompleted, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # is completed. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.completed" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunIncomplete, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # ends with status `incomplete`. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.incomplete" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunFailed < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunFailed, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # fails. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). 
+ data:, + event: :"thread.run.failed" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunCancelling, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `cancelling` status. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.cancelling" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunCancelled, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # is cancelled. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.cancelled" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunExpired < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunExpired, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # expires. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.expired" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunStepCreated < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunStepCreated, + OpenAI::Internal::AnyHash + ) + end + + # Represents a step in execution of a run. 
+ sig { returns(OpenAI::Beta::Threads::Runs::RunStep) } + attr_reader :data + + sig do + params(data: OpenAI::Beta::Threads::Runs::RunStep::OrHash).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is created. + sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStep::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a step in execution of a run. + data:, + event: :"thread.run.step.created" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Runs::RunStep, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunStepInProgress < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunStepInProgress, + OpenAI::Internal::AnyHash + ) + end + + # Represents a step in execution of a run. + sig { returns(OpenAI::Beta::Threads::Runs::RunStep) } + attr_reader :data + + sig do + params(data: OpenAI::Beta::Threads::Runs::RunStep::OrHash).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # moves to an `in_progress` state. + sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStep::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a step in execution of a run. + data:, + event: :"thread.run.step.in_progress" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Runs::RunStep, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunStepDelta < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunStepDelta, + OpenAI::Internal::AnyHash + ) + end + + # Represents a run step delta i.e. any changed fields on a run step during + # streaming. + sig { returns(OpenAI::Beta::Threads::Runs::RunStepDeltaEvent) } + attr_reader :data + + sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStepDeltaEvent::OrHash + ).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when parts of a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # are being streamed. + sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStepDeltaEvent::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a run step delta i.e. any changed fields on a run step during + # streaming. + data:, + event: :"thread.run.step.delta" + ) + end + + sig do + override.returns( + { + data: OpenAI::Beta::Threads::Runs::RunStepDeltaEvent, + event: Symbol + } + ) + end + def to_hash + end + end + + class ThreadRunStepCompleted < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunStepCompleted, + OpenAI::Internal::AnyHash + ) + end + + # Represents a step in execution of a run. + sig { returns(OpenAI::Beta::Threads::Runs::RunStep) } + attr_reader :data + + sig do + params(data: OpenAI::Beta::Threads::Runs::RunStep::OrHash).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is completed. 
+ sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStep::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a step in execution of a run. + data:, + event: :"thread.run.step.completed" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Runs::RunStep, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunStepFailed < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunStepFailed, + OpenAI::Internal::AnyHash + ) + end + + # Represents a step in execution of a run. + sig { returns(OpenAI::Beta::Threads::Runs::RunStep) } + attr_reader :data + + sig do + params(data: OpenAI::Beta::Threads::Runs::RunStep::OrHash).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # fails. + sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStep::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a step in execution of a run. + data:, + event: :"thread.run.step.failed" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Runs::RunStep, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunStepCancelled < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunStepCancelled, + OpenAI::Internal::AnyHash + ) + end + + # Represents a step in execution of a run. + sig { returns(OpenAI::Beta::Threads::Runs::RunStep) } + attr_reader :data + + sig do + params(data: OpenAI::Beta::Threads::Runs::RunStep::OrHash).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is cancelled. + sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStep::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a step in execution of a run. + data:, + event: :"thread.run.step.cancelled" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Runs::RunStep, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunStepExpired < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadRunStepExpired, + OpenAI::Internal::AnyHash + ) + end + + # Represents a step in execution of a run. + sig { returns(OpenAI::Beta::Threads::Runs::RunStep) } + attr_reader :data + + sig do + params(data: OpenAI::Beta::Threads::Runs::RunStep::OrHash).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # expires. + sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStep::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a step in execution of a run. 
+ data:, + event: :"thread.run.step.expired" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Runs::RunStep, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadMessageCreated, + OpenAI::Internal::AnyHash + ) + end + + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Message) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Message::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # created. + sig do + params( + data: OpenAI::Beta::Threads::Message::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.message.created" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Message, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadMessageInProgress, + OpenAI::Internal::AnyHash + ) + end + + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Message) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Message::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) moves + # to an `in_progress` state. + sig do + params( + data: OpenAI::Beta::Threads::Message::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.message.in_progress" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Message, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadMessageDelta, + OpenAI::Internal::AnyHash + ) + end + + # Represents a message delta i.e. any changed fields on a message during + # streaming. + sig { returns(OpenAI::Beta::Threads::MessageDeltaEvent) } + attr_reader :data + + sig do + params(data: OpenAI::Beta::Threads::MessageDeltaEvent::OrHash).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when parts of a + # [Message](https://platform.openai.com/docs/api-reference/messages/object) are + # being streamed. + sig do + params( + data: OpenAI::Beta::Threads::MessageDeltaEvent::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a message delta i.e. any changed fields on a message during + # streaming. 
+ data:, + event: :"thread.message.delta" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::MessageDeltaEvent, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadMessageCompleted, + OpenAI::Internal::AnyHash + ) + end + + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Message) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Message::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # completed. + sig do + params( + data: OpenAI::Beta::Threads::Message::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.message.completed" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Message, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadMessageIncomplete < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ThreadMessageIncomplete, + OpenAI::Internal::AnyHash + ) + end + + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Message) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Message::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) ends + # before it is completed. + sig do + params( + data: OpenAI::Beta::Threads::Message::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.message.incomplete" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Message, event: Symbol } + ) + end + def to_hash + end + end + + class ErrorEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantStreamEvent::ErrorEvent, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(OpenAI::ErrorObject) } + attr_reader :data + + sig { params(data: OpenAI::ErrorObject::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when an + # [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs. + # This can happen due to an internal server error or a timeout. 
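+          #
+          # A hedged construction sketch; the error fields shown are invented
+          # examples, and `data` also accepts an `OpenAI::ErrorObject`:
+          #
+          #   OpenAI::Beta::AssistantStreamEvent::ErrorEvent.new(
+          #     data: {code: "server_error", message: "The server had an error."}
+          #   )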
+ sig do + params(data: OpenAI::ErrorObject::OrHash, event: Symbol).returns( + T.attached_class + ) + end + def self.new(data:, event: :error) + end + + sig { override.returns({ data: OpenAI::ErrorObject, event: Symbol }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[OpenAI::Beta::AssistantStreamEvent::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/beta/assistant_tool.rbi b/rbi/openai/models/beta/assistant_tool.rbi new file mode 100644 index 00000000..84570448 --- /dev/null +++ b/rbi/openai/models/beta/assistant_tool.rbi @@ -0,0 +1,26 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module AssistantTool + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::FileSearchTool, + OpenAI::Beta::FunctionTool + ) + end + + sig do + override.returns(T::Array[OpenAI::Beta::AssistantTool::Variants]) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/beta/assistant_tool_choice.rbi b/rbi/openai/models/beta/assistant_tool_choice.rbi new file mode 100644 index 00000000..11dd4c1a --- /dev/null +++ b/rbi/openai/models/beta/assistant_tool_choice.rbi @@ -0,0 +1,89 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class AssistantToolChoice < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Beta::AssistantToolChoice, OpenAI::Internal::AnyHash) + end + + # The type of the tool. If type is `function`, the function name must be set + sig { returns(OpenAI::Beta::AssistantToolChoice::Type::OrSymbol) } + attr_accessor :type + + sig { returns(T.nilable(OpenAI::Beta::AssistantToolChoiceFunction)) } + attr_reader :function + + sig do + params( + function: OpenAI::Beta::AssistantToolChoiceFunction::OrHash + ).void + end + attr_writer :function + + # Specifies a tool the model should use. Use to force the model to call a specific + # tool. + sig do + params( + type: OpenAI::Beta::AssistantToolChoice::Type::OrSymbol, + function: OpenAI::Beta::AssistantToolChoiceFunction::OrHash + ).returns(T.attached_class) + end + def self.new( + # The type of the tool. If type is `function`, the function name must be set + type:, + function: nil + ) + end + + sig do + override.returns( + { + type: OpenAI::Beta::AssistantToolChoice::Type::OrSymbol, + function: OpenAI::Beta::AssistantToolChoiceFunction + } + ) + end + def to_hash + end + + # The type of the tool. 
If type is `function`, the function name must be set + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::AssistantToolChoice::Type) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + FUNCTION = + T.let( + :function, + OpenAI::Beta::AssistantToolChoice::Type::TaggedSymbol + ) + CODE_INTERPRETER = + T.let( + :code_interpreter, + OpenAI::Beta::AssistantToolChoice::Type::TaggedSymbol + ) + FILE_SEARCH = + T.let( + :file_search, + OpenAI::Beta::AssistantToolChoice::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Beta::AssistantToolChoice::Type::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/assistant_tool_choice_function.rbi b/rbi/openai/models/beta/assistant_tool_choice_function.rbi new file mode 100644 index 00000000..88ed6dbc --- /dev/null +++ b/rbi/openai/models/beta/assistant_tool_choice_function.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class AssistantToolChoiceFunction < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantToolChoiceFunction, + OpenAI::Internal::AnyHash + ) + end + + # The name of the function to call. + sig { returns(String) } + attr_accessor :name + + sig { params(name: String).returns(T.attached_class) } + def self.new( + # The name of the function to call. + name: + ) + end + + sig { override.returns({ name: String }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/beta/assistant_tool_choice_option.rbi b/rbi/openai/models/beta/assistant_tool_choice_option.rbi new file mode 100644 index 00000000..823ebeb1 --- /dev/null +++ b/rbi/openai/models/beta/assistant_tool_choice_option.rbi @@ -0,0 +1,74 @@ +# typed: strong + +module OpenAI + module Models + module Beta + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + module AssistantToolChoiceOption + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::AssistantToolChoiceOption::Auto::TaggedSymbol, + OpenAI::Beta::AssistantToolChoice + ) + end + + # `none` means the model will not call any tools and instead generates a message. + # `auto` means the model can pick between generating a message or calling one or + # more tools. `required` means the model must call one or more tools before + # responding to the user. 
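+        #
+        # A hedged sketch of the two shapes this union accepts (the `tool_choice`
+        # request parameter is assumed from the runs API, not defined here):
+        #
+        #   tool_choice = :auto
+        #   tool_choice = OpenAI::Beta::AssistantToolChoice.new(type: :file_search)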
+ module Auto + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::AssistantToolChoiceOption::Auto) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + NONE = + T.let( + :none, + OpenAI::Beta::AssistantToolChoiceOption::Auto::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::Beta::AssistantToolChoiceOption::Auto::TaggedSymbol + ) + REQUIRED = + T.let( + :required, + OpenAI::Beta::AssistantToolChoiceOption::Auto::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::AssistantToolChoiceOption::Auto::TaggedSymbol + ] + ) + end + def self.values + end + end + + sig do + override.returns( + T::Array[OpenAI::Beta::AssistantToolChoiceOption::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/beta/assistant_update_params.rbi b/rbi/openai/models/beta/assistant_update_params.rbi new file mode 100644 index 00000000..217e526e --- /dev/null +++ b/rbi/openai/models/beta/assistant_update_params.rbi @@ -0,0 +1,710 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class AssistantUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantUpdateParams, + OpenAI::Internal::AnyHash + ) + end + + # The description of the assistant. The maximum length is 512 characters. + sig { returns(T.nilable(String)) } + attr_accessor :description + + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. + sig { returns(T.nilable(String)) } + attr_accessor :instructions + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + sig do + returns( + T.nilable( + T.any( + String, + OpenAI::Beta::AssistantUpdateParams::Model::OrSymbol + ) + ) + ) + end + attr_reader :model + + sig do + params( + model: + T.any( + String, + OpenAI::Beta::AssistantUpdateParams::Model::OrSymbol + ) + ).void + end + attr_writer :model + + # The name of the assistant. The maximum length is 256 characters. + sig { returns(T.nilable(String)) } + attr_accessor :name + + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } + attr_accessor :reasoning_effort + + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
+        #
+        # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+        # Outputs which ensures the model will match your supplied JSON schema. Learn more
+        # in the
+        # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+        #
+        # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+        # message the model generates is valid JSON.
+        #
+        # **Important:** when using JSON mode, you **must** also instruct the model to
+        # produce JSON yourself via a system or user message. Without this, the model may
+        # generate an unending stream of whitespace until the generation reaches the token
+        # limit, resulting in a long-running and seemingly "stuck" request. Also note that
+        # the message content may be partially cut off if `finish_reason="length"`, which
+        # indicates the generation exceeded `max_tokens` or the conversation exceeded the
+        # max context length.
+        sig do
+          returns(
+            T.nilable(
+              T.any(
+                Symbol,
+                OpenAI::ResponseFormatText,
+                OpenAI::ResponseFormatJSONObject,
+                OpenAI::ResponseFormatJSONSchema
+              )
+            )
+          )
+        end
+        attr_accessor :response_format
+
+        # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+        # make the output more random, while lower values like 0.2 will make it more
+        # focused and deterministic.
+        sig { returns(T.nilable(Float)) }
+        attr_accessor :temperature
+
+        # A set of resources that are used by the assistant's tools. The resources are
+        # specific to the type of tool. For example, the `code_interpreter` tool requires
+        # a list of file IDs, while the `file_search` tool requires a list of vector store
+        # IDs.
+        sig do
+          returns(T.nilable(OpenAI::Beta::AssistantUpdateParams::ToolResources))
+        end
+        attr_reader :tool_resources
+
+        sig do
+          params(
+            tool_resources:
+              T.nilable(
+                OpenAI::Beta::AssistantUpdateParams::ToolResources::OrHash
+              )
+          ).void
+        end
+        attr_writer :tool_resources
+
+        # A list of tools enabled on the assistant. There can be a maximum of 128 tools per
+        # assistant. Tools can be of types `code_interpreter`, `file_search`, or
+        # `function`.
+        sig do
+          returns(
+            T.nilable(
+              T::Array[
+                T.any(
+                  OpenAI::Beta::CodeInterpreterTool,
+                  OpenAI::Beta::FileSearchTool,
+                  OpenAI::Beta::FunctionTool
+                )
+              ]
+            )
+          )
+        end
+        attr_reader :tools
+
+        sig do
+          params(
+            tools:
+              T::Array[
+                T.any(
+                  OpenAI::Beta::CodeInterpreterTool::OrHash,
+                  OpenAI::Beta::FileSearchTool::OrHash,
+                  OpenAI::Beta::FunctionTool::OrHash
+                )
+              ]
+          ).void
+        end
+        attr_writer :tools
+
+        # An alternative to sampling with temperature, called nucleus sampling, where the
+        # model considers the results of the tokens with top_p probability mass. So 0.1
+        # means only the tokens comprising the top 10% probability mass are considered.
+        #
+        # We generally recommend altering this or temperature but not both.
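+        #
+        # A hedged usage sketch (client construction and the assistant ID are
+        # assumed); per the note above, set this or `temperature`, not both:
+        #
+        #   client.beta.assistants.update("asst_abc123", top_p: 0.2)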
+ sig { returns(T.nilable(Float)) } + attr_accessor :top_p + + sig do + params( + description: T.nilable(String), + instructions: T.nilable(String), + metadata: T.nilable(T::Hash[Symbol, String]), + model: + T.any( + String, + OpenAI::Beta::AssistantUpdateParams::Model::OrSymbol + ), + name: T.nilable(String), + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + response_format: + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash + ) + ), + temperature: T.nilable(Float), + tool_resources: + T.nilable( + OpenAI::Beta::AssistantUpdateParams::ToolResources::OrHash + ), + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::FileSearchTool::OrHash, + OpenAI::Beta::FunctionTool::OrHash + ) + ], + top_p: T.nilable(Float), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The description of the assistant. The maximum length is 512 characters. + description: nil, + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. + instructions: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + model: nil, + # The name of the assistant. The maximum length is 256 characters. + name: nil, + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + response_format: nil, + # What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will
+          # make the output more random, while lower values like 0.2 will make it more
+          # focused and deterministic.
+          temperature: nil,
+          # A set of resources that are used by the assistant's tools. The resources are
+          # specific to the type of tool. For example, the `code_interpreter` tool requires
+          # a list of file IDs, while the `file_search` tool requires a list of vector store
+          # IDs.
+          tool_resources: nil,
+          # A list of tools enabled on the assistant. There can be a maximum of 128 tools per
+          # assistant. Tools can be of types `code_interpreter`, `file_search`, or
+          # `function`.
+          tools: nil,
+          # An alternative to sampling with temperature, called nucleus sampling, where the
+          # model considers the results of the tokens with top_p probability mass. So 0.1
+          # means only the tokens comprising the top 10% probability mass are considered.
+          #
+          # We generally recommend altering this or temperature but not both.
+          top_p: nil,
+          request_options: {}
+        )
+        end
+
+        sig do
+          override.returns(
+            {
+              description: T.nilable(String),
+              instructions: T.nilable(String),
+              metadata: T.nilable(T::Hash[Symbol, String]),
+              model:
+                T.any(
+                  String,
+                  OpenAI::Beta::AssistantUpdateParams::Model::OrSymbol
+                ),
+              name: T.nilable(String),
+              reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol),
+              response_format:
+                T.nilable(
+                  T.any(
+                    Symbol,
+                    OpenAI::ResponseFormatText,
+                    OpenAI::ResponseFormatJSONObject,
+                    OpenAI::ResponseFormatJSONSchema
+                  )
+                ),
+              temperature: T.nilable(Float),
+              tool_resources:
+                T.nilable(OpenAI::Beta::AssistantUpdateParams::ToolResources),
+              tools:
+                T::Array[
+                  T.any(
+                    OpenAI::Beta::CodeInterpreterTool,
+                    OpenAI::Beta::FileSearchTool,
+                    OpenAI::Beta::FunctionTool
+                  )
+                ],
+              top_p: T.nilable(Float),
+              request_options: OpenAI::RequestOptions
+            }
+          )
+        end
+        def to_hash
+        end
+
+        # ID of the model to use. You can use the
+        # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+        # see all of your available models, or see our
+        # [Model overview](https://platform.openai.com/docs/models) for descriptions of
+        # them.
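+        #
+        # The enum below is open: both a tagged constant and a plain `String` are
+        # accepted. A hedged sketch (the fine-tuned model name is invented):
+        #
+        #   OpenAI::Beta::AssistantUpdateParams.new(model: :"gpt-4o")
+        #   OpenAI::Beta::AssistantUpdateParams.new(model: "ft:gpt-4o:my-org::abc123")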
+ module Model + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + end + + sig do + override.returns( + T::Array[OpenAI::Beta::AssistantUpdateParams::Model::Variants] + ) + end + def self.variants + end + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::AssistantUpdateParams::Model) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + GPT_5 = + T.let( + :"gpt-5", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_5_MINI = + T.let( + :"gpt-5-mini", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_5_NANO = + T.let( + :"gpt-5-nano", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_5_2025_08_07 = + T.let( + :"gpt-5-2025-08-07", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_5_MINI_2025_08_07 = + T.let( + :"gpt-5-mini-2025-08-07", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_5_NANO_2025_08_07 = + T.let( + :"gpt-5-nano-2025-08-07", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_1 = + T.let( + :"gpt-4.1", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_1_MINI = + T.let( + :"gpt-4.1-mini", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_1_NANO = + T.let( + :"gpt-4.1-nano", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_1_2025_04_14 = + T.let( + :"gpt-4.1-2025-04-14", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_1_MINI_2025_04_14 = + T.let( + :"gpt-4.1-mini-2025-04-14", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_1_NANO_2025_04_14 = + T.let( + :"gpt-4.1-nano-2025-04-14", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + O3_MINI = + T.let( + :"o3-mini", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + O3_MINI_2025_01_31 = + T.let( + :"o3-mini-2025-01-31", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + O1 = + T.let(:o1, OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol) + O1_2024_12_17 = + T.let( + :"o1-2024-12-17", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4O = + T.let( + :"gpt-4o", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4O_2024_11_20 = + T.let( + :"gpt-4o-2024-11-20", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4O_2024_08_06 = + T.let( + :"gpt-4o-2024-08-06", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4O_2024_05_13 = + T.let( + :"gpt-4o-2024-05-13", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4O_MINI = + T.let( + :"gpt-4o-mini", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4O_MINI_2024_07_18 = + T.let( + :"gpt-4o-mini-2024-07-18", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_5_PREVIEW = + T.let( + :"gpt-4.5-preview", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_5_PREVIEW_2025_02_27 = + T.let( + :"gpt-4.5-preview-2025-02-27", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_TURBO = + T.let( + :"gpt-4-turbo", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_TURBO_2024_04_09 = + T.let( + :"gpt-4-turbo-2024-04-09", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_0125_PREVIEW = + T.let( + :"gpt-4-0125-preview", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_TURBO_PREVIEW = + T.let( 
+ :"gpt-4-turbo-preview", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_1106_PREVIEW = + T.let( + :"gpt-4-1106-preview", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_VISION_PREVIEW = + T.let( + :"gpt-4-vision-preview", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4 = + T.let( + :"gpt-4", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_0314 = + T.let( + :"gpt-4-0314", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_0613 = + T.let( + :"gpt-4-0613", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_32K = + T.let( + :"gpt-4-32k", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_32K_0314 = + T.let( + :"gpt-4-32k-0314", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_4_32K_0613 = + T.let( + :"gpt-4-32k-0613", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_3_5_TURBO = + T.let( + :"gpt-3.5-turbo", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_3_5_TURBO_16K = + T.let( + :"gpt-3.5-turbo-16k", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_3_5_TURBO_0613 = + T.let( + :"gpt-3.5-turbo-0613", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_3_5_TURBO_1106 = + T.let( + :"gpt-3.5-turbo-1106", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_3_5_TURBO_0125 = + T.let( + :"gpt-3.5-turbo-0125", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_3_5_TURBO_16K_0613 = + T.let( + :"gpt-3.5-turbo-16k-0613", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + end + + class ToolResources < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantUpdateParams::ToolResources, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + T.nilable( + OpenAI::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter + ) + ) + end + attr_reader :code_interpreter + + sig do + params( + code_interpreter: + OpenAI::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter::OrHash + ).void + end + attr_writer :code_interpreter + + sig do + returns( + T.nilable( + OpenAI::Beta::AssistantUpdateParams::ToolResources::FileSearch + ) + ) + end + attr_reader :file_search + + sig do + params( + file_search: + OpenAI::Beta::AssistantUpdateParams::ToolResources::FileSearch::OrHash + ).void + end + attr_writer :file_search + + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. 
+ sig do + params( + code_interpreter: + OpenAI::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter::OrHash, + file_search: + OpenAI::Beta::AssistantUpdateParams::ToolResources::FileSearch::OrHash + ).returns(T.attached_class) + end + def self.new(code_interpreter: nil, file_search: nil) + end + + sig do + override.returns( + { + code_interpreter: + OpenAI::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter, + file_search: + OpenAI::Beta::AssistantUpdateParams::ToolResources::FileSearch + } + ) + end + def to_hash + end + + class CodeInterpreter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter, + OpenAI::Internal::AnyHash + ) + end + + # Overrides the list of + # [file](https://platform.openai.com/docs/api-reference/files) IDs made available + # to the `code_interpreter` tool. There can be a maximum of 20 files associated + # with the tool. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :file_ids + + sig { params(file_ids: T::Array[String]).void } + attr_writer :file_ids + + sig { params(file_ids: T::Array[String]).returns(T.attached_class) } + def self.new( + # Overrides the list of + # [file](https://platform.openai.com/docs/api-reference/files) IDs made available + # to the `code_interpreter` tool. There can be a maximum of 20 files associated + # with the tool. + file_ids: nil + ) + end + + sig { override.returns({ file_ids: T::Array[String] }) } + def to_hash + end + end + + class FileSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::AssistantUpdateParams::ToolResources::FileSearch, + OpenAI::Internal::AnyHash + ) + end + + # Overrides the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :vector_store_ids + + sig { params(vector_store_ids: T::Array[String]).void } + attr_writer :vector_store_ids + + sig do + params(vector_store_ids: T::Array[String]).returns( + T.attached_class + ) + end + def self.new( + # Overrides the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. 
+ vector_store_ids: nil + ) + end + + sig { override.returns({ vector_store_ids: T::Array[String] }) } + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/code_interpreter_tool.rbi b/rbi/openai/models/beta/code_interpreter_tool.rbi new file mode 100644 index 00000000..3d315b25 --- /dev/null +++ b/rbi/openai/models/beta/code_interpreter_tool.rbi @@ -0,0 +1,29 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class CodeInterpreterTool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Beta::CodeInterpreterTool, OpenAI::Internal::AnyHash) + end + + # The type of tool being defined: `code_interpreter` + sig { returns(Symbol) } + attr_accessor :type + + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The type of tool being defined: `code_interpreter` + type: :code_interpreter + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/beta/file_search_tool.rbi b/rbi/openai/models/beta/file_search_tool.rbi new file mode 100644 index 00000000..656a81da --- /dev/null +++ b/rbi/openai/models/beta/file_search_tool.rbi @@ -0,0 +1,246 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class FileSearchTool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Beta::FileSearchTool, OpenAI::Internal::AnyHash) + end + + # The type of tool being defined: `file_search` + sig { returns(Symbol) } + attr_accessor :type + + # Overrides for the file search tool. + sig { returns(T.nilable(OpenAI::Beta::FileSearchTool::FileSearch)) } + attr_reader :file_search + + sig do + params( + file_search: OpenAI::Beta::FileSearchTool::FileSearch::OrHash + ).void + end + attr_writer :file_search + + sig do + params( + file_search: OpenAI::Beta::FileSearchTool::FileSearch::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Overrides for the file search tool. + file_search: nil, + # The type of tool being defined: `file_search` + type: :file_search + ) + end + + sig do + override.returns( + { + type: Symbol, + file_search: OpenAI::Beta::FileSearchTool::FileSearch + } + ) + end + def to_hash + end + + class FileSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::FileSearchTool::FileSearch, + OpenAI::Internal::AnyHash + ) + end + + # The maximum number of results the file search tool should output. The default is + # 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between + # 1 and 50 inclusive. + # + # Note that the file search tool may output fewer than `max_num_results` results. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. + sig { returns(T.nilable(Integer)) } + attr_reader :max_num_results + + sig { params(max_num_results: Integer).void } + attr_writer :max_num_results + + # The ranking options for the file search. If not specified, the file search tool + # will use the `auto` ranker and a score_threshold of 0. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. 
+          sig do
+            returns(
+              T.nilable(
+                OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions
+              )
+            )
+          end
+          attr_reader :ranking_options
+
+          sig do
+            params(
+              ranking_options:
+                OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions::OrHash
+            ).void
+          end
+          attr_writer :ranking_options
+
+          # Overrides for the file search tool.
+          sig do
+            params(
+              max_num_results: Integer,
+              ranking_options:
+                OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions::OrHash
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # The maximum number of results the file search tool should output. The default is
+            # 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between
+            # 1 and 50 inclusive.
+            #
+            # Note that the file search tool may output fewer than `max_num_results` results.
+            # See the
+            # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+            # for more information.
+            max_num_results: nil,
+            # The ranking options for the file search. If not specified, the file search tool
+            # will use the `auto` ranker and a score_threshold of 0.
+            #
+            # See the
+            # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+            # for more information.
+            ranking_options: nil
+          )
+          end
+
+          sig do
+            override.returns(
+              {
+                max_num_results: Integer,
+                ranking_options:
+                  OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions
+              }
+            )
+          end
+          def to_hash
+          end
+
+          class RankingOptions < OpenAI::Internal::Type::BaseModel
+            OrHash =
+              T.type_alias do
+                T.any(
+                  OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions,
+                  OpenAI::Internal::AnyHash
+                )
+              end
+
+            # The score threshold for the file search. The value must be a floating point
+            # number between 0 and 1.
+            sig { returns(Float) }
+            attr_accessor :score_threshold
+
+            # The ranker to use for the file search. If not specified, the `auto` ranker
+            # is used.
+            sig do
+              returns(
+                T.nilable(
+                  OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker::OrSymbol
+                )
+              )
+            end
+            attr_reader :ranker
+
+            sig do
+              params(
+                ranker:
+                  OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker::OrSymbol
+              ).void
+            end
+            attr_writer :ranker
+
+            # The ranking options for the file search. If not specified, the file search tool
+            # will use the `auto` ranker and a score_threshold of 0.
+            #
+            # See the
+            # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+            # for more information.
+            sig do
+              params(
+                score_threshold: Float,
+                ranker:
+                  OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker::OrSymbol
+              ).returns(T.attached_class)
+            end
+            def self.new(
+              # The score threshold for the file search. The value must be a floating point
+              # number between 0 and 1.
+              score_threshold:,
+              # The ranker to use for the file search. If not specified, the `auto` ranker
+              # is used.
+              ranker: nil
+            )
+            end
+
+            sig do
+              override.returns(
+                {
+                  score_threshold: Float,
+                  ranker:
+                    OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker::OrSymbol
+                }
+              )
+            end
+            def to_hash
+            end
+
+            # The ranker to use for the file search. If not specified, the `auto` ranker
+            # is used.
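+            #
+            # A hedged sketch pairing a stricter cutoff with the default ranker,
+            # using only names defined in this class:
+            #
+            #   OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions.new(
+            #     score_threshold: 0.5,
+            #     ranker: :auto
+            #   )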
+ module Ranker + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker::TaggedSymbol + ) + DEFAULT_2024_08_21 = + T.let( + :default_2024_08_21, + OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/function_tool.rbi b/rbi/openai/models/beta/function_tool.rbi new file mode 100644 index 00000000..87fcce58 --- /dev/null +++ b/rbi/openai/models/beta/function_tool.rbi @@ -0,0 +1,45 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class FunctionTool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Beta::FunctionTool, OpenAI::Internal::AnyHash) + end + + sig { returns(OpenAI::FunctionDefinition) } + attr_reader :function + + sig { params(function: OpenAI::FunctionDefinition::OrHash).void } + attr_writer :function + + # The type of tool being defined: `function` + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + function: OpenAI::FunctionDefinition::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + function:, + # The type of tool being defined: `function` + type: :function + ) + end + + sig do + override.returns( + { function: OpenAI::FunctionDefinition, type: Symbol } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/beta/message_stream_event.rbi b/rbi/openai/models/beta/message_stream_event.rbi new file mode 100644 index 00000000..01893282 --- /dev/null +++ b/rbi/openai/models/beta/message_stream_event.rbi @@ -0,0 +1,263 @@ +# typed: strong + +module OpenAI + module Models + module Beta + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # created. + module MessageStreamEvent + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::MessageStreamEvent::ThreadMessageCreated, + OpenAI::Beta::MessageStreamEvent::ThreadMessageInProgress, + OpenAI::Beta::MessageStreamEvent::ThreadMessageDelta, + OpenAI::Beta::MessageStreamEvent::ThreadMessageCompleted, + OpenAI::Beta::MessageStreamEvent::ThreadMessageIncomplete + ) + end + + class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::MessageStreamEvent::ThreadMessageCreated, + OpenAI::Internal::AnyHash + ) + end + + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Message) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Message::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # created. + sig do + params( + data: OpenAI::Beta::Threads::Message::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). 
+ data:, + event: :"thread.message.created" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Message, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::MessageStreamEvent::ThreadMessageInProgress, + OpenAI::Internal::AnyHash + ) + end + + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Message) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Message::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) moves + # to an `in_progress` state. + sig do + params( + data: OpenAI::Beta::Threads::Message::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.message.in_progress" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Message, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::MessageStreamEvent::ThreadMessageDelta, + OpenAI::Internal::AnyHash + ) + end + + # Represents a message delta i.e. any changed fields on a message during + # streaming. + sig { returns(OpenAI::Beta::Threads::MessageDeltaEvent) } + attr_reader :data + + sig do + params(data: OpenAI::Beta::Threads::MessageDeltaEvent::OrHash).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when parts of a + # [Message](https://platform.openai.com/docs/api-reference/messages/object) are + # being streamed. + sig do + params( + data: OpenAI::Beta::Threads::MessageDeltaEvent::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a message delta i.e. any changed fields on a message during + # streaming. + data:, + event: :"thread.message.delta" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::MessageDeltaEvent, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::MessageStreamEvent::ThreadMessageCompleted, + OpenAI::Internal::AnyHash + ) + end + + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Message) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Message::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # completed. + sig do + params( + data: OpenAI::Beta::Threads::Message::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). 
+ data:, + event: :"thread.message.completed" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Message, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadMessageIncomplete < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::MessageStreamEvent::ThreadMessageIncomplete, + OpenAI::Internal::AnyHash + ) + end + + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Message) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Message::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) ends + # before it is completed. + sig do + params( + data: OpenAI::Beta::Threads::Message::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.message.incomplete" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Message, event: Symbol } + ) + end + def to_hash + end + end + + sig do + override.returns(T::Array[OpenAI::Beta::MessageStreamEvent::Variants]) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/beta/run_step_stream_event.rbi b/rbi/openai/models/beta/run_step_stream_event.rbi new file mode 100644 index 00000000..5d42a8d3 --- /dev/null +++ b/rbi/openai/models/beta/run_step_stream_event.rbi @@ -0,0 +1,362 @@ +# typed: strong + +module OpenAI + module Models + module Beta + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is created. + module RunStepStreamEvent + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::RunStepStreamEvent::ThreadRunStepCreated, + OpenAI::Beta::RunStepStreamEvent::ThreadRunStepInProgress, + OpenAI::Beta::RunStepStreamEvent::ThreadRunStepDelta, + OpenAI::Beta::RunStepStreamEvent::ThreadRunStepCompleted, + OpenAI::Beta::RunStepStreamEvent::ThreadRunStepFailed, + OpenAI::Beta::RunStepStreamEvent::ThreadRunStepCancelled, + OpenAI::Beta::RunStepStreamEvent::ThreadRunStepExpired + ) + end + + class ThreadRunStepCreated < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStepStreamEvent::ThreadRunStepCreated, + OpenAI::Internal::AnyHash + ) + end + + # Represents a step in execution of a run. + sig { returns(OpenAI::Beta::Threads::Runs::RunStep) } + attr_reader :data + + sig do + params(data: OpenAI::Beta::Threads::Runs::RunStep::OrHash).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is created. + sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStep::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a step in execution of a run. 
+ data:, + event: :"thread.run.step.created" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Runs::RunStep, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunStepInProgress < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStepStreamEvent::ThreadRunStepInProgress, + OpenAI::Internal::AnyHash + ) + end + + # Represents a step in execution of a run. + sig { returns(OpenAI::Beta::Threads::Runs::RunStep) } + attr_reader :data + + sig do + params(data: OpenAI::Beta::Threads::Runs::RunStep::OrHash).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # moves to an `in_progress` state. + sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStep::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a step in execution of a run. + data:, + event: :"thread.run.step.in_progress" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Runs::RunStep, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunStepDelta < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStepStreamEvent::ThreadRunStepDelta, + OpenAI::Internal::AnyHash + ) + end + + # Represents a run step delta i.e. any changed fields on a run step during + # streaming. + sig { returns(OpenAI::Beta::Threads::Runs::RunStepDeltaEvent) } + attr_reader :data + + sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStepDeltaEvent::OrHash + ).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when parts of a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # are being streamed. + sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStepDeltaEvent::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a run step delta i.e. any changed fields on a run step during + # streaming. + data:, + event: :"thread.run.step.delta" + ) + end + + sig do + override.returns( + { + data: OpenAI::Beta::Threads::Runs::RunStepDeltaEvent, + event: Symbol + } + ) + end + def to_hash + end + end + + class ThreadRunStepCompleted < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStepStreamEvent::ThreadRunStepCompleted, + OpenAI::Internal::AnyHash + ) + end + + # Represents a step in execution of a run. + sig { returns(OpenAI::Beta::Threads::Runs::RunStep) } + attr_reader :data + + sig do + params(data: OpenAI::Beta::Threads::Runs::RunStep::OrHash).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is completed. + sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStep::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a step in execution of a run. + data:, + event: :"thread.run.step.completed" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Runs::RunStep, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunStepFailed < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStepStreamEvent::ThreadRunStepFailed, + OpenAI::Internal::AnyHash + ) + end + + # Represents a step in execution of a run. 
+ sig { returns(OpenAI::Beta::Threads::Runs::RunStep) } + attr_reader :data + + sig do + params(data: OpenAI::Beta::Threads::Runs::RunStep::OrHash).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # fails. + sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStep::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a step in execution of a run. + data:, + event: :"thread.run.step.failed" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Runs::RunStep, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunStepCancelled < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStepStreamEvent::ThreadRunStepCancelled, + OpenAI::Internal::AnyHash + ) + end + + # Represents a step in execution of a run. + sig { returns(OpenAI::Beta::Threads::Runs::RunStep) } + attr_reader :data + + sig do + params(data: OpenAI::Beta::Threads::Runs::RunStep::OrHash).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is cancelled. + sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStep::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a step in execution of a run. + data:, + event: :"thread.run.step.cancelled" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Runs::RunStep, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunStepExpired < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStepStreamEvent::ThreadRunStepExpired, + OpenAI::Internal::AnyHash + ) + end + + # Represents a step in execution of a run. + sig { returns(OpenAI::Beta::Threads::Runs::RunStep) } + attr_reader :data + + sig do + params(data: OpenAI::Beta::Threads::Runs::RunStep::OrHash).void + end + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # expires. + sig do + params( + data: OpenAI::Beta::Threads::Runs::RunStep::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a step in execution of a run. + data:, + event: :"thread.run.step.expired" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Runs::RunStep, event: Symbol } + ) + end + def to_hash + end + end + + sig do + override.returns(T::Array[OpenAI::Beta::RunStepStreamEvent::Variants]) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/beta/run_stream_event.rbi b/rbi/openai/models/beta/run_stream_event.rbi new file mode 100644 index 00000000..ed6893e1 --- /dev/null +++ b/rbi/openai/models/beta/run_stream_event.rbi @@ -0,0 +1,485 @@ +# typed: strong + +module OpenAI + module Models + module Beta + # Occurs when a new + # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. 
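+      #
+      # A hedged dispatch sketch using Ruby pattern matching (how `event` is
+      # obtained from a stream is assumed; `last_error` is a field of the Run
+      # object):
+      #
+      #   case event
+      #   in OpenAI::Beta::RunStreamEvent::ThreadRunCompleted
+      #     puts event.data.id
+      #   in OpenAI::Beta::RunStreamEvent::ThreadRunFailed
+      #     warn event.data.last_error&.message
+      #   else
+      #     # other run lifecycle events
+      #   end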
+ module RunStreamEvent + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::RunStreamEvent::ThreadRunCreated, + OpenAI::Beta::RunStreamEvent::ThreadRunQueued, + OpenAI::Beta::RunStreamEvent::ThreadRunInProgress, + OpenAI::Beta::RunStreamEvent::ThreadRunRequiresAction, + OpenAI::Beta::RunStreamEvent::ThreadRunCompleted, + OpenAI::Beta::RunStreamEvent::ThreadRunIncomplete, + OpenAI::Beta::RunStreamEvent::ThreadRunFailed, + OpenAI::Beta::RunStreamEvent::ThreadRunCancelling, + OpenAI::Beta::RunStreamEvent::ThreadRunCancelled, + OpenAI::Beta::RunStreamEvent::ThreadRunExpired + ) + end + + class ThreadRunCreated < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStreamEvent::ThreadRunCreated, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a new + # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.created" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunQueued < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStreamEvent::ThreadRunQueued, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `queued` status. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.queued" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStreamEvent::ThreadRunInProgress, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to an `in_progress` status. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). 
+ data:, + event: :"thread.run.in_progress" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStreamEvent::ThreadRunRequiresAction, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `requires_action` status. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.requires_action" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStreamEvent::ThreadRunCompleted, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # is completed. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.completed" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStreamEvent::ThreadRunIncomplete, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # ends with status `incomplete`. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). 
+ data:, + event: :"thread.run.incomplete" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunFailed < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStreamEvent::ThreadRunFailed, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # fails. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.failed" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStreamEvent::ThreadRunCancelling, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `cancelling` status. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.cancelling" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStreamEvent::ThreadRunCancelled, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # is cancelled. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.cancelled" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + class ThreadRunExpired < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::RunStreamEvent::ThreadRunExpired, + OpenAI::Internal::AnyHash + ) + end + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). 
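The run-level union follows the same pattern; the terminal states (`failed`, `cancelled`, `expired`, `incomplete`) usually share one branch, while `requires_action` is the one that demands a reply from the caller. A sketch under the same assumptions as the step-event dispatcher above (`event` is a `RunStreamEvent` variant; `submit_tool_outputs` is a hypothetical callback):

```ruby
require "openai"

def handle_run_event(event)
  case event
  when OpenAI::Beta::RunStreamEvent::ThreadRunRequiresAction
    # `data` is a Threads::Run; the caller must submit tool outputs to proceed.
    submit_tool_outputs(event.data) # hypothetical helper
  when OpenAI::Beta::RunStreamEvent::ThreadRunCompleted
    puts "run completed"
  when OpenAI::Beta::RunStreamEvent::ThreadRunFailed,
       OpenAI::Beta::RunStreamEvent::ThreadRunCancelled,
       OpenAI::Beta::RunStreamEvent::ThreadRunExpired,
       OpenAI::Beta::RunStreamEvent::ThreadRunIncomplete
    warn "run ended without completing: #{event.event}"
  end
end
```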
+ sig { returns(OpenAI::Beta::Threads::Run) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Threads::Run::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # expires. + sig do + params( + data: OpenAI::Beta::Threads::Run::OrHash, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). + data:, + event: :"thread.run.expired" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Threads::Run, event: Symbol } + ) + end + def to_hash + end + end + + sig do + override.returns(T::Array[OpenAI::Beta::RunStreamEvent::Variants]) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/beta/thread.rbi b/rbi/openai/models/beta/thread.rbi new file mode 100644 index 00000000..e828a1aa --- /dev/null +++ b/rbi/openai/models/beta/thread.rbi @@ -0,0 +1,233 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class Thread < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Beta::Thread, OpenAI::Internal::AnyHash) + end + + # The identifier, which can be referenced in API endpoints. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) for when the thread was created. + sig { returns(Integer) } + attr_accessor :created_at + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The object type, which is always `thread`. + sig { returns(Symbol) } + attr_accessor :object + + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + sig { returns(T.nilable(OpenAI::Beta::Thread::ToolResources)) } + attr_reader :tool_resources + + sig do + params( + tool_resources: + T.nilable(OpenAI::Beta::Thread::ToolResources::OrHash) + ).void + end + attr_writer :tool_resources + + # Represents a thread that contains + # [messages](https://platform.openai.com/docs/api-reference/messages). + sig do + params( + id: String, + created_at: Integer, + metadata: T.nilable(T::Hash[Symbol, String]), + tool_resources: + T.nilable(OpenAI::Beta::Thread::ToolResources::OrHash), + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The identifier, which can be referenced in API endpoints. + id:, + # The Unix timestamp (in seconds) for when the thread was created. + created_at:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + # A set of resources that are made available to the assistant's tools in this + # thread. 
The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + tool_resources:, + # The object type, which is always `thread`. + object: :thread + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + metadata: T.nilable(T::Hash[Symbol, String]), + object: Symbol, + tool_resources: T.nilable(OpenAI::Beta::Thread::ToolResources) + } + ) + end + def to_hash + end + + class ToolResources < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Thread::ToolResources, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + T.nilable(OpenAI::Beta::Thread::ToolResources::CodeInterpreter) + ) + end + attr_reader :code_interpreter + + sig do + params( + code_interpreter: + OpenAI::Beta::Thread::ToolResources::CodeInterpreter::OrHash + ).void + end + attr_writer :code_interpreter + + sig do + returns(T.nilable(OpenAI::Beta::Thread::ToolResources::FileSearch)) + end + attr_reader :file_search + + sig do + params( + file_search: + OpenAI::Beta::Thread::ToolResources::FileSearch::OrHash + ).void + end + attr_writer :file_search + + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + sig do + params( + code_interpreter: + OpenAI::Beta::Thread::ToolResources::CodeInterpreter::OrHash, + file_search: + OpenAI::Beta::Thread::ToolResources::FileSearch::OrHash + ).returns(T.attached_class) + end + def self.new(code_interpreter: nil, file_search: nil) + end + + sig do + override.returns( + { + code_interpreter: + OpenAI::Beta::Thread::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::Thread::ToolResources::FileSearch + } + ) + end + def to_hash + end + + class CodeInterpreter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Thread::ToolResources::CodeInterpreter, + OpenAI::Internal::AnyHash + ) + end + + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :file_ids + + sig { params(file_ids: T::Array[String]).void } + attr_writer :file_ids + + sig { params(file_ids: T::Array[String]).returns(T.attached_class) } + def self.new( + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. + file_ids: nil + ) + end + + sig { override.returns({ file_ids: T::Array[String] }) } + def to_hash + end + end + + class FileSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Thread::ToolResources::FileSearch, + OpenAI::Internal::AnyHash + ) + end + + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. 
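A `Thread` is normally built by the SDK from an API response, but the generated `self.new` above makes the shape easy to see in isolation. A sketch with purely illustrative values (every field below is made up):

```ruby
require "openai"

thread = OpenAI::Beta::Thread.new(
  id: "thread_abc123",         # illustrative identifier
  created_at: 1_700_000_000,   # Unix seconds
  metadata: {project: "demo"}, # up to 16 symbol => string pairs
  tool_resources: nil          # no tools attached in this sketch
)

thread.to_hash # round-trips back to a plain hash, with object: :thread
```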
+ sig { returns(T.nilable(T::Array[String])) } + attr_reader :vector_store_ids + + sig { params(vector_store_ids: T::Array[String]).void } + attr_writer :vector_store_ids + + sig do + params(vector_store_ids: T::Array[String]).returns( + T.attached_class + ) + end + def self.new( + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. + vector_store_ids: nil + ) + end + + sig { override.returns({ vector_store_ids: T::Array[String] }) } + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/thread_create_and_run_params.rbi b/rbi/openai/models/beta/thread_create_and_run_params.rbi new file mode 100644 index 00000000..31e4023d --- /dev/null +++ b/rbi/openai/models/beta/thread_create_and_run_params.rbi @@ -0,0 +1,1535 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class ThreadCreateAndRunParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. + sig { returns(String) } + attr_accessor :assistant_id + + # Override the default system message of the assistant. This is useful for + # modifying the behavior on a per-run basis. + sig { returns(T.nilable(String)) } + attr_accessor :instructions + + # The maximum number of completion tokens that may be used over the course of the + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. + sig { returns(T.nilable(Integer)) } + attr_accessor :max_completion_tokens + + # The maximum number of prompt tokens that may be used over the course of the run. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. + sig { returns(T.nilable(Integer)) } + attr_accessor :max_prompt_tokens + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. + sig { returns(T.nilable(T.any(String, OpenAI::ChatModel::OrSymbol))) } + attr_accessor :model + + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. 
+ sig { returns(T.nilable(T::Boolean)) } + attr_reader :parallel_tool_calls + + sig { params(parallel_tool_calls: T::Boolean).void } + attr_writer :parallel_tool_calls + + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + sig do + returns( + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText, + OpenAI::ResponseFormatJSONObject, + OpenAI::ResponseFormatJSONSchema + ) + ) + ) + end + attr_accessor :response_format + + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + sig { returns(T.nilable(Float)) } + attr_accessor :temperature + + # Options to create a new thread. If no thread is provided when running a request, + # an empty thread will be created. + sig do + returns(T.nilable(OpenAI::Beta::ThreadCreateAndRunParams::Thread)) + end + attr_reader :thread + + sig do + params( + thread: OpenAI::Beta::ThreadCreateAndRunParams::Thread::OrHash + ).void + end + attr_writer :thread + + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + sig do + returns( + T.nilable( + T.any( + OpenAI::Beta::AssistantToolChoiceOption::Auto::OrSymbol, + OpenAI::Beta::AssistantToolChoice + ) + ) + ) + end + attr_accessor :tool_choice + + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. + sig do + returns( + T.nilable(OpenAI::Beta::ThreadCreateAndRunParams::ToolResources) + ) + end + attr_reader :tool_resources + + sig do + params( + tool_resources: + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::OrHash + ) + ).void + end + attr_writer :tool_resources + + # Override the tools the assistant can use for this run. 
This is useful for + # modifying the behavior on a per-run basis. + sig do + returns( + T.nilable( + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::FileSearchTool, + OpenAI::Beta::FunctionTool + ) + ] + ) + ) + end + attr_accessor :tools + + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. + sig { returns(T.nilable(Float)) } + attr_accessor :top_p + + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. + sig do + returns( + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy + ) + ) + end + attr_reader :truncation_strategy + + sig do + params( + truncation_strategy: + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::OrHash + ) + ).void + end + attr_writer :truncation_strategy + + sig do + params( + assistant_id: String, + instructions: T.nilable(String), + max_completion_tokens: T.nilable(Integer), + max_prompt_tokens: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: T.nilable(T.any(String, OpenAI::ChatModel::OrSymbol)), + parallel_tool_calls: T::Boolean, + response_format: + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash + ) + ), + temperature: T.nilable(Float), + thread: OpenAI::Beta::ThreadCreateAndRunParams::Thread::OrHash, + tool_choice: + T.nilable( + T.any( + OpenAI::Beta::AssistantToolChoiceOption::Auto::OrSymbol, + OpenAI::Beta::AssistantToolChoice::OrHash + ) + ), + tool_resources: + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::OrHash + ), + tools: + T.nilable( + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::FileSearchTool::OrHash, + OpenAI::Beta::FunctionTool::OrHash + ) + ] + ), + top_p: T.nilable(Float), + truncation_strategy: + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::OrHash + ), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. + assistant_id:, + # Override the default system message of the assistant. This is useful for + # modifying the behavior on a per-run basis. + instructions: nil, + # The maximum number of completion tokens that may be used over the course of the + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. + max_completion_tokens: nil, + # The maximum number of prompt tokens that may be used over the course of the run. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. + max_prompt_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. 
This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. + model: nil, + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. + parallel_tool_calls: nil, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + response_format: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + temperature: nil, + # Options to create a new thread. If no thread is provided when running a request, + # an empty thread will be created. + thread: nil, + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + tool_choice: nil, + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. + tool_resources: nil, + # Override the tools the assistant can use for this run. This is useful for + # modifying the behavior on a per-run basis. + tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. 
So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. + top_p: nil, + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. + truncation_strategy: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + assistant_id: String, + instructions: T.nilable(String), + max_completion_tokens: T.nilable(Integer), + max_prompt_tokens: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: T.nilable(T.any(String, OpenAI::ChatModel::OrSymbol)), + parallel_tool_calls: T::Boolean, + response_format: + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText, + OpenAI::ResponseFormatJSONObject, + OpenAI::ResponseFormatJSONSchema + ) + ), + temperature: T.nilable(Float), + thread: OpenAI::Beta::ThreadCreateAndRunParams::Thread, + tool_choice: + T.nilable( + T.any( + OpenAI::Beta::AssistantToolChoiceOption::Auto::OrSymbol, + OpenAI::Beta::AssistantToolChoice + ) + ), + tool_resources: + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources + ), + tools: + T.nilable( + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::FileSearchTool, + OpenAI::Beta::FunctionTool + ) + ] + ), + top_p: T.nilable(Float), + truncation_strategy: + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy + ), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. + module Model + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias { T.any(String, OpenAI::ChatModel::TaggedSymbol) } + + sig do + override.returns( + T::Array[OpenAI::Beta::ThreadCreateAndRunParams::Model::Variants] + ) + end + def self.variants + end + end + + class Thread < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread, + OpenAI::Internal::AnyHash + ) + end + + # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + # start the thread with. + sig do + returns( + T.nilable( + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message + ] + ) + ) + end + attr_reader :messages + + sig do + params( + messages: + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::OrHash + ] + ).void + end + attr_writer :messages + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. 
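Because every nested model declares an `OrHash` alias, the whole create-and-run payload can be written as plain hashes and converted inside the SDK. A sketch of the params model with an inline thread (IDs and message text are illustrative):

```ruby
require "openai"

params = OpenAI::Beta::ThreadCreateAndRunParams.new(
  assistant_id: "asst_abc123", # illustrative assistant ID
  thread: {
    messages: [
      {role: :user, content: "Summarize the attached report."}
    ]
  },
  temperature: 0.2,            # lower values => more deterministic output
  max_completion_tokens: 512
)
```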
+ sig do + returns( + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources + ) + ) + end + attr_reader :tool_resources + + sig do + params( + tool_resources: + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::OrHash + ) + ).void + end + attr_writer :tool_resources + + # Options to create a new thread. If no thread is provided when running a request, + # an empty thread will be created. + sig do + params( + messages: + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::OrHash + ], + metadata: T.nilable(T::Hash[Symbol, String]), + tool_resources: + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::OrHash + ) + ).returns(T.attached_class) + end + def self.new( + # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + # start the thread with. + messages: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + tool_resources: nil + ) + end + + sig do + override.returns( + { + messages: + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message + ], + metadata: T.nilable(T::Hash[Symbol, String]), + tool_resources: + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources + ) + } + ) + end + def to_hash + end + + class Message < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message, + OpenAI::Internal::AnyHash + ) + end + + # The text contents of the message. + sig do + returns( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Content::Variants + ) + end + attr_accessor :content + + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. + sig do + returns( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Role::OrSymbol + ) + end + attr_accessor :role + + # A list of files attached to the message, and the tools they should be added to. + sig do + returns( + T.nilable( + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment + ] + ) + ) + end + attr_accessor :attachments + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. 
+ sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + sig do + params( + content: + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Content::Variants, + role: + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Role::OrSymbol, + attachments: + T.nilable( + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::OrHash + ] + ), + metadata: T.nilable(T::Hash[Symbol, String]) + ).returns(T.attached_class) + end + def self.new( + # The text contents of the message. + content:, + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. + role:, + # A list of files attached to the message, and the tools they should be added to. + attachments: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil + ) + end + + sig do + override.returns( + { + content: + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Content::Variants, + role: + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Role::OrSymbol, + attachments: + T.nilable( + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment + ] + ), + metadata: T.nilable(T::Hash[Symbol, String]) + } + ) + end + def to_hash + end + + # The text contents of the message. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[ + OpenAI::Beta::Threads::MessageContentPartParam::Variants + ] + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Content::Variants + ] + ) + end + def self.variants + end + + MessageContentPartParamArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + union: OpenAI::Beta::Threads::MessageContentPartParam + ], + OpenAI::Internal::Type::Converter + ) + end + + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. 
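The two roles matter mostly for seeding conversation history: `:user` for real input, `:assistant` to replay prior model turns. A sketch using the `Message` model defined above (the text is illustrative):

```ruby
require "openai"

Message = OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message

history = [
  Message.new(role: :user, content: "What does clause 9 cover?"),
  # `:assistant` inserts a prior model turn into the thread verbatim.
  Message.new(role: :assistant, content: "Clause 9 covers termination.")
]
```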
+ module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Role + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + + class Attachment < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the file to attach to the message. + sig { returns(T.nilable(String)) } + attr_reader :file_id + + sig { params(file_id: String).void } + attr_writer :file_id + + # The tools to add this file to. + sig do + returns( + T.nilable( + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch + ) + ] + ) + ) + end + attr_reader :tools + + sig do + params( + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch::OrHash + ) + ] + ).void + end + attr_writer :tools + + sig do + params( + file_id: String, + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch::OrHash + ) + ] + ).returns(T.attached_class) + end + def self.new( + # The ID of the file to attach to the message. + file_id: nil, + # The tools to add this file to. 
+ tools: nil + ) + end + + sig do + override.returns( + { + file_id: String, + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch + ) + ] + } + ) + end + def to_hash + end + + module Tool + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch + ) + end + + class FileSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch, + OpenAI::Internal::AnyHash + ) + end + + # The type of tool being defined: `file_search` + sig { returns(Symbol) } + attr_accessor :type + + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The type of tool being defined: `file_search` + type: :file_search + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::Variants + ] + ) + end + def self.variants + end + end + end + end + + class ToolResources < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter + ) + ) + end + attr_reader :code_interpreter + + sig do + params( + code_interpreter: + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter::OrHash + ).void + end + attr_writer :code_interpreter + + sig do + returns( + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch + ) + ) + end + attr_reader :file_search + + sig do + params( + file_search: + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::OrHash + ).void + end + attr_writer :file_search + + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + sig do + params( + code_interpreter: + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter::OrHash, + file_search: + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::OrHash + ).returns(T.attached_class) + end + def self.new(code_interpreter: nil, file_search: nil) + end + + sig do + override.returns( + { + code_interpreter: + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter, + file_search: + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch + } + ) + end + def to_hash + end + + class CodeInterpreter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter, + OpenAI::Internal::AnyHash + ) + end + + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. 
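The two tool-resource slots take different inputs, which is easy to misread: `code_interpreter` wants file IDs (up to 20), while `file_search` wants vector-store IDs (at most 1). A sketch of the thread-level resources with illustrative IDs, using the plain-hash form the `OrHash` aliases permit:

```ruby
require "openai"

resources = OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources.new(
  code_interpreter: {file_ids: ["file-abc123"]},  # up to 20 files
  file_search: {vector_store_ids: ["vs_abc123"]}  # at most 1 vector store
)
```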
+ sig { returns(T.nilable(T::Array[String])) } + attr_reader :file_ids + + sig { params(file_ids: T::Array[String]).void } + attr_writer :file_ids + + sig do + params(file_ids: T::Array[String]).returns(T.attached_class) + end + def self.new( + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. + file_ids: nil + ) + end + + sig { override.returns({ file_ids: T::Array[String] }) } + def to_hash + end + end + + class FileSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch, + OpenAI::Internal::AnyHash + ) + end + + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :vector_store_ids + + sig { params(vector_store_ids: T::Array[String]).void } + attr_writer :vector_store_ids + + # A helper to create a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this thread. There can be a maximum of 1 vector + # store attached to the thread. + sig do + returns( + T.nilable( + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore + ] + ) + ) + end + attr_reader :vector_stores + + sig do + params( + vector_stores: + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::OrHash + ] + ).void + end + attr_writer :vector_stores + + sig do + params( + vector_store_ids: T::Array[String], + vector_stores: + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::OrHash + ] + ).returns(T.attached_class) + end + def self.new( + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. + vector_store_ids: nil, + # A helper to create a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this thread. There can be a maximum of 1 vector + # store attached to the thread. + vector_stores: nil + ) + end + + sig do + override.returns( + { + vector_store_ids: T::Array[String], + vector_stores: + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore + ] + } + ) + end + def to_hash + end + + class VectorStore < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore, + OpenAI::Internal::AnyHash + ) + end + + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. 
+ sig do + returns( + T.nilable( + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + ) + ) + ) + end + attr_reader :chunking_strategy + + sig do + params( + chunking_strategy: + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto::OrHash, + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::OrHash + ) + ).void + end + attr_writer :chunking_strategy + + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :file_ids + + sig { params(file_ids: T::Array[String]).void } + attr_writer :file_ids + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + sig do + params( + chunking_strategy: + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto::OrHash, + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::OrHash + ), + file_ids: T::Array[String], + metadata: T.nilable(T::Hash[Symbol, String]) + ).returns(T.attached_class) + end + def self.new( + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. + chunking_strategy: nil, + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. + file_ids: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil + ) + end + + sig do + override.returns( + { + chunking_strategy: + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + ), + file_ids: T::Array[String], + metadata: T.nilable(T::Hash[Symbol, String]) + } + ) + end + def to_hash + end + + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. 
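Of the two strategies, only `static` is tunable; the constraints in the comments further down (overlap at most half of `max_chunk_size_tokens`, chunk size within 100–4096) are worth writing out explicitly. A sketch that reproduces the documented `auto` defaults by hand:

```ruby
# Plain-hash form of a static chunking strategy; `type: :static` is the
# discriminator the union uses. 800/400 mirror the documented defaults.
chunking_strategy = {
  type: :static,
  static: {
    max_chunk_size_tokens: 800, # must be within 100..4096
    chunk_overlap_tokens: 400   # must not exceed max_chunk_size_tokens / 2
  }
}
```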
+ module ChunkingStrategy + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + ) + end + + class Auto < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, + OpenAI::Internal::AnyHash + ) + end + + # Always `auto`. + sig { returns(Symbol) } + attr_accessor :type + + # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + # `800` and `chunk_overlap_tokens` of `400`. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # Always `auto`. + type: :auto + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + class Static < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static + ) + end + attr_reader :static + + sig do + params( + static: + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static::OrHash + ).void + end + attr_writer :static + + # Always `static`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + static: + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + static:, + # Always `static`. + type: :static + ) + end + + sig do + override.returns( + { + static: + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, + type: Symbol + } + ) + end + def to_hash + end + + class Static < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, + OpenAI::Internal::AnyHash + ) + end + + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + sig { returns(Integer) } + attr_accessor :chunk_overlap_tokens + + # The maximum number of tokens in each chunk. The default value is `800`. The + # minimum value is `100` and the maximum value is `4096`. + sig { returns(Integer) } + attr_accessor :max_chunk_size_tokens + + sig do + params( + chunk_overlap_tokens: Integer, + max_chunk_size_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + chunk_overlap_tokens:, + # The maximum number of tokens in each chunk. The default value is `800`. The + # minimum value is `100` and the maximum value is `4096`. 
+ max_chunk_size_tokens: + ) + end + + sig do + override.returns( + { + chunk_overlap_tokens: Integer, + max_chunk_size_tokens: Integer + } + ) + end + def to_hash + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Variants + ] + ) + end + def self.variants + end + end + end + end + end + end + + class ToolResources < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter + ) + ) + end + attr_reader :code_interpreter + + sig do + params( + code_interpreter: + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter::OrHash + ).void + end + attr_writer :code_interpreter + + sig do + returns( + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch + ) + ) + end + attr_reader :file_search + + sig do + params( + file_search: + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch::OrHash + ).void + end + attr_writer :file_search + + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. + sig do + params( + code_interpreter: + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter::OrHash, + file_search: + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch::OrHash + ).returns(T.attached_class) + end + def self.new(code_interpreter: nil, file_search: nil) + end + + sig do + override.returns( + { + code_interpreter: + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter, + file_search: + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch + } + ) + end + def to_hash + end + + class CodeInterpreter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter, + OpenAI::Internal::AnyHash + ) + end + + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :file_ids + + sig { params(file_ids: T::Array[String]).void } + attr_writer :file_ids + + sig { params(file_ids: T::Array[String]).returns(T.attached_class) } + def self.new( + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. + file_ids: nil + ) + end + + sig { override.returns({ file_ids: T::Array[String] }) } + def to_hash + end + end + + class FileSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. 
+ sig { returns(T.nilable(T::Array[String])) } + attr_reader :vector_store_ids + + sig { params(vector_store_ids: T::Array[String]).void } + attr_writer :vector_store_ids + + sig do + params(vector_store_ids: T::Array[String]).returns( + T.attached_class + ) + end + def self.new( + # The ID of the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. + vector_store_ids: nil + ) + end + + sig { override.returns({ vector_store_ids: T::Array[String] }) } + def to_hash + end + end + end + + class TruncationStrategy < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy, + OpenAI::Internal::AnyHash + ) + end + + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. + sig do + returns( + OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type::OrSymbol + ) + end + attr_accessor :type + + # The number of most recent messages from the thread when constructing the context + # for the run. + sig { returns(T.nilable(Integer)) } + attr_accessor :last_messages + + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. + sig do + params( + type: + OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type::OrSymbol, + last_messages: T.nilable(Integer) + ).returns(T.attached_class) + end + def self.new( + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. + type:, + # The number of most recent messages from the thread when constructing the context + # for the run. + last_messages: nil + ) + end + + sig do + override.returns( + { + type: + OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type::OrSymbol, + last_messages: T.nilable(Integer) + } + ) + end + def to_hash + end + + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. 
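+            #
+            # Purely as an illustrative sketch (not part of the generated
+            # interface), a strategy keeping only the ten most recent messages
+            # could be built from the signatures above:
+            #
+            #   OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy.new(
+            #     type: :last_messages,
+            #     last_messages: 10 # hypothetical value
+            #   )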
+ module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type::TaggedSymbol + ) + LAST_MESSAGES = + T.let( + :last_messages, + OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/thread_create_params.rbi b/rbi/openai/models/beta/thread_create_params.rbi new file mode 100644 index 00000000..da3aedab --- /dev/null +++ b/rbi/openai/models/beta/thread_create_params.rbi @@ -0,0 +1,847 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class ThreadCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::Beta::ThreadCreateParams, OpenAI::Internal::AnyHash) + end + + # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + # start the thread with. + sig do + returns( + T.nilable(T::Array[OpenAI::Beta::ThreadCreateParams::Message]) + ) + end + attr_reader :messages + + sig do + params( + messages: + T::Array[OpenAI::Beta::ThreadCreateParams::Message::OrHash] + ).void + end + attr_writer :messages + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + sig do + returns(T.nilable(OpenAI::Beta::ThreadCreateParams::ToolResources)) + end + attr_reader :tool_resources + + sig do + params( + tool_resources: + T.nilable(OpenAI::Beta::ThreadCreateParams::ToolResources::OrHash) + ).void + end + attr_writer :tool_resources + + sig do + params( + messages: + T::Array[OpenAI::Beta::ThreadCreateParams::Message::OrHash], + metadata: T.nilable(T::Hash[Symbol, String]), + tool_resources: + T.nilable( + OpenAI::Beta::ThreadCreateParams::ToolResources::OrHash + ), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + # start the thread with. + messages: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # A set of resources that are made available to the assistant's tools in this + # thread. 
The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + tool_resources: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + messages: T::Array[OpenAI::Beta::ThreadCreateParams::Message], + metadata: T.nilable(T::Hash[Symbol, String]), + tool_resources: + T.nilable(OpenAI::Beta::ThreadCreateParams::ToolResources), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + class Message < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateParams::Message, + OpenAI::Internal::AnyHash + ) + end + + # The text contents of the message. + sig do + returns( + OpenAI::Beta::ThreadCreateParams::Message::Content::Variants + ) + end + attr_accessor :content + + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. + sig do + returns(OpenAI::Beta::ThreadCreateParams::Message::Role::OrSymbol) + end + attr_accessor :role + + # A list of files attached to the message, and the tools they should be added to. + sig do + returns( + T.nilable( + T::Array[OpenAI::Beta::ThreadCreateParams::Message::Attachment] + ) + ) + end + attr_accessor :attachments + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + sig do + params( + content: + OpenAI::Beta::ThreadCreateParams::Message::Content::Variants, + role: OpenAI::Beta::ThreadCreateParams::Message::Role::OrSymbol, + attachments: + T.nilable( + T::Array[ + OpenAI::Beta::ThreadCreateParams::Message::Attachment::OrHash + ] + ), + metadata: T.nilable(T::Hash[Symbol, String]) + ).returns(T.attached_class) + end + def self.new( + # The text contents of the message. + content:, + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. + role:, + # A list of files attached to the message, and the tools they should be added to. + attachments: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. 
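+            #
+            # Purely illustrative (keys and values are hypothetical):
+            #
+            #   metadata: {user_id: "u_123", topic: "billing"}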
+ metadata: nil + ) + end + + sig do + override.returns( + { + content: + OpenAI::Beta::ThreadCreateParams::Message::Content::Variants, + role: OpenAI::Beta::ThreadCreateParams::Message::Role::OrSymbol, + attachments: + T.nilable( + T::Array[ + OpenAI::Beta::ThreadCreateParams::Message::Attachment + ] + ), + metadata: T.nilable(T::Hash[Symbol, String]) + } + ) + end + def to_hash + end + + # The text contents of the message. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[ + OpenAI::Beta::Threads::MessageContentPartParam::Variants + ] + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::ThreadCreateParams::Message::Content::Variants + ] + ) + end + def self.variants + end + + MessageContentPartParamArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + union: OpenAI::Beta::Threads::MessageContentPartParam + ], + OpenAI::Internal::Type::Converter + ) + end + + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. + module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::ThreadCreateParams::Message::Role) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::Beta::ThreadCreateParams::Message::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::Beta::ThreadCreateParams::Message::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::ThreadCreateParams::Message::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + + class Attachment < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateParams::Message::Attachment, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the file to attach to the message. + sig { returns(T.nilable(String)) } + attr_reader :file_id + + sig { params(file_id: String).void } + attr_writer :file_id + + # The tools to add this file to. + sig do + returns( + T.nilable( + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch + ) + ] + ) + ) + end + attr_reader :tools + + sig do + params( + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch::OrHash + ) + ] + ).void + end + attr_writer :tools + + sig do + params( + file_id: String, + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch::OrHash + ) + ] + ).returns(T.attached_class) + end + def self.new( + # The ID of the file to attach to the message. + file_id: nil, + # The tools to add this file to. 
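+              #
+              # Purely illustrative: each entry is either a code interpreter
+              # tool or a file search marker, supplied as a model or hash, e.g.
+              #
+              #   tools: [{type: :code_interpreter}, {type: :file_search}]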
+ tools: nil + ) + end + + sig do + override.returns( + { + file_id: String, + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch + ) + ] + } + ) + end + def to_hash + end + + module Tool + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch + ) + end + + class FileSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch, + OpenAI::Internal::AnyHash + ) + end + + # The type of tool being defined: `file_search` + sig { returns(Symbol) } + attr_accessor :type + + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The type of tool being defined: `file_search` + type: :file_search + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::ThreadCreateParams::Message::Attachment::Tool::Variants + ] + ) + end + def self.variants + end + end + end + end + + class ToolResources < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateParams::ToolResources, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + T.nilable( + OpenAI::Beta::ThreadCreateParams::ToolResources::CodeInterpreter + ) + ) + end + attr_reader :code_interpreter + + sig do + params( + code_interpreter: + OpenAI::Beta::ThreadCreateParams::ToolResources::CodeInterpreter::OrHash + ).void + end + attr_writer :code_interpreter + + sig do + returns( + T.nilable( + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch + ) + ) + end + attr_reader :file_search + + sig do + params( + file_search: + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::OrHash + ).void + end + attr_writer :file_search + + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + sig do + params( + code_interpreter: + OpenAI::Beta::ThreadCreateParams::ToolResources::CodeInterpreter::OrHash, + file_search: + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::OrHash + ).returns(T.attached_class) + end + def self.new(code_interpreter: nil, file_search: nil) + end + + sig do + override.returns( + { + code_interpreter: + OpenAI::Beta::ThreadCreateParams::ToolResources::CodeInterpreter, + file_search: + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch + } + ) + end + def to_hash + end + + class CodeInterpreter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateParams::ToolResources::CodeInterpreter, + OpenAI::Internal::AnyHash + ) + end + + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. 
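+            #
+            # Purely illustrative (the IDs are hypothetical):
+            #
+            #   file_ids: ["file-abc", "file-def"]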
+ sig { returns(T.nilable(T::Array[String])) } + attr_reader :file_ids + + sig { params(file_ids: T::Array[String]).void } + attr_writer :file_ids + + sig { params(file_ids: T::Array[String]).returns(T.attached_class) } + def self.new( + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. + file_ids: nil + ) + end + + sig { override.returns({ file_ids: T::Array[String] }) } + def to_hash + end + end + + class FileSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch, + OpenAI::Internal::AnyHash + ) + end + + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :vector_store_ids + + sig { params(vector_store_ids: T::Array[String]).void } + attr_writer :vector_store_ids + + # A helper to create a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this thread. There can be a maximum of 1 vector + # store attached to the thread. + sig do + returns( + T.nilable( + T::Array[ + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore + ] + ) + ) + end + attr_reader :vector_stores + + sig do + params( + vector_stores: + T::Array[ + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::OrHash + ] + ).void + end + attr_writer :vector_stores + + sig do + params( + vector_store_ids: T::Array[String], + vector_stores: + T::Array[ + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::OrHash + ] + ).returns(T.attached_class) + end + def self.new( + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. + vector_store_ids: nil, + # A helper to create a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this thread. There can be a maximum of 1 vector + # store attached to the thread. + vector_stores: nil + ) + end + + sig do + override.returns( + { + vector_store_ids: T::Array[String], + vector_stores: + T::Array[ + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore + ] + } + ) + end + def to_hash + end + + class VectorStore < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore, + OpenAI::Internal::AnyHash + ) + end + + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. 
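+              #
+              # As a non-normative sketch, either variant can be passed as a
+              # hash via its OrHash alias; the static values below are
+              # hypothetical but match the documented defaults:
+              #
+              #   chunking_strategy: {type: :auto}
+              #   chunking_strategy: {
+              #     type: :static,
+              #     static: {chunk_overlap_tokens: 400, max_chunk_size_tokens: 800}
+              #   }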
+ sig do + returns( + T.nilable( + T.any( + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + ) + ) + ) + end + attr_reader :chunking_strategy + + sig do + params( + chunking_strategy: + T.any( + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto::OrHash, + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::OrHash + ) + ).void + end + attr_writer :chunking_strategy + + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :file_ids + + sig { params(file_ids: T::Array[String]).void } + attr_writer :file_ids + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + sig do + params( + chunking_strategy: + T.any( + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto::OrHash, + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::OrHash + ), + file_ids: T::Array[String], + metadata: T.nilable(T::Hash[Symbol, String]) + ).returns(T.attached_class) + end + def self.new( + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. + chunking_strategy: nil, + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. + file_ids: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil + ) + end + + sig do + override.returns( + { + chunking_strategy: + T.any( + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + ), + file_ids: T::Array[String], + metadata: T.nilable(T::Hash[Symbol, String]) + } + ) + end + def to_hash + end + + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. + module ChunkingStrategy + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + ) + end + + class Auto < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, + OpenAI::Internal::AnyHash + ) + end + + # Always `auto`. 
+ sig { returns(Symbol) } + attr_accessor :type + + # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + # `800` and `chunk_overlap_tokens` of `400`. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # Always `auto`. + type: :auto + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + class Static < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static + ) + end + attr_reader :static + + sig do + params( + static: + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static::OrHash + ).void + end + attr_writer :static + + # Always `static`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + static: + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + static:, + # Always `static`. + type: :static + ) + end + + sig do + override.returns( + { + static: + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, + type: Symbol + } + ) + end + def to_hash + end + + class Static < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, + OpenAI::Internal::AnyHash + ) + end + + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + sig { returns(Integer) } + attr_accessor :chunk_overlap_tokens + + # The maximum number of tokens in each chunk. The default value is `800`. The + # minimum value is `100` and the maximum value is `4096`. + sig { returns(Integer) } + attr_accessor :max_chunk_size_tokens + + sig do + params( + chunk_overlap_tokens: Integer, + max_chunk_size_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + chunk_overlap_tokens:, + # The maximum number of tokens in each chunk. The default value is `800`. The + # minimum value is `100` and the maximum value is `4096`. 
+ max_chunk_size_tokens: + ) + end + + sig do + override.returns( + { + chunk_overlap_tokens: Integer, + max_chunk_size_tokens: Integer + } + ) + end + def to_hash + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Variants + ] + ) + end + def self.variants + end + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/thread_delete_params.rbi b/rbi/openai/models/beta/thread_delete_params.rbi new file mode 100644 index 00000000..0909624a --- /dev/null +++ b/rbi/openai/models/beta/thread_delete_params.rbi @@ -0,0 +1,29 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class ThreadDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::Beta::ThreadDeleteParams, OpenAI::Internal::AnyHash) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/beta/thread_deleted.rbi b/rbi/openai/models/beta/thread_deleted.rbi new file mode 100644 index 00000000..62402b00 --- /dev/null +++ b/rbi/openai/models/beta/thread_deleted.rbi @@ -0,0 +1,37 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class ThreadDeleted < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Beta::ThreadDeleted, OpenAI::Internal::AnyHash) + end + + sig { returns(String) } + attr_accessor :id + + sig { returns(T::Boolean) } + attr_accessor :deleted + + sig { returns(Symbol) } + attr_accessor :object + + sig do + params(id: String, deleted: T::Boolean, object: Symbol).returns( + T.attached_class + ) + end + def self.new(id:, deleted:, object: :"thread.deleted") + end + + sig do + override.returns({ id: String, deleted: T::Boolean, object: Symbol }) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/beta/thread_retrieve_params.rbi b/rbi/openai/models/beta/thread_retrieve_params.rbi new file mode 100644 index 00000000..8e36a4e7 --- /dev/null +++ b/rbi/openai/models/beta/thread_retrieve_params.rbi @@ -0,0 +1,29 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class ThreadRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::Beta::ThreadRetrieveParams, OpenAI::Internal::AnyHash) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/beta/thread_stream_event.rbi b/rbi/openai/models/beta/thread_stream_event.rbi new file mode 100644 index 00000000..4674c89f --- /dev/null +++ b/rbi/openai/models/beta/thread_stream_event.rbi @@ -0,0 +1,60 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class ThreadStreamEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Beta::ThreadStreamEvent, OpenAI::Internal::AnyHash) + end + + # Represents a thread that contains + # 
[messages](https://platform.openai.com/docs/api-reference/messages). + sig { returns(OpenAI::Beta::Thread) } + attr_reader :data + + sig { params(data: OpenAI::Beta::Thread::OrHash).void } + attr_writer :data + + sig { returns(Symbol) } + attr_accessor :event + + # Whether to enable input audio transcription. + sig { returns(T.nilable(T::Boolean)) } + attr_reader :enabled + + sig { params(enabled: T::Boolean).void } + attr_writer :enabled + + # Occurs when a new + # [thread](https://platform.openai.com/docs/api-reference/threads/object) is + # created. + sig do + params( + data: OpenAI::Beta::Thread::OrHash, + enabled: T::Boolean, + event: Symbol + ).returns(T.attached_class) + end + def self.new( + # Represents a thread that contains + # [messages](https://platform.openai.com/docs/api-reference/messages). + data:, + # Whether to enable input audio transcription. + enabled: nil, + event: :"thread.created" + ) + end + + sig do + override.returns( + { data: OpenAI::Beta::Thread, event: Symbol, enabled: T::Boolean } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/beta/thread_update_params.rbi b/rbi/openai/models/beta/thread_update_params.rbi new file mode 100644 index 00000000..8d9389b0 --- /dev/null +++ b/rbi/openai/models/beta/thread_update_params.rbi @@ -0,0 +1,225 @@ +# typed: strong + +module OpenAI + module Models + module Beta + class ThreadUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::Beta::ThreadUpdateParams, OpenAI::Internal::AnyHash) + end + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + sig do + returns(T.nilable(OpenAI::Beta::ThreadUpdateParams::ToolResources)) + end + attr_reader :tool_resources + + sig do + params( + tool_resources: + T.nilable(OpenAI::Beta::ThreadUpdateParams::ToolResources::OrHash) + ).void + end + attr_writer :tool_resources + + sig do + params( + metadata: T.nilable(T::Hash[Symbol, String]), + tool_resources: + T.nilable( + OpenAI::Beta::ThreadUpdateParams::ToolResources::OrHash + ), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. 
For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + tool_resources: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + metadata: T.nilable(T::Hash[Symbol, String]), + tool_resources: + T.nilable(OpenAI::Beta::ThreadUpdateParams::ToolResources), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + class ToolResources < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadUpdateParams::ToolResources, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + T.nilable( + OpenAI::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter + ) + ) + end + attr_reader :code_interpreter + + sig do + params( + code_interpreter: + OpenAI::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter::OrHash + ).void + end + attr_writer :code_interpreter + + sig do + returns( + T.nilable( + OpenAI::Beta::ThreadUpdateParams::ToolResources::FileSearch + ) + ) + end + attr_reader :file_search + + sig do + params( + file_search: + OpenAI::Beta::ThreadUpdateParams::ToolResources::FileSearch::OrHash + ).void + end + attr_writer :file_search + + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + sig do + params( + code_interpreter: + OpenAI::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter::OrHash, + file_search: + OpenAI::Beta::ThreadUpdateParams::ToolResources::FileSearch::OrHash + ).returns(T.attached_class) + end + def self.new(code_interpreter: nil, file_search: nil) + end + + sig do + override.returns( + { + code_interpreter: + OpenAI::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter, + file_search: + OpenAI::Beta::ThreadUpdateParams::ToolResources::FileSearch + } + ) + end + def to_hash + end + + class CodeInterpreter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter, + OpenAI::Internal::AnyHash + ) + end + + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :file_ids + + sig { params(file_ids: T::Array[String]).void } + attr_writer :file_ids + + sig { params(file_ids: T::Array[String]).returns(T.attached_class) } + def self.new( + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. + file_ids: nil + ) + end + + sig { override.returns({ file_ids: T::Array[String] }) } + def to_hash + end + end + + class FileSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::ThreadUpdateParams::ToolResources::FileSearch, + OpenAI::Internal::AnyHash + ) + end + + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. 
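+            #
+            # Purely illustrative (the ID is hypothetical):
+            #
+            #   vector_store_ids: ["vs_123"]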
+ sig { returns(T.nilable(T::Array[String])) } + attr_reader :vector_store_ids + + sig { params(vector_store_ids: T::Array[String]).void } + attr_writer :vector_store_ids + + sig do + params(vector_store_ids: T::Array[String]).returns( + T.attached_class + ) + end + def self.new( + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. + vector_store_ids: nil + ) + end + + sig { override.returns({ vector_store_ids: T::Array[String] }) } + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/annotation.rbi b/rbi/openai/models/beta/threads/annotation.rbi new file mode 100644 index 00000000..ee7c4ae0 --- /dev/null +++ b/rbi/openai/models/beta/threads/annotation.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + # A citation within the message that points to a specific quote from a specific + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. + module Annotation + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::Threads::FileCitationAnnotation, + OpenAI::Beta::Threads::FilePathAnnotation + ) + end + + sig do + override.returns( + T::Array[OpenAI::Beta::Threads::Annotation::Variants] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/annotation_delta.rbi b/rbi/openai/models/beta/threads/annotation_delta.rbi new file mode 100644 index 00000000..6bf8aeaa --- /dev/null +++ b/rbi/openai/models/beta/threads/annotation_delta.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + # A citation within the message that points to a specific quote from a specific + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. + module AnnotationDelta + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::Threads::FileCitationDeltaAnnotation, + OpenAI::Beta::Threads::FilePathDeltaAnnotation + ) + end + + sig do + override.returns( + T::Array[OpenAI::Beta::Threads::AnnotationDelta::Variants] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/file_citation_annotation.rbi b/rbi/openai/models/beta/threads/file_citation_annotation.rbi new file mode 100644 index 00000000..09c69d08 --- /dev/null +++ b/rbi/openai/models/beta/threads/file_citation_annotation.rbi @@ -0,0 +1,110 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class FileCitationAnnotation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::FileCitationAnnotation, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(Integer) } + attr_accessor :end_index + + sig do + returns(OpenAI::Beta::Threads::FileCitationAnnotation::FileCitation) + end + attr_reader :file_citation + + sig do + params( + file_citation: + OpenAI::Beta::Threads::FileCitationAnnotation::FileCitation::OrHash + ).void + end + attr_writer :file_citation + + sig { returns(Integer) } + attr_accessor :start_index + + # The text in the message content that needs to be replaced. + sig { returns(String) } + attr_accessor :text + + # Always `file_citation`. 
+ sig { returns(Symbol) } + attr_accessor :type + + # A citation within the message that points to a specific quote from a specific + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. + sig do + params( + end_index: Integer, + file_citation: + OpenAI::Beta::Threads::FileCitationAnnotation::FileCitation::OrHash, + start_index: Integer, + text: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + end_index:, + file_citation:, + start_index:, + # The text in the message content that needs to be replaced. + text:, + # Always `file_citation`. + type: :file_citation + ) + end + + sig do + override.returns( + { + end_index: Integer, + file_citation: + OpenAI::Beta::Threads::FileCitationAnnotation::FileCitation, + start_index: Integer, + text: String, + type: Symbol + } + ) + end + def to_hash + end + + class FileCitation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::FileCitationAnnotation::FileCitation, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the specific File the citation is from. + sig { returns(String) } + attr_accessor :file_id + + sig { params(file_id: String).returns(T.attached_class) } + def self.new( + # The ID of the specific File the citation is from. + file_id: + ) + end + + sig { override.returns({ file_id: String }) } + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/file_citation_delta_annotation.rbi b/rbi/openai/models/beta/threads/file_citation_delta_annotation.rbi new file mode 100644 index 00000000..574352cd --- /dev/null +++ b/rbi/openai/models/beta/threads/file_citation_delta_annotation.rbi @@ -0,0 +1,145 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class FileCitationDeltaAnnotation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::FileCitationDeltaAnnotation, + OpenAI::Internal::AnyHash + ) + end + + # The index of the annotation in the text content part. + sig { returns(Integer) } + attr_accessor :index + + # Always `file_citation`. + sig { returns(Symbol) } + attr_accessor :type + + sig { returns(T.nilable(Integer)) } + attr_reader :end_index + + sig { params(end_index: Integer).void } + attr_writer :end_index + + sig do + returns( + T.nilable( + OpenAI::Beta::Threads::FileCitationDeltaAnnotation::FileCitation + ) + ) + end + attr_reader :file_citation + + sig do + params( + file_citation: + OpenAI::Beta::Threads::FileCitationDeltaAnnotation::FileCitation::OrHash + ).void + end + attr_writer :file_citation + + sig { returns(T.nilable(Integer)) } + attr_reader :start_index + + sig { params(start_index: Integer).void } + attr_writer :start_index + + # The text in the message content that needs to be replaced. + sig { returns(T.nilable(String)) } + attr_reader :text + + sig { params(text: String).void } + attr_writer :text + + # A citation within the message that points to a specific quote from a specific + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. + sig do + params( + index: Integer, + end_index: Integer, + file_citation: + OpenAI::Beta::Threads::FileCitationDeltaAnnotation::FileCitation::OrHash, + start_index: Integer, + text: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The index of the annotation in the text content part. 
+ index:, + end_index: nil, + file_citation: nil, + start_index: nil, + # The text in the message content that needs to be replaced. + text: nil, + # Always `file_citation`. + type: :file_citation + ) + end + + sig do + override.returns( + { + index: Integer, + type: Symbol, + end_index: Integer, + file_citation: + OpenAI::Beta::Threads::FileCitationDeltaAnnotation::FileCitation, + start_index: Integer, + text: String + } + ) + end + def to_hash + end + + class FileCitation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::FileCitationDeltaAnnotation::FileCitation, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the specific File the citation is from. + sig { returns(T.nilable(String)) } + attr_reader :file_id + + sig { params(file_id: String).void } + attr_writer :file_id + + # The specific quote in the file. + sig { returns(T.nilable(String)) } + attr_reader :quote + + sig { params(quote: String).void } + attr_writer :quote + + sig do + params(file_id: String, quote: String).returns(T.attached_class) + end + def self.new( + # The ID of the specific File the citation is from. + file_id: nil, + # The specific quote in the file. + quote: nil + ) + end + + sig { override.returns({ file_id: String, quote: String }) } + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/file_path_annotation.rbi b/rbi/openai/models/beta/threads/file_path_annotation.rbi new file mode 100644 index 00000000..6c989044 --- /dev/null +++ b/rbi/openai/models/beta/threads/file_path_annotation.rbi @@ -0,0 +1,106 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class FilePathAnnotation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::FilePathAnnotation, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(Integer) } + attr_accessor :end_index + + sig { returns(OpenAI::Beta::Threads::FilePathAnnotation::FilePath) } + attr_reader :file_path + + sig do + params( + file_path: + OpenAI::Beta::Threads::FilePathAnnotation::FilePath::OrHash + ).void + end + attr_writer :file_path + + sig { returns(Integer) } + attr_accessor :start_index + + # The text in the message content that needs to be replaced. + sig { returns(String) } + attr_accessor :text + + # Always `file_path`. + sig { returns(Symbol) } + attr_accessor :type + + # A URL for the file that's generated when the assistant used the + # `code_interpreter` tool to generate a file. + sig do + params( + end_index: Integer, + file_path: + OpenAI::Beta::Threads::FilePathAnnotation::FilePath::OrHash, + start_index: Integer, + text: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + end_index:, + file_path:, + start_index:, + # The text in the message content that needs to be replaced. + text:, + # Always `file_path`. + type: :file_path + ) + end + + sig do + override.returns( + { + end_index: Integer, + file_path: OpenAI::Beta::Threads::FilePathAnnotation::FilePath, + start_index: Integer, + text: String, + type: Symbol + } + ) + end + def to_hash + end + + class FilePath < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::FilePathAnnotation::FilePath, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the file that was generated. + sig { returns(String) } + attr_accessor :file_id + + sig { params(file_id: String).returns(T.attached_class) } + def self.new( + # The ID of the file that was generated. 
+ file_id: + ) + end + + sig { override.returns({ file_id: String }) } + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/file_path_delta_annotation.rbi b/rbi/openai/models/beta/threads/file_path_delta_annotation.rbi new file mode 100644 index 00000000..437a2151 --- /dev/null +++ b/rbi/openai/models/beta/threads/file_path_delta_annotation.rbi @@ -0,0 +1,133 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class FilePathDeltaAnnotation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::FilePathDeltaAnnotation, + OpenAI::Internal::AnyHash + ) + end + + # The index of the annotation in the text content part. + sig { returns(Integer) } + attr_accessor :index + + # Always `file_path`. + sig { returns(Symbol) } + attr_accessor :type + + sig { returns(T.nilable(Integer)) } + attr_reader :end_index + + sig { params(end_index: Integer).void } + attr_writer :end_index + + sig do + returns( + T.nilable( + OpenAI::Beta::Threads::FilePathDeltaAnnotation::FilePath + ) + ) + end + attr_reader :file_path + + sig do + params( + file_path: + OpenAI::Beta::Threads::FilePathDeltaAnnotation::FilePath::OrHash + ).void + end + attr_writer :file_path + + sig { returns(T.nilable(Integer)) } + attr_reader :start_index + + sig { params(start_index: Integer).void } + attr_writer :start_index + + # The text in the message content that needs to be replaced. + sig { returns(T.nilable(String)) } + attr_reader :text + + sig { params(text: String).void } + attr_writer :text + + # A URL for the file that's generated when the assistant used the + # `code_interpreter` tool to generate a file. + sig do + params( + index: Integer, + end_index: Integer, + file_path: + OpenAI::Beta::Threads::FilePathDeltaAnnotation::FilePath::OrHash, + start_index: Integer, + text: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The index of the annotation in the text content part. + index:, + end_index: nil, + file_path: nil, + start_index: nil, + # The text in the message content that needs to be replaced. + text: nil, + # Always `file_path`. + type: :file_path + ) + end + + sig do + override.returns( + { + index: Integer, + type: Symbol, + end_index: Integer, + file_path: + OpenAI::Beta::Threads::FilePathDeltaAnnotation::FilePath, + start_index: Integer, + text: String + } + ) + end + def to_hash + end + + class FilePath < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::FilePathDeltaAnnotation::FilePath, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the file that was generated. + sig { returns(T.nilable(String)) } + attr_reader :file_id + + sig { params(file_id: String).void } + attr_writer :file_id + + sig { params(file_id: String).returns(T.attached_class) } + def self.new( + # The ID of the file that was generated. 
+ file_id: nil + ) + end + + sig { override.returns({ file_id: String }) } + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/image_file.rbi b/rbi/openai/models/beta/threads/image_file.rbi new file mode 100644 index 00000000..2c805eae --- /dev/null +++ b/rbi/openai/models/beta/threads/image_file.rbi @@ -0,0 +1,102 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class ImageFile < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Beta::Threads::ImageFile, OpenAI::Internal::AnyHash) + end + + # The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + # in the message content. Set `purpose="vision"` when uploading the File if you + # need to later display the file content. + sig { returns(String) } + attr_accessor :file_id + + # Specifies the detail level of the image if specified by the user. `low` uses + # fewer tokens, you can opt in to high resolution using `high`. + sig do + returns( + T.nilable(OpenAI::Beta::Threads::ImageFile::Detail::OrSymbol) + ) + end + attr_reader :detail + + sig do + params( + detail: OpenAI::Beta::Threads::ImageFile::Detail::OrSymbol + ).void + end + attr_writer :detail + + sig do + params( + file_id: String, + detail: OpenAI::Beta::Threads::ImageFile::Detail::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + # in the message content. Set `purpose="vision"` when uploading the File if you + # need to later display the file content. + file_id:, + # Specifies the detail level of the image if specified by the user. `low` uses + # fewer tokens, you can opt in to high resolution using `high`. + detail: nil + ) + end + + sig do + override.returns( + { + file_id: String, + detail: OpenAI::Beta::Threads::ImageFile::Detail::OrSymbol + } + ) + end + def to_hash + end + + # Specifies the detail level of the image if specified by the user. `low` uses + # fewer tokens, you can opt in to high resolution using `high`. + module Detail + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::Threads::ImageFile::Detail) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Beta::Threads::ImageFile::Detail::TaggedSymbol + ) + LOW = + T.let( + :low, + OpenAI::Beta::Threads::ImageFile::Detail::TaggedSymbol + ) + HIGH = + T.let( + :high, + OpenAI::Beta::Threads::ImageFile::Detail::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Beta::Threads::ImageFile::Detail::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/image_file_content_block.rbi b/rbi/openai/models/beta/threads/image_file_content_block.rbi new file mode 100644 index 00000000..407bd31a --- /dev/null +++ b/rbi/openai/models/beta/threads/image_file_content_block.rbi @@ -0,0 +1,54 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class ImageFileContentBlock < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::ImageFileContentBlock, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(OpenAI::Beta::Threads::ImageFile) } + attr_reader :image_file + + sig do + params(image_file: OpenAI::Beta::Threads::ImageFile::OrHash).void + end + attr_writer :image_file + + # Always `image_file`. 
+ sig { returns(Symbol) } + attr_accessor :type + + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. + sig do + params( + image_file: OpenAI::Beta::Threads::ImageFile::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + image_file:, + # Always `image_file`. + type: :image_file + ) + end + + sig do + override.returns( + { image_file: OpenAI::Beta::Threads::ImageFile, type: Symbol } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/image_file_delta.rbi b/rbi/openai/models/beta/threads/image_file_delta.rbi new file mode 100644 index 00000000..86951221 --- /dev/null +++ b/rbi/openai/models/beta/threads/image_file_delta.rbi @@ -0,0 +1,113 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class ImageFileDelta < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::ImageFileDelta, + OpenAI::Internal::AnyHash + ) + end + + # Specifies the detail level of the image if specified by the user. `low` uses + # fewer tokens, you can opt in to high resolution using `high`. + sig do + returns( + T.nilable( + OpenAI::Beta::Threads::ImageFileDelta::Detail::TaggedSymbol + ) + ) + end + attr_reader :detail + + sig do + params( + detail: OpenAI::Beta::Threads::ImageFileDelta::Detail::OrSymbol + ).void + end + attr_writer :detail + + # The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + # in the message content. Set `purpose="vision"` when uploading the File if you + # need to later display the file content. + sig { returns(T.nilable(String)) } + attr_reader :file_id + + sig { params(file_id: String).void } + attr_writer :file_id + + sig do + params( + detail: OpenAI::Beta::Threads::ImageFileDelta::Detail::OrSymbol, + file_id: String + ).returns(T.attached_class) + end + def self.new( + # Specifies the detail level of the image if specified by the user. `low` uses + # fewer tokens, you can opt in to high resolution using `high`. + detail: nil, + # The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + # in the message content. Set `purpose="vision"` when uploading the File if you + # need to later display the file content. + file_id: nil + ) + end + + sig do + override.returns( + { + detail: + OpenAI::Beta::Threads::ImageFileDelta::Detail::TaggedSymbol, + file_id: String + } + ) + end + def to_hash + end + + # Specifies the detail level of the image if specified by the user. `low` uses + # fewer tokens, you can opt in to high resolution using `high`. 
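+          #
+          # Purely illustrative (the file ID is hypothetical):
+          #
+          #   OpenAI::Beta::Threads::ImageFileDelta.new(detail: :low, file_id: "file-abc")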
+          module Detail
+            extend OpenAI::Internal::Type::Enum
+
+            TaggedSymbol =
+              T.type_alias do
+                T.all(Symbol, OpenAI::Beta::Threads::ImageFileDelta::Detail)
+              end
+            OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+            AUTO =
+              T.let(
+                :auto,
+                OpenAI::Beta::Threads::ImageFileDelta::Detail::TaggedSymbol
+              )
+            LOW =
+              T.let(
+                :low,
+                OpenAI::Beta::Threads::ImageFileDelta::Detail::TaggedSymbol
+              )
+            HIGH =
+              T.let(
+                :high,
+                OpenAI::Beta::Threads::ImageFileDelta::Detail::TaggedSymbol
+              )
+
+            sig do
+              override.returns(
+                T::Array[
+                  OpenAI::Beta::Threads::ImageFileDelta::Detail::TaggedSymbol
+                ]
+              )
+            end
+            def self.values
+            end
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/beta/threads/image_file_delta_block.rbi b/rbi/openai/models/beta/threads/image_file_delta_block.rbi
new file mode 100644
index 00000000..9efa65de
--- /dev/null
+++ b/rbi/openai/models/beta/threads/image_file_delta_block.rbi
@@ -0,0 +1,67 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Beta
+      module Threads
+        class ImageFileDeltaBlock < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::Beta::Threads::ImageFileDeltaBlock,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          # The index of the content part in the message.
+          sig { returns(Integer) }
+          attr_accessor :index
+
+          # Always `image_file`.
+          sig { returns(Symbol) }
+          attr_accessor :type
+
+          sig { returns(T.nilable(OpenAI::Beta::Threads::ImageFileDelta)) }
+          attr_reader :image_file
+
+          sig do
+            params(
+              image_file: OpenAI::Beta::Threads::ImageFileDelta::OrHash
+            ).void
+          end
+          attr_writer :image_file
+
+          # References an image [File](https://platform.openai.com/docs/api-reference/files)
+          # in the content of a message.
+          sig do
+            params(
+              index: Integer,
+              image_file: OpenAI::Beta::Threads::ImageFileDelta::OrHash,
+              type: Symbol
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # The index of the content part in the message.
+            index:,
+            image_file: nil,
+            # Always `image_file`.
+            type: :image_file
+          )
+          end
+
+          sig do
+            override.returns(
+              {
+                index: Integer,
+                type: Symbol,
+                image_file: OpenAI::Beta::Threads::ImageFileDelta
+              }
+            )
+          end
+          def to_hash
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/beta/threads/image_url.rbi b/rbi/openai/models/beta/threads/image_url.rbi
new file mode 100644
index 00000000..52f2c4ad
--- /dev/null
+++ b/rbi/openai/models/beta/threads/image_url.rbi
@@ -0,0 +1,97 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Beta
+      module Threads
+        class ImageURL < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(OpenAI::Beta::Threads::ImageURL, OpenAI::Internal::AnyHash)
+            end
+
+          # The external URL of the image; must be one of the supported image types:
+          # jpeg, jpg, png, gif, webp.
+          sig { returns(String) }
+          attr_accessor :url
+
+          # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
+          # to high resolution using `high`. Default value is `auto`.
+          sig do
+            returns(
+              T.nilable(OpenAI::Beta::Threads::ImageURL::Detail::OrSymbol)
+            )
+          end
+          attr_reader :detail
+
+          sig do
+            params(
+              detail: OpenAI::Beta::Threads::ImageURL::Detail::OrSymbol
+            ).void
+          end
+          attr_writer :detail
+
+          sig do
+            params(
+              url: String,
+              detail: OpenAI::Beta::Threads::ImageURL::Detail::OrSymbol
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # The external URL of the image; must be one of the supported image types:
+            # jpeg, jpg, png, gif, webp.
+            url:,
+            # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
+            # to high resolution using `high`. Default value is `auto`.
+            detail: nil
+          )
+          end
+
+          sig do
+            override.returns(
+              {
+                url: String,
+                detail: OpenAI::Beta::Threads::ImageURL::Detail::OrSymbol
+              }
+            )
+          end
+          def to_hash
+          end
+
+          # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
+          # to high resolution using `high`. Default value is `auto`.
+          module Detail
+            extend OpenAI::Internal::Type::Enum
+
+            TaggedSymbol =
+              T.type_alias do
+                T.all(Symbol, OpenAI::Beta::Threads::ImageURL::Detail)
+              end
+            OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+            AUTO =
+              T.let(
+                :auto,
+                OpenAI::Beta::Threads::ImageURL::Detail::TaggedSymbol
+              )
+            LOW =
+              T.let(:low, OpenAI::Beta::Threads::ImageURL::Detail::TaggedSymbol)
+            HIGH =
+              T.let(
+                :high,
+                OpenAI::Beta::Threads::ImageURL::Detail::TaggedSymbol
+              )
+
+            sig do
+              override.returns(
+                T::Array[OpenAI::Beta::Threads::ImageURL::Detail::TaggedSymbol]
+              )
+            end
+            def self.values
+            end
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/beta/threads/image_url_content_block.rbi b/rbi/openai/models/beta/threads/image_url_content_block.rbi
new file mode 100644
index 00000000..eeaa7a8d
--- /dev/null
+++ b/rbi/openai/models/beta/threads/image_url_content_block.rbi
@@ -0,0 +1,53 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Beta
+      module Threads
+        class ImageURLContentBlock < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::Beta::Threads::ImageURLContentBlock,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          sig { returns(OpenAI::Beta::Threads::ImageURL) }
+          attr_reader :image_url
+
+          sig do
+            params(image_url: OpenAI::Beta::Threads::ImageURL::OrHash).void
+          end
+          attr_writer :image_url
+
+          # The type of the content part.
+          sig { returns(Symbol) }
+          attr_accessor :type
+
+          # References an image URL in the content of a message.
+          sig do
+            params(
+              image_url: OpenAI::Beta::Threads::ImageURL::OrHash,
+              type: Symbol
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            image_url:,
+            # The type of the content part.
+            type: :image_url
+          )
+          end
+
+          sig do
+            override.returns(
+              { image_url: OpenAI::Beta::Threads::ImageURL, type: Symbol }
+            )
+          end
+          def to_hash
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/beta/threads/image_url_delta.rbi b/rbi/openai/models/beta/threads/image_url_delta.rbi
new file mode 100644
index 00000000..b760eefa
--- /dev/null
+++ b/rbi/openai/models/beta/threads/image_url_delta.rbi
@@ -0,0 +1,111 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Beta
+      module Threads
+        class ImageURLDelta < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::Beta::Threads::ImageURLDelta,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
+          # to high resolution using `high`.
+          sig do
+            returns(
+              T.nilable(
+                OpenAI::Beta::Threads::ImageURLDelta::Detail::TaggedSymbol
+              )
+            )
+          end
+          attr_reader :detail
+
+          sig do
+            params(
+              detail: OpenAI::Beta::Threads::ImageURLDelta::Detail::OrSymbol
+            ).void
+          end
+          attr_writer :detail
+
+          # The URL of the image; must be one of the supported image types: jpeg, jpg,
+          # png, gif, webp.
+          sig { returns(T.nilable(String)) }
+          attr_reader :url
+
+          sig { params(url: String).void }
+          attr_writer :url
+
+          sig do
+            params(
+              detail: OpenAI::Beta::Threads::ImageURLDelta::Detail::OrSymbol,
+              url: String
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
+            # to high resolution using `high`.
+            detail: nil,
+            # The URL of the image; must be one of the supported image types: jpeg, jpg,
+            # png, gif, webp.
+            url: nil
+          )
+          end
+
+          sig do
+            override.returns(
+              {
+                detail:
+                  OpenAI::Beta::Threads::ImageURLDelta::Detail::TaggedSymbol,
+                url: String
+              }
+            )
+          end
+          def to_hash
+          end
+
+          # Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
+          # to high resolution using `high`.
+          module Detail
+            extend OpenAI::Internal::Type::Enum
+
+            TaggedSymbol =
+              T.type_alias do
+                T.all(Symbol, OpenAI::Beta::Threads::ImageURLDelta::Detail)
+              end
+            OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+            AUTO =
+              T.let(
+                :auto,
+                OpenAI::Beta::Threads::ImageURLDelta::Detail::TaggedSymbol
+              )
+            LOW =
+              T.let(
+                :low,
+                OpenAI::Beta::Threads::ImageURLDelta::Detail::TaggedSymbol
+              )
+            HIGH =
+              T.let(
+                :high,
+                OpenAI::Beta::Threads::ImageURLDelta::Detail::TaggedSymbol
+              )
+
+            sig do
+              override.returns(
+                T::Array[
+                  OpenAI::Beta::Threads::ImageURLDelta::Detail::TaggedSymbol
+                ]
+              )
+            end
+            def self.values
+            end
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/beta/threads/image_url_delta_block.rbi b/rbi/openai/models/beta/threads/image_url_delta_block.rbi
new file mode 100644
index 00000000..0a4c523c
--- /dev/null
+++ b/rbi/openai/models/beta/threads/image_url_delta_block.rbi
@@ -0,0 +1,64 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Beta
+      module Threads
+        class ImageURLDeltaBlock < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::Beta::Threads::ImageURLDeltaBlock,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          # The index of the content part in the message.
+          sig { returns(Integer) }
+          attr_accessor :index
+
+          # Always `image_url`.
+          sig { returns(Symbol) }
+          attr_accessor :type
+
+          sig { returns(T.nilable(OpenAI::Beta::Threads::ImageURLDelta)) }
+          attr_reader :image_url
+
+          sig do
+            params(image_url: OpenAI::Beta::Threads::ImageURLDelta::OrHash).void
+          end
+          attr_writer :image_url
+
+          # References an image URL in the content of a message.
+          sig do
+            params(
+              index: Integer,
+              image_url: OpenAI::Beta::Threads::ImageURLDelta::OrHash,
+              type: Symbol
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # The index of the content part in the message.
+            index:,
+            image_url: nil,
+            # Always `image_url`.
+            type: :image_url
+          )
+          end
+
+          sig do
+            override.returns(
+              {
+                index: Integer,
+                type: Symbol,
+                image_url: OpenAI::Beta::Threads::ImageURLDelta
+              }
+            )
+          end
+          def to_hash
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/beta/threads/message.rbi b/rbi/openai/models/beta/threads/message.rbi
new file mode 100644
index 00000000..c9a985aa
--- /dev/null
+++ b/rbi/openai/models/beta/threads/message.rbi
@@ -0,0 +1,486 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Beta
+      module Threads
+        class Message < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(OpenAI::Beta::Threads::Message, OpenAI::Internal::AnyHash)
+            end
+
+          # The identifier, which can be referenced in API endpoints.
+ sig { returns(String) } + attr_accessor :id + + # If applicable, the ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) that + # authored this message. + sig { returns(T.nilable(String)) } + attr_accessor :assistant_id + + # A list of files attached to the message, and the tools they were added to. + sig do + returns( + T.nilable(T::Array[OpenAI::Beta::Threads::Message::Attachment]) + ) + end + attr_accessor :attachments + + # The Unix timestamp (in seconds) for when the message was completed. + sig { returns(T.nilable(Integer)) } + attr_accessor :completed_at + + # The content of the message in array of text and/or images. + sig do + returns(T::Array[OpenAI::Beta::Threads::MessageContent::Variants]) + end + attr_accessor :content + + # The Unix timestamp (in seconds) for when the message was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The Unix timestamp (in seconds) for when the message was marked as incomplete. + sig { returns(T.nilable(Integer)) } + attr_accessor :incomplete_at + + # On an incomplete message, details about why the message is incomplete. + sig do + returns( + T.nilable(OpenAI::Beta::Threads::Message::IncompleteDetails) + ) + end + attr_reader :incomplete_details + + sig do + params( + incomplete_details: + T.nilable( + OpenAI::Beta::Threads::Message::IncompleteDetails::OrHash + ) + ).void + end + attr_writer :incomplete_details + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The object type, which is always `thread.message`. + sig { returns(Symbol) } + attr_accessor :object + + # The entity that produced the message. One of `user` or `assistant`. + sig { returns(OpenAI::Beta::Threads::Message::Role::TaggedSymbol) } + attr_accessor :role + + # The ID of the [run](https://platform.openai.com/docs/api-reference/runs) + # associated with the creation of this message. Value is `null` when messages are + # created manually using the create message or create thread endpoints. + sig { returns(T.nilable(String)) } + attr_accessor :run_id + + # The status of the message, which can be either `in_progress`, `incomplete`, or + # `completed`. + sig { returns(OpenAI::Beta::Threads::Message::Status::TaggedSymbol) } + attr_accessor :status + + # The [thread](https://platform.openai.com/docs/api-reference/threads) ID that + # this message belongs to. + sig { returns(String) } + attr_accessor :thread_id + + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). 
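+          #
+          # A read-only sketch (resource method shape and `Text#value` assumed from
+          # this SDK's conventions; IDs are placeholders):
+          #
+          #   message = client.beta.threads.messages.retrieve("msg_...", thread_id: "thread_...")
+          #   text_parts = message.content.grep(OpenAI::Beta::Threads::TextContentBlock)
+          #   text_parts.map { |part| part.text.value }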
+ sig do + params( + id: String, + assistant_id: T.nilable(String), + attachments: + T.nilable( + T::Array[OpenAI::Beta::Threads::Message::Attachment::OrHash] + ), + completed_at: T.nilable(Integer), + content: + T::Array[ + T.any( + OpenAI::Beta::Threads::ImageFileContentBlock::OrHash, + OpenAI::Beta::Threads::ImageURLContentBlock::OrHash, + OpenAI::Beta::Threads::TextContentBlock::OrHash, + OpenAI::Beta::Threads::RefusalContentBlock::OrHash + ) + ], + created_at: Integer, + incomplete_at: T.nilable(Integer), + incomplete_details: + T.nilable( + OpenAI::Beta::Threads::Message::IncompleteDetails::OrHash + ), + metadata: T.nilable(T::Hash[Symbol, String]), + role: OpenAI::Beta::Threads::Message::Role::OrSymbol, + run_id: T.nilable(String), + status: OpenAI::Beta::Threads::Message::Status::OrSymbol, + thread_id: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The identifier, which can be referenced in API endpoints. + id:, + # If applicable, the ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) that + # authored this message. + assistant_id:, + # A list of files attached to the message, and the tools they were added to. + attachments:, + # The Unix timestamp (in seconds) for when the message was completed. + completed_at:, + # The content of the message in array of text and/or images. + content:, + # The Unix timestamp (in seconds) for when the message was created. + created_at:, + # The Unix timestamp (in seconds) for when the message was marked as incomplete. + incomplete_at:, + # On an incomplete message, details about why the message is incomplete. + incomplete_details:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + # The entity that produced the message. One of `user` or `assistant`. + role:, + # The ID of the [run](https://platform.openai.com/docs/api-reference/runs) + # associated with the creation of this message. Value is `null` when messages are + # created manually using the create message or create thread endpoints. + run_id:, + # The status of the message, which can be either `in_progress`, `incomplete`, or + # `completed`. + status:, + # The [thread](https://platform.openai.com/docs/api-reference/threads) ID that + # this message belongs to. + thread_id:, + # The object type, which is always `thread.message`. 
+ object: :"thread.message" + ) + end + + sig do + override.returns( + { + id: String, + assistant_id: T.nilable(String), + attachments: + T.nilable( + T::Array[OpenAI::Beta::Threads::Message::Attachment] + ), + completed_at: T.nilable(Integer), + content: + T::Array[OpenAI::Beta::Threads::MessageContent::Variants], + created_at: Integer, + incomplete_at: T.nilable(Integer), + incomplete_details: + T.nilable(OpenAI::Beta::Threads::Message::IncompleteDetails), + metadata: T.nilable(T::Hash[Symbol, String]), + object: Symbol, + role: OpenAI::Beta::Threads::Message::Role::TaggedSymbol, + run_id: T.nilable(String), + status: OpenAI::Beta::Threads::Message::Status::TaggedSymbol, + thread_id: String + } + ) + end + def to_hash + end + + class Attachment < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Message::Attachment, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the file to attach to the message. + sig { returns(T.nilable(String)) } + attr_reader :file_id + + sig { params(file_id: String).void } + attr_writer :file_id + + # The tools to add this file to. + sig do + returns( + T.nilable( + T::Array[ + OpenAI::Beta::Threads::Message::Attachment::Tool::Variants + ] + ) + ) + end + attr_reader :tools + + sig do + params( + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly::OrHash + ) + ] + ).void + end + attr_writer :tools + + sig do + params( + file_id: String, + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly::OrHash + ) + ] + ).returns(T.attached_class) + end + def self.new( + # The ID of the file to attach to the message. + file_id: nil, + # The tools to add this file to. + tools: nil + ) + end + + sig do + override.returns( + { + file_id: String, + tools: + T::Array[ + OpenAI::Beta::Threads::Message::Attachment::Tool::Variants + ] + } + ) + end + def to_hash + end + + module Tool + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly + ) + end + + class AssistantToolsFileSearchTypeOnly < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly, + OpenAI::Internal::AnyHash + ) + end + + # The type of tool being defined: `file_search` + sig { returns(Symbol) } + attr_accessor :type + + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The type of tool being defined: `file_search` + type: :file_search + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::Message::Attachment::Tool::Variants + ] + ) + end + def self.variants + end + end + end + + class IncompleteDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Message::IncompleteDetails, + OpenAI::Internal::AnyHash + ) + end + + # The reason the message is incomplete. + sig do + returns( + OpenAI::Beta::Threads::Message::IncompleteDetails::Reason::TaggedSymbol + ) + end + attr_accessor :reason + + # On an incomplete message, details about why the message is incomplete. 
+ sig do + params( + reason: + OpenAI::Beta::Threads::Message::IncompleteDetails::Reason::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # The reason the message is incomplete. + reason: + ) + end + + sig do + override.returns( + { + reason: + OpenAI::Beta::Threads::Message::IncompleteDetails::Reason::TaggedSymbol + } + ) + end + def to_hash + end + + # The reason the message is incomplete. + module Reason + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Beta::Threads::Message::IncompleteDetails::Reason + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + CONTENT_FILTER = + T.let( + :content_filter, + OpenAI::Beta::Threads::Message::IncompleteDetails::Reason::TaggedSymbol + ) + MAX_TOKENS = + T.let( + :max_tokens, + OpenAI::Beta::Threads::Message::IncompleteDetails::Reason::TaggedSymbol + ) + RUN_CANCELLED = + T.let( + :run_cancelled, + OpenAI::Beta::Threads::Message::IncompleteDetails::Reason::TaggedSymbol + ) + RUN_EXPIRED = + T.let( + :run_expired, + OpenAI::Beta::Threads::Message::IncompleteDetails::Reason::TaggedSymbol + ) + RUN_FAILED = + T.let( + :run_failed, + OpenAI::Beta::Threads::Message::IncompleteDetails::Reason::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::Message::IncompleteDetails::Reason::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + # The entity that produced the message. One of `user` or `assistant`. + module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::Threads::Message::Role) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let(:user, OpenAI::Beta::Threads::Message::Role::TaggedSymbol) + ASSISTANT = + T.let( + :assistant, + OpenAI::Beta::Threads::Message::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Beta::Threads::Message::Role::TaggedSymbol] + ) + end + def self.values + end + end + + # The status of the message, which can be either `in_progress`, `incomplete`, or + # `completed`. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::Threads::Message::Status) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Beta::Threads::Message::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Beta::Threads::Message::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Beta::Threads::Message::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Beta::Threads::Message::Status::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/message_content.rbi b/rbi/openai/models/beta/threads/message_content.rbi new file mode 100644 index 00000000..5b159a12 --- /dev/null +++ b/rbi/openai/models/beta/threads/message_content.rbi @@ -0,0 +1,33 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. 
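+        #
+        # The variants are concrete model classes, so a dispatch sketch like the
+        # following works (`part` is a hypothetical element of `message.content`;
+        # `part.text.value` is assumed from the Text model):
+        #
+        #   case part
+        #   when OpenAI::Beta::Threads::TextContentBlock then part.text.value
+        #   when OpenAI::Beta::Threads::RefusalContentBlock then part.refusal
+        #   when OpenAI::Beta::Threads::ImageFileContentBlock then part.image_file.file_id
+        #   when OpenAI::Beta::Threads::ImageURLContentBlock then part.image_url.url
+        #   end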
+ module MessageContent + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::Threads::ImageFileContentBlock, + OpenAI::Beta::Threads::ImageURLContentBlock, + OpenAI::Beta::Threads::TextContentBlock, + OpenAI::Beta::Threads::RefusalContentBlock + ) + end + + sig do + override.returns( + T::Array[OpenAI::Beta::Threads::MessageContent::Variants] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/message_content_delta.rbi b/rbi/openai/models/beta/threads/message_content_delta.rbi new file mode 100644 index 00000000..4ed6646b --- /dev/null +++ b/rbi/openai/models/beta/threads/message_content_delta.rbi @@ -0,0 +1,33 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. + module MessageContentDelta + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::Threads::ImageFileDeltaBlock, + OpenAI::Beta::Threads::TextDeltaBlock, + OpenAI::Beta::Threads::RefusalDeltaBlock, + OpenAI::Beta::Threads::ImageURLDeltaBlock + ) + end + + sig do + override.returns( + T::Array[OpenAI::Beta::Threads::MessageContentDelta::Variants] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/message_content_part_param.rbi b/rbi/openai/models/beta/threads/message_content_part_param.rbi new file mode 100644 index 00000000..38daf411 --- /dev/null +++ b/rbi/openai/models/beta/threads/message_content_part_param.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. + module MessageContentPartParam + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::Threads::ImageFileContentBlock, + OpenAI::Beta::Threads::ImageURLContentBlock, + OpenAI::Beta::Threads::TextContentBlockParam + ) + end + + sig do + override.returns( + T::Array[OpenAI::Beta::Threads::MessageContentPartParam::Variants] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/message_create_params.rbi b/rbi/openai/models/beta/threads/message_create_params.rbi new file mode 100644 index 00000000..0fca1da4 --- /dev/null +++ b/rbi/openai/models/beta/threads/message_create_params.rbi @@ -0,0 +1,318 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class MessageCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::MessageCreateParams, + OpenAI::Internal::AnyHash + ) + end + + # The text contents of the message. + sig do + returns( + OpenAI::Beta::Threads::MessageCreateParams::Content::Variants + ) + end + attr_accessor :content + + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. 
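+          #
+          # Creation sketch (positional `thread_id` and method shape assumed from
+          # this SDK's resource conventions; the ID is a placeholder):
+          #
+          #   client.beta.threads.messages.create(
+          #     "thread_...",
+          #     role: :user,
+          #     content: "Summarize the attached file."
+          #   )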
+ sig do + returns(OpenAI::Beta::Threads::MessageCreateParams::Role::OrSymbol) + end + attr_accessor :role + + # A list of files attached to the message, and the tools they should be added to. + sig do + returns( + T.nilable( + T::Array[OpenAI::Beta::Threads::MessageCreateParams::Attachment] + ) + ) + end + attr_accessor :attachments + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + sig do + params( + content: + OpenAI::Beta::Threads::MessageCreateParams::Content::Variants, + role: OpenAI::Beta::Threads::MessageCreateParams::Role::OrSymbol, + attachments: + T.nilable( + T::Array[ + OpenAI::Beta::Threads::MessageCreateParams::Attachment::OrHash + ] + ), + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The text contents of the message. + content:, + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. + role:, + # A list of files attached to the message, and the tools they should be added to. + attachments: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + content: + OpenAI::Beta::Threads::MessageCreateParams::Content::Variants, + role: + OpenAI::Beta::Threads::MessageCreateParams::Role::OrSymbol, + attachments: + T.nilable( + T::Array[ + OpenAI::Beta::Threads::MessageCreateParams::Attachment + ] + ), + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # The text contents of the message. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[ + OpenAI::Beta::Threads::MessageContentPartParam::Variants + ] + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::MessageCreateParams::Content::Variants + ] + ) + end + def self.variants + end + + MessageContentPartParamArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + union: OpenAI::Beta::Threads::MessageContentPartParam + ], + OpenAI::Internal::Type::Converter + ) + end + + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. 
+ module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::Threads::MessageCreateParams::Role) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::Beta::Threads::MessageCreateParams::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::Beta::Threads::MessageCreateParams::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::MessageCreateParams::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + + class Attachment < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::MessageCreateParams::Attachment, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the file to attach to the message. + sig { returns(T.nilable(String)) } + attr_reader :file_id + + sig { params(file_id: String).void } + attr_writer :file_id + + # The tools to add this file to. + sig do + returns( + T.nilable( + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch + ) + ] + ) + ) + end + attr_reader :tools + + sig do + params( + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch::OrHash + ) + ] + ).void + end + attr_writer :tools + + sig do + params( + file_id: String, + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch::OrHash + ) + ] + ).returns(T.attached_class) + end + def self.new( + # The ID of the file to attach to the message. + file_id: nil, + # The tools to add this file to. + tools: nil + ) + end + + sig do + override.returns( + { + file_id: String, + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch + ) + ] + } + ) + end + def to_hash + end + + module Tool + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch + ) + end + + class FileSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch, + OpenAI::Internal::AnyHash + ) + end + + # The type of tool being defined: `file_search` + sig { returns(Symbol) } + attr_accessor :type + + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The type of tool being defined: `file_search` + type: :file_search + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::MessageCreateParams::Attachment::Tool::Variants + ] + ) + end + def self.variants + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/message_delete_params.rbi b/rbi/openai/models/beta/threads/message_delete_params.rbi new file mode 100644 index 00000000..9c875c22 --- /dev/null +++ b/rbi/openai/models/beta/threads/message_delete_params.rbi @@ -0,0 +1,42 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class MessageDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + 
OpenAI::Beta::Threads::MessageDeleteParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :thread_id + + sig do + params( + thread_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(thread_id:, request_options: {}) + end + + sig do + override.returns( + { thread_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/message_deleted.rbi b/rbi/openai/models/beta/threads/message_deleted.rbi new file mode 100644 index 00000000..d794d043 --- /dev/null +++ b/rbi/openai/models/beta/threads/message_deleted.rbi @@ -0,0 +1,44 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class MessageDeleted < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::MessageDeleted, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :id + + sig { returns(T::Boolean) } + attr_accessor :deleted + + sig { returns(Symbol) } + attr_accessor :object + + sig do + params(id: String, deleted: T::Boolean, object: Symbol).returns( + T.attached_class + ) + end + def self.new(id:, deleted:, object: :"thread.message.deleted") + end + + sig do + override.returns( + { id: String, deleted: T::Boolean, object: Symbol } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/message_delta.rbi b/rbi/openai/models/beta/threads/message_delta.rbi new file mode 100644 index 00000000..7348c453 --- /dev/null +++ b/rbi/openai/models/beta/threads/message_delta.rbi @@ -0,0 +1,128 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class MessageDelta < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::MessageDelta, + OpenAI::Internal::AnyHash + ) + end + + # The content of the message in array of text and/or images. + sig do + returns( + T.nilable( + T::Array[OpenAI::Beta::Threads::MessageContentDelta::Variants] + ) + ) + end + attr_reader :content + + sig do + params( + content: + T::Array[ + T.any( + OpenAI::Beta::Threads::ImageFileDeltaBlock::OrHash, + OpenAI::Beta::Threads::TextDeltaBlock::OrHash, + OpenAI::Beta::Threads::RefusalDeltaBlock::OrHash, + OpenAI::Beta::Threads::ImageURLDeltaBlock::OrHash + ) + ] + ).void + end + attr_writer :content + + # The entity that produced the message. One of `user` or `assistant`. + sig do + returns( + T.nilable(OpenAI::Beta::Threads::MessageDelta::Role::TaggedSymbol) + ) + end + attr_reader :role + + sig do + params( + role: OpenAI::Beta::Threads::MessageDelta::Role::OrSymbol + ).void + end + attr_writer :role + + # The delta containing the fields that have changed on the Message. + sig do + params( + content: + T::Array[ + T.any( + OpenAI::Beta::Threads::ImageFileDeltaBlock::OrHash, + OpenAI::Beta::Threads::TextDeltaBlock::OrHash, + OpenAI::Beta::Threads::RefusalDeltaBlock::OrHash, + OpenAI::Beta::Threads::ImageURLDeltaBlock::OrHash + ) + ], + role: OpenAI::Beta::Threads::MessageDelta::Role::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # The content of the message in array of text and/or images. + content: nil, + # The entity that produced the message. One of `user` or `assistant`. 
+ role: nil + ) + end + + sig do + override.returns( + { + content: + T::Array[ + OpenAI::Beta::Threads::MessageContentDelta::Variants + ], + role: OpenAI::Beta::Threads::MessageDelta::Role::TaggedSymbol + } + ) + end + def to_hash + end + + # The entity that produced the message. One of `user` or `assistant`. + module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::Threads::MessageDelta::Role) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::Beta::Threads::MessageDelta::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::Beta::Threads::MessageDelta::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::MessageDelta::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/message_delta_event.rbi b/rbi/openai/models/beta/threads/message_delta_event.rbi new file mode 100644 index 00000000..9dc95d52 --- /dev/null +++ b/rbi/openai/models/beta/threads/message_delta_event.rbi @@ -0,0 +1,67 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class MessageDeltaEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::MessageDeltaEvent, + OpenAI::Internal::AnyHash + ) + end + + # The identifier of the message, which can be referenced in API endpoints. + sig { returns(String) } + attr_accessor :id + + # The delta containing the fields that have changed on the Message. + sig { returns(OpenAI::Beta::Threads::MessageDelta) } + attr_reader :delta + + sig do + params(delta: OpenAI::Beta::Threads::MessageDelta::OrHash).void + end + attr_writer :delta + + # The object type, which is always `thread.message.delta`. + sig { returns(Symbol) } + attr_accessor :object + + # Represents a message delta i.e. any changed fields on a message during + # streaming. + sig do + params( + id: String, + delta: OpenAI::Beta::Threads::MessageDelta::OrHash, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The identifier of the message, which can be referenced in API endpoints. + id:, + # The delta containing the fields that have changed on the Message. + delta:, + # The object type, which is always `thread.message.delta`. + object: :"thread.message.delta" + ) + end + + sig do + override.returns( + { + id: String, + delta: OpenAI::Beta::Threads::MessageDelta, + object: Symbol + } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/message_list_params.rbi b/rbi/openai/models/beta/threads/message_list_params.rbi new file mode 100644 index 00000000..7873262c --- /dev/null +++ b/rbi/openai/models/beta/threads/message_list_params.rbi @@ -0,0 +1,157 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class MessageListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::MessageListParams, + OpenAI::Internal::AnyHash + ) + end + + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. 
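+          #
+          # Cursor sketch (method shape and the page object's `data` accessor are
+          # assumed; IDs are placeholders):
+          #
+          #   page = client.beta.threads.messages.list("thread_...", limit: 20)
+          #   next_page = client.beta.threads.messages.list("thread_...", after: page.data.last.id)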
+ sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + sig { returns(T.nilable(String)) } + attr_reader :before + + sig { params(before: String).void } + attr_writer :before + + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + sig do + returns( + T.nilable( + OpenAI::Beta::Threads::MessageListParams::Order::OrSymbol + ) + ) + end + attr_reader :order + + sig do + params( + order: OpenAI::Beta::Threads::MessageListParams::Order::OrSymbol + ).void + end + attr_writer :order + + # Filter messages by the run ID that generated them. + sig { returns(T.nilable(String)) } + attr_reader :run_id + + sig { params(run_id: String).void } + attr_writer :run_id + + sig do + params( + after: String, + before: String, + limit: Integer, + order: OpenAI::Beta::Threads::MessageListParams::Order::OrSymbol, + run_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + # Filter messages by the run ID that generated them. + run_id: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + before: String, + limit: Integer, + order: + OpenAI::Beta::Threads::MessageListParams::Order::OrSymbol, + run_id: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. 
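+          #
+          # e.g. pass `order: :desc` to receive the newest messages first (illustrative).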
+ module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::Threads::MessageListParams::Order) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = + T.let( + :asc, + OpenAI::Beta::Threads::MessageListParams::Order::TaggedSymbol + ) + DESC = + T.let( + :desc, + OpenAI::Beta::Threads::MessageListParams::Order::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::MessageListParams::Order::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/message_retrieve_params.rbi b/rbi/openai/models/beta/threads/message_retrieve_params.rbi new file mode 100644 index 00000000..c0d12ba0 --- /dev/null +++ b/rbi/openai/models/beta/threads/message_retrieve_params.rbi @@ -0,0 +1,42 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class MessageRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::MessageRetrieveParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :thread_id + + sig do + params( + thread_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(thread_id:, request_options: {}) + end + + sig do + override.returns( + { thread_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/message_update_params.rbi b/rbi/openai/models/beta/threads/message_update_params.rbi new file mode 100644 index 00000000..767a7340 --- /dev/null +++ b/rbi/openai/models/beta/threads/message_update_params.rbi @@ -0,0 +1,66 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class MessageUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::MessageUpdateParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :thread_id + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + sig do + params( + thread_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + thread_id:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. 
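+            #
+            # e.g. `metadata: {user_id: "u_..."}` (illustrative key and value).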
+            metadata: nil,
+            request_options: {}
+          )
+          end
+
+          sig do
+            override.returns(
+              {
+                thread_id: String,
+                metadata: T.nilable(T::Hash[Symbol, String]),
+                request_options: OpenAI::RequestOptions
+              }
+            )
+          end
+          def to_hash
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/beta/threads/refusal_content_block.rbi b/rbi/openai/models/beta/threads/refusal_content_block.rbi
new file mode 100644
index 00000000..24ad27e7
--- /dev/null
+++ b/rbi/openai/models/beta/threads/refusal_content_block.rbi
@@ -0,0 +1,41 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Beta
+      module Threads
+        class RefusalContentBlock < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::Beta::Threads::RefusalContentBlock,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          sig { returns(String) }
+          attr_accessor :refusal
+
+          # Always `refusal`.
+          sig { returns(Symbol) }
+          attr_accessor :type
+
+          # The refusal content generated by the assistant.
+          sig do
+            params(refusal: String, type: Symbol).returns(T.attached_class)
+          end
+          def self.new(
+            refusal:,
+            # Always `refusal`.
+            type: :refusal
+          )
+          end
+
+          sig { override.returns({ refusal: String, type: Symbol }) }
+          def to_hash
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/beta/threads/refusal_delta_block.rbi b/rbi/openai/models/beta/threads/refusal_delta_block.rbi
new file mode 100644
index 00000000..7fe5eed0
--- /dev/null
+++ b/rbi/openai/models/beta/threads/refusal_delta_block.rbi
@@ -0,0 +1,54 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Beta
+      module Threads
+        class RefusalDeltaBlock < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::Beta::Threads::RefusalDeltaBlock,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          # The index of the refusal part in the message.
+          sig { returns(Integer) }
+          attr_accessor :index
+
+          # Always `refusal`.
+          sig { returns(Symbol) }
+          attr_accessor :type
+
+          sig { returns(T.nilable(String)) }
+          attr_reader :refusal
+
+          sig { params(refusal: String).void }
+          attr_writer :refusal
+
+          # The refusal content that is part of a message.
+          sig do
+            params(index: Integer, refusal: String, type: Symbol).returns(
+              T.attached_class
+            )
+          end
+          def self.new(
+            # The index of the refusal part in the message.
+            index:,
+            refusal: nil,
+            # Always `refusal`.
+            type: :refusal
+          )
+          end
+
+          sig do
+            override.returns({ index: Integer, type: Symbol, refusal: String })
+          end
+          def to_hash
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/beta/threads/required_action_function_tool_call.rbi b/rbi/openai/models/beta/threads/required_action_function_tool_call.rbi
new file mode 100644
index 00000000..c92a1e83
--- /dev/null
+++ b/rbi/openai/models/beta/threads/required_action_function_tool_call.rbi
@@ -0,0 +1,117 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Beta
+      module Threads
+        class RequiredActionFunctionToolCall < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::Beta::Threads::RequiredActionFunctionToolCall,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          # The ID of the tool call. This ID must be referenced when you submit the tool
+          # outputs using the
+          # [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
+          # endpoint.
+          sig { returns(String) }
+          attr_accessor :id
+
+          # The function definition.
+          sig do
+            returns(
+              OpenAI::Beta::Threads::RequiredActionFunctionToolCall::Function
+            )
+          end
+          attr_reader :function
+
+          sig do
+            params(
+              function:
+                OpenAI::Beta::Threads::RequiredActionFunctionToolCall::Function::OrHash
+            ).void
+          end
+          attr_writer :function
+
+          # The type of tool call the output is required for. For now, this is always
+          # `function`.
+          sig { returns(Symbol) }
+          attr_accessor :type
+
+          # Tool call objects
+          sig do
+            params(
+              id: String,
+              function:
+                OpenAI::Beta::Threads::RequiredActionFunctionToolCall::Function::OrHash,
+              type: Symbol
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # The ID of the tool call. This ID must be referenced when you submit the tool
+            # outputs using the
+            # [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
+            # endpoint.
+            id:,
+            # The function definition.
+            function:,
+            # The type of tool call the output is required for. For now, this is always
+            # `function`.
+            type: :function
+          )
+          end
+
+          sig do
+            override.returns(
+              {
+                id: String,
+                function:
+                  OpenAI::Beta::Threads::RequiredActionFunctionToolCall::Function,
+                type: Symbol
+              }
+            )
+          end
+          def to_hash
+          end
+
+          class Function < OpenAI::Internal::Type::BaseModel
+            OrHash =
+              T.type_alias do
+                T.any(
+                  OpenAI::Beta::Threads::RequiredActionFunctionToolCall::Function,
+                  OpenAI::Internal::AnyHash
+                )
+              end
+
+            # The arguments that the model expects you to pass to the function.
+            sig { returns(String) }
+            attr_accessor :arguments
+
+            # The name of the function.
+            sig { returns(String) }
+            attr_accessor :name
+
+            # The function definition.
+            sig do
+              params(arguments: String, name: String).returns(T.attached_class)
+            end
+            def self.new(
+              # The arguments that the model expects you to pass to the function.
+              arguments:,
+              # The name of the function.
+              name:
+            )
+            end
+
+            sig { override.returns({ arguments: String, name: String }) }
+            def to_hash
+            end
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/beta/threads/run.rbi b/rbi/openai/models/beta/threads/run.rbi
new file mode 100644
index 00000000..1f45da92
--- /dev/null
+++ b/rbi/openai/models/beta/threads/run.rbi
@@ -0,0 +1,870 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Beta
+      module Threads
+        class Run < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(OpenAI::Beta::Threads::Run, OpenAI::Internal::AnyHash)
+            end
+
+          # The identifier, which can be referenced in API endpoints.
+          sig { returns(String) }
+          attr_accessor :id
+
+          # The ID of the
+          # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
+          # execution of this run.
+          sig { returns(String) }
+          attr_accessor :assistant_id
+
+          # The Unix timestamp (in seconds) for when the run was cancelled.
+          sig { returns(T.nilable(Integer)) }
+          attr_accessor :cancelled_at
+
+          # The Unix timestamp (in seconds) for when the run was completed.
+          sig { returns(T.nilable(Integer)) }
+          attr_accessor :completed_at
+
+          # The Unix timestamp (in seconds) for when the run was created.
+          sig { returns(Integer) }
+          attr_accessor :created_at
+
+          # The Unix timestamp (in seconds) for when the run will expire.
+          sig { returns(T.nilable(Integer)) }
+          attr_accessor :expires_at
+
+          # The Unix timestamp (in seconds) for when the run failed.
+          sig { returns(T.nilable(Integer)) }
+          attr_accessor :failed_at
+
+          # Details on why the run is incomplete. Will be `null` if the run is not
+          # incomplete.
+ sig do + returns(T.nilable(OpenAI::Beta::Threads::Run::IncompleteDetails)) + end + attr_reader :incomplete_details + + sig do + params( + incomplete_details: + T.nilable(OpenAI::Beta::Threads::Run::IncompleteDetails::OrHash) + ).void + end + attr_writer :incomplete_details + + # The instructions that the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. + sig { returns(String) } + attr_accessor :instructions + + # The last error associated with this run. Will be `null` if there are no errors. + sig { returns(T.nilable(OpenAI::Beta::Threads::Run::LastError)) } + attr_reader :last_error + + sig do + params( + last_error: + T.nilable(OpenAI::Beta::Threads::Run::LastError::OrHash) + ).void + end + attr_writer :last_error + + # The maximum number of completion tokens specified to have been used over the + # course of the run. + sig { returns(T.nilable(Integer)) } + attr_accessor :max_completion_tokens + + # The maximum number of prompt tokens specified to have been used over the course + # of the run. + sig { returns(T.nilable(Integer)) } + attr_accessor :max_prompt_tokens + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The model that the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. + sig { returns(String) } + attr_accessor :model + + # The object type, which is always `thread.run`. + sig { returns(Symbol) } + attr_accessor :object + + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. + sig { returns(T::Boolean) } + attr_accessor :parallel_tool_calls + + # Details on the action required to continue the run. Will be `null` if no action + # is required. + sig { returns(T.nilable(OpenAI::Beta::Threads::Run::RequiredAction)) } + attr_reader :required_action + + sig do + params( + required_action: + T.nilable(OpenAI::Beta::Threads::Run::RequiredAction::OrHash) + ).void + end + attr_writer :required_action + + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. 
Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + sig do + returns( + T.nilable(OpenAI::Beta::AssistantResponseFormatOption::Variants) + ) + end + attr_accessor :response_format + + # The Unix timestamp (in seconds) for when the run was started. + sig { returns(T.nilable(Integer)) } + attr_accessor :started_at + + # The status of the run, which can be either `queued`, `in_progress`, + # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + # `incomplete`, or `expired`. + sig { returns(OpenAI::Beta::Threads::RunStatus::TaggedSymbol) } + attr_accessor :status + + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # that was executed on as a part of this run. + sig { returns(String) } + attr_accessor :thread_id + + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + sig do + returns( + T.nilable(OpenAI::Beta::AssistantToolChoiceOption::Variants) + ) + end + attr_accessor :tool_choice + + # The list of tools that the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. + sig { returns(T::Array[OpenAI::Beta::AssistantTool::Variants]) } + attr_accessor :tools + + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. + sig do + returns(T.nilable(OpenAI::Beta::Threads::Run::TruncationStrategy)) + end + attr_reader :truncation_strategy + + sig do + params( + truncation_strategy: + T.nilable( + OpenAI::Beta::Threads::Run::TruncationStrategy::OrHash + ) + ).void + end + attr_writer :truncation_strategy + + # Usage statistics related to the run. This value will be `null` if the run is not + # in a terminal state (i.e. `in_progress`, `queued`, etc.). + sig { returns(T.nilable(OpenAI::Beta::Threads::Run::Usage)) } + attr_reader :usage + + sig do + params( + usage: T.nilable(OpenAI::Beta::Threads::Run::Usage::OrHash) + ).void + end + attr_writer :usage + + # The sampling temperature used for this run. If not set, defaults to 1. + sig { returns(T.nilable(Float)) } + attr_accessor :temperature + + # The nucleus sampling value used for this run. If not set, defaults to 1. + sig { returns(T.nilable(Float)) } + attr_accessor :top_p + + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). 
+ sig do + params( + id: String, + assistant_id: String, + cancelled_at: T.nilable(Integer), + completed_at: T.nilable(Integer), + created_at: Integer, + expires_at: T.nilable(Integer), + failed_at: T.nilable(Integer), + incomplete_details: + T.nilable( + OpenAI::Beta::Threads::Run::IncompleteDetails::OrHash + ), + instructions: String, + last_error: + T.nilable(OpenAI::Beta::Threads::Run::LastError::OrHash), + max_completion_tokens: T.nilable(Integer), + max_prompt_tokens: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: String, + parallel_tool_calls: T::Boolean, + required_action: + T.nilable(OpenAI::Beta::Threads::Run::RequiredAction::OrHash), + response_format: + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash + ) + ), + started_at: T.nilable(Integer), + status: OpenAI::Beta::Threads::RunStatus::OrSymbol, + thread_id: String, + tool_choice: + T.nilable( + T.any( + OpenAI::Beta::AssistantToolChoiceOption::Auto::OrSymbol, + OpenAI::Beta::AssistantToolChoice::OrHash + ) + ), + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::FileSearchTool::OrHash, + OpenAI::Beta::FunctionTool::OrHash + ) + ], + truncation_strategy: + T.nilable( + OpenAI::Beta::Threads::Run::TruncationStrategy::OrHash + ), + usage: T.nilable(OpenAI::Beta::Threads::Run::Usage::OrHash), + temperature: T.nilable(Float), + top_p: T.nilable(Float), + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The identifier, which can be referenced in API endpoints. + id:, + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # execution of this run. + assistant_id:, + # The Unix timestamp (in seconds) for when the run was cancelled. + cancelled_at:, + # The Unix timestamp (in seconds) for when the run was completed. + completed_at:, + # The Unix timestamp (in seconds) for when the run was created. + created_at:, + # The Unix timestamp (in seconds) for when the run will expire. + expires_at:, + # The Unix timestamp (in seconds) for when the run failed. + failed_at:, + # Details on why the run is incomplete. Will be `null` if the run is not + # incomplete. + incomplete_details:, + # The instructions that the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. + instructions:, + # The last error associated with this run. Will be `null` if there are no errors. + last_error:, + # The maximum number of completion tokens specified to have been used over the + # course of the run. + max_completion_tokens:, + # The maximum number of prompt tokens specified to have been used over the course + # of the run. + max_prompt_tokens:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + # The model that the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. + model:, + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. + parallel_tool_calls:, + # Details on the action required to continue the run. 
Will be `null` if no action + # is required. + required_action:, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + response_format:, + # The Unix timestamp (in seconds) for when the run was started. + started_at:, + # The status of the run, which can be either `queued`, `in_progress`, + # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + # `incomplete`, or `expired`. + status:, + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # that was executed on as a part of this run. + thread_id:, + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + tool_choice:, + # The list of tools that the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. + tools:, + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. + truncation_strategy:, + # Usage statistics related to the run. This value will be `null` if the run is not + # in a terminal state (i.e. `in_progress`, `queued`, etc.). + usage:, + # The sampling temperature used for this run. If not set, defaults to 1. + temperature: nil, + # The nucleus sampling value used for this run. If not set, defaults to 1. + top_p: nil, + # The object type, which is always `thread.run`. 
+ object: :"thread.run" + ) + end + + sig do + override.returns( + { + id: String, + assistant_id: String, + cancelled_at: T.nilable(Integer), + completed_at: T.nilable(Integer), + created_at: Integer, + expires_at: T.nilable(Integer), + failed_at: T.nilable(Integer), + incomplete_details: + T.nilable(OpenAI::Beta::Threads::Run::IncompleteDetails), + instructions: String, + last_error: T.nilable(OpenAI::Beta::Threads::Run::LastError), + max_completion_tokens: T.nilable(Integer), + max_prompt_tokens: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: String, + object: Symbol, + parallel_tool_calls: T::Boolean, + required_action: + T.nilable(OpenAI::Beta::Threads::Run::RequiredAction), + response_format: + T.nilable( + OpenAI::Beta::AssistantResponseFormatOption::Variants + ), + started_at: T.nilable(Integer), + status: OpenAI::Beta::Threads::RunStatus::TaggedSymbol, + thread_id: String, + tool_choice: + T.nilable(OpenAI::Beta::AssistantToolChoiceOption::Variants), + tools: T::Array[OpenAI::Beta::AssistantTool::Variants], + truncation_strategy: + T.nilable(OpenAI::Beta::Threads::Run::TruncationStrategy), + usage: T.nilable(OpenAI::Beta::Threads::Run::Usage), + temperature: T.nilable(Float), + top_p: T.nilable(Float) + } + ) + end + def to_hash + end + + class IncompleteDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Run::IncompleteDetails, + OpenAI::Internal::AnyHash + ) + end + + # The reason why the run is incomplete. This will point to which specific token + # limit was reached over the course of the run. + sig do + returns( + T.nilable( + OpenAI::Beta::Threads::Run::IncompleteDetails::Reason::TaggedSymbol + ) + ) + end + attr_reader :reason + + sig do + params( + reason: + OpenAI::Beta::Threads::Run::IncompleteDetails::Reason::OrSymbol + ).void + end + attr_writer :reason + + # Details on why the run is incomplete. Will be `null` if the run is not + # incomplete. + sig do + params( + reason: + OpenAI::Beta::Threads::Run::IncompleteDetails::Reason::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # The reason why the run is incomplete. This will point to which specific token + # limit was reached over the course of the run. + reason: nil + ) + end + + sig do + override.returns( + { + reason: + OpenAI::Beta::Threads::Run::IncompleteDetails::Reason::TaggedSymbol + } + ) + end + def to_hash + end + + # The reason why the run is incomplete. This will point to which specific token + # limit was reached over the course of the run. + module Reason + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Beta::Threads::Run::IncompleteDetails::Reason + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MAX_COMPLETION_TOKENS = + T.let( + :max_completion_tokens, + OpenAI::Beta::Threads::Run::IncompleteDetails::Reason::TaggedSymbol + ) + MAX_PROMPT_TOKENS = + T.let( + :max_prompt_tokens, + OpenAI::Beta::Threads::Run::IncompleteDetails::Reason::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::Run::IncompleteDetails::Reason::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class LastError < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Run::LastError, + OpenAI::Internal::AnyHash + ) + end + + # One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. 
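+ # A handling sketch (hypothetical helper methods, shown for illustration only):
+ #
+ #   case run.last_error&.code
+ #   when :rate_limit_exceeded then schedule_retry(run)
+ #   when :server_error, :invalid_prompt then log_failure(run.last_error.message)
+ #   end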
+ sig do + returns(OpenAI::Beta::Threads::Run::LastError::Code::TaggedSymbol) + end + attr_accessor :code + + # A human-readable description of the error. + sig { returns(String) } + attr_accessor :message + + # The last error associated with this run. Will be `null` if there are no errors. + sig do + params( + code: OpenAI::Beta::Threads::Run::LastError::Code::OrSymbol, + message: String + ).returns(T.attached_class) + end + def self.new( + # One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. + code:, + # A human-readable description of the error. + message: + ) + end + + sig do + override.returns( + { + code: + OpenAI::Beta::Threads::Run::LastError::Code::TaggedSymbol, + message: String + } + ) + end + def to_hash + end + + # One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. + module Code + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::Threads::Run::LastError::Code) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SERVER_ERROR = + T.let( + :server_error, + OpenAI::Beta::Threads::Run::LastError::Code::TaggedSymbol + ) + RATE_LIMIT_EXCEEDED = + T.let( + :rate_limit_exceeded, + OpenAI::Beta::Threads::Run::LastError::Code::TaggedSymbol + ) + INVALID_PROMPT = + T.let( + :invalid_prompt, + OpenAI::Beta::Threads::Run::LastError::Code::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::Run::LastError::Code::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class RequiredAction < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Run::RequiredAction, + OpenAI::Internal::AnyHash + ) + end + + # Details on the tool outputs needed for this run to continue. + sig do + returns( + OpenAI::Beta::Threads::Run::RequiredAction::SubmitToolOutputs + ) + end + attr_reader :submit_tool_outputs + + sig do + params( + submit_tool_outputs: + OpenAI::Beta::Threads::Run::RequiredAction::SubmitToolOutputs::OrHash + ).void + end + attr_writer :submit_tool_outputs + + # For now, this is always `submit_tool_outputs`. + sig { returns(Symbol) } + attr_accessor :type + + # Details on the action required to continue the run. Will be `null` if no action + # is required. + sig do + params( + submit_tool_outputs: + OpenAI::Beta::Threads::Run::RequiredAction::SubmitToolOutputs::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Details on the tool outputs needed for this run to continue. + submit_tool_outputs:, + # For now, this is always `submit_tool_outputs`. + type: :submit_tool_outputs + ) + end + + sig do + override.returns( + { + submit_tool_outputs: + OpenAI::Beta::Threads::Run::RequiredAction::SubmitToolOutputs, + type: Symbol + } + ) + end + def to_hash + end + + class SubmitToolOutputs < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Run::RequiredAction::SubmitToolOutputs, + OpenAI::Internal::AnyHash + ) + end + + # A list of the relevant tool calls. + sig do + returns( + T::Array[ + OpenAI::Beta::Threads::RequiredActionFunctionToolCall + ] + ) + end + attr_accessor :tool_calls + + # Details on the tool outputs needed for this run to continue. + sig do + params( + tool_calls: + T::Array[ + OpenAI::Beta::Threads::RequiredActionFunctionToolCall::OrHash + ] + ).returns(T.attached_class) + end + def self.new( + # A list of the relevant tool calls. 
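+ # For example (sketch, assuming a run whose `status` is `:requires_action`):
+ #
+ #   run.required_action.submit_tool_outputs.tool_calls.each do |call|
+ #     puts call.id
+ #   end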
+ tool_calls: + ) + end + + sig do + override.returns( + { + tool_calls: + T::Array[ + OpenAI::Beta::Threads::RequiredActionFunctionToolCall + ] + } + ) + end + def to_hash + end + end + end + + class TruncationStrategy < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Run::TruncationStrategy, + OpenAI::Internal::AnyHash + ) + end + + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. + sig do + returns( + OpenAI::Beta::Threads::Run::TruncationStrategy::Type::TaggedSymbol + ) + end + attr_accessor :type + + # The number of most recent messages from the thread when constructing the context + # for the run. + sig { returns(T.nilable(Integer)) } + attr_accessor :last_messages + + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. + sig do + params( + type: + OpenAI::Beta::Threads::Run::TruncationStrategy::Type::OrSymbol, + last_messages: T.nilable(Integer) + ).returns(T.attached_class) + end + def self.new( + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. + type:, + # The number of most recent messages from the thread when constructing the context + # for the run. + last_messages: nil + ) + end + + sig do + override.returns( + { + type: + OpenAI::Beta::Threads::Run::TruncationStrategy::Type::TaggedSymbol, + last_messages: T.nilable(Integer) + } + ) + end + def to_hash + end + + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Beta::Threads::Run::TruncationStrategy::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Beta::Threads::Run::TruncationStrategy::Type::TaggedSymbol + ) + LAST_MESSAGES = + T.let( + :last_messages, + OpenAI::Beta::Threads::Run::TruncationStrategy::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::Run::TruncationStrategy::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class Usage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Run::Usage, + OpenAI::Internal::AnyHash + ) + end + + # Number of completion tokens used over the course of the run. + sig { returns(Integer) } + attr_accessor :completion_tokens + + # Number of prompt tokens used over the course of the run. + sig { returns(Integer) } + attr_accessor :prompt_tokens + + # Total number of tokens used (prompt + completion). + sig { returns(Integer) } + attr_accessor :total_tokens + + # Usage statistics related to the run. This value will be `null` if the run is not + # in a terminal state (i.e. 
`in_progress`, `queued`, etc.). + sig do + params( + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # Number of completion tokens used over the course of the run. + completion_tokens:, + # Number of prompt tokens used over the course of the run. + prompt_tokens:, + # Total number of tokens used (prompt + completion). + total_tokens: + ) + end + + sig do + override.returns( + { + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + } + ) + end + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/run_cancel_params.rbi b/rbi/openai/models/beta/threads/run_cancel_params.rbi new file mode 100644 index 00000000..3879ca2b --- /dev/null +++ b/rbi/openai/models/beta/threads/run_cancel_params.rbi @@ -0,0 +1,42 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class RunCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::RunCancelParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :thread_id + + sig do + params( + thread_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(thread_id:, request_options: {}) + end + + sig do + override.returns( + { thread_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/run_create_params.rbi b/rbi/openai/models/beta/threads/run_create_params.rbi new file mode 100644 index 00000000..ccb3b696 --- /dev/null +++ b/rbi/openai/models/beta/threads/run_create_params.rbi @@ -0,0 +1,880 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class RunCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::RunCreateParams, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. + sig { returns(String) } + attr_accessor :assistant_id + + # A list of additional fields to include in the response. Currently the only + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. + sig do + returns( + T.nilable( + T::Array[OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol] + ) + ) + end + attr_reader :include + + sig do + params( + include: + T::Array[OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol] + ).void + end + attr_writer :include + + # Appends additional instructions at the end of the instructions for the run. This + # is useful for modifying the behavior on a per-run basis without overriding other + # instructions. + sig { returns(T.nilable(String)) } + attr_accessor :additional_instructions + + # Adds additional messages to the thread before creating the run. 
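+ # For example, a sketch of the expected shape:
+ #
+ #   additional_messages: [
+ #     {role: :user, content: "Please summarize the attached report."}
+ #   ]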
+ sig do + returns( + T.nilable( + T::Array[ + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage + ] + ) + ) + end + attr_accessor :additional_messages + + # Overrides the + # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + # of the assistant. This is useful for modifying the behavior on a per-run basis. + sig { returns(T.nilable(String)) } + attr_accessor :instructions + + # The maximum number of completion tokens that may be used over the course of the + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. + sig { returns(T.nilable(Integer)) } + attr_accessor :max_completion_tokens + + # The maximum number of prompt tokens that may be used over the course of the run. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. + sig { returns(T.nilable(Integer)) } + attr_accessor :max_prompt_tokens + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. + sig { returns(T.nilable(T.any(String, OpenAI::ChatModel::OrSymbol))) } + attr_accessor :model + + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. + sig { returns(T.nilable(T::Boolean)) } + attr_reader :parallel_tool_calls + + sig { params(parallel_tool_calls: T::Boolean).void } + attr_writer :parallel_tool_calls + + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } + attr_accessor :reasoning_effort + + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. 
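+ # For example, a JSON-mode sketch:
+ #
+ #   response_format: {type: :json_object}
+ #
+ # or, for Structured Outputs (schema body elided here):
+ #
+ #   response_format: {type: :json_schema, json_schema: {name: "answer", schema: {...}}}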
+ # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + sig do + returns( + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText, + OpenAI::ResponseFormatJSONObject, + OpenAI::ResponseFormatJSONSchema + ) + ) + ) + end + attr_accessor :response_format + + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + sig { returns(T.nilable(Float)) } + attr_accessor :temperature + + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + sig do + returns( + T.nilable( + T.any( + OpenAI::Beta::AssistantToolChoiceOption::Auto::OrSymbol, + OpenAI::Beta::AssistantToolChoice + ) + ) + ) + end + attr_accessor :tool_choice + + # Override the tools the assistant can use for this run. This is useful for + # modifying the behavior on a per-run basis. + sig do + returns( + T.nilable( + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::FileSearchTool, + OpenAI::Beta::FunctionTool + ) + ] + ) + ) + end + attr_accessor :tools + + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. + sig { returns(T.nilable(Float)) } + attr_accessor :top_p + + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. 
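+ # For example, a sketch that keeps only the ten most recent messages:
+ #
+ #   truncation_strategy: {type: :last_messages, last_messages: 10}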
+ sig do + returns( + T.nilable( + OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy + ) + ) + end + attr_reader :truncation_strategy + + sig do + params( + truncation_strategy: + T.nilable( + OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::OrHash + ) + ).void + end + attr_writer :truncation_strategy + + sig do + params( + assistant_id: String, + include: + T::Array[OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol], + additional_instructions: T.nilable(String), + additional_messages: + T.nilable( + T::Array[ + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::OrHash + ] + ), + instructions: T.nilable(String), + max_completion_tokens: T.nilable(Integer), + max_prompt_tokens: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: T.nilable(T.any(String, OpenAI::ChatModel::OrSymbol)), + parallel_tool_calls: T::Boolean, + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + response_format: + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash + ) + ), + temperature: T.nilable(Float), + tool_choice: + T.nilable( + T.any( + OpenAI::Beta::AssistantToolChoiceOption::Auto::OrSymbol, + OpenAI::Beta::AssistantToolChoice::OrHash + ) + ), + tools: + T.nilable( + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::FileSearchTool::OrHash, + OpenAI::Beta::FunctionTool::OrHash + ) + ] + ), + top_p: T.nilable(Float), + truncation_strategy: + T.nilable( + OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::OrHash + ), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. + assistant_id:, + # A list of additional fields to include in the response. Currently the only + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. + include: nil, + # Appends additional instructions at the end of the instructions for the run. This + # is useful for modifying the behavior on a per-run basis without overriding other + # instructions. + additional_instructions: nil, + # Adds additional messages to the thread before creating the run. + additional_messages: nil, + # Overrides the + # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + # of the assistant. This is useful for modifying the behavior on a per-run basis. + instructions: nil, + # The maximum number of completion tokens that may be used over the course of the + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. + max_completion_tokens: nil, + # The maximum number of prompt tokens that may be used over the course of the run. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. 
See + # `incomplete_details` for more info. + max_prompt_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. + model: nil, + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. + parallel_tool_calls: nil, + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + response_format: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + temperature: nil, + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + tool_choice: nil, + # Override the tools the assistant can use for this run. This is useful for + # modifying the behavior on a per-run basis. + tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. 
So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. + top_p: nil, + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. + truncation_strategy: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + assistant_id: String, + include: + T::Array[ + OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol + ], + additional_instructions: T.nilable(String), + additional_messages: + T.nilable( + T::Array[ + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage + ] + ), + instructions: T.nilable(String), + max_completion_tokens: T.nilable(Integer), + max_prompt_tokens: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: T.nilable(T.any(String, OpenAI::ChatModel::OrSymbol)), + parallel_tool_calls: T::Boolean, + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + response_format: + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText, + OpenAI::ResponseFormatJSONObject, + OpenAI::ResponseFormatJSONSchema + ) + ), + temperature: T.nilable(Float), + tool_choice: + T.nilable( + T.any( + OpenAI::Beta::AssistantToolChoiceOption::Auto::OrSymbol, + OpenAI::Beta::AssistantToolChoice + ) + ), + tools: + T.nilable( + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::FileSearchTool, + OpenAI::Beta::FunctionTool + ) + ] + ), + top_p: T.nilable(Float), + truncation_strategy: + T.nilable( + OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy + ), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + class AdditionalMessage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage, + OpenAI::Internal::AnyHash + ) + end + + # The text contents of the message. + sig do + returns( + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Content::Variants + ) + end + attr_accessor :content + + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. + sig do + returns( + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Role::OrSymbol + ) + end + attr_accessor :role + + # A list of files attached to the message, and the tools they should be added to. + sig do + returns( + T.nilable( + T::Array[ + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment + ] + ) + ) + end + attr_accessor :attachments + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. 
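+ # For example (sketch): `metadata: {source: "import", batch: "2024-06"}`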
+ sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + sig do + params( + content: + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Content::Variants, + role: + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Role::OrSymbol, + attachments: + T.nilable( + T::Array[ + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::OrHash + ] + ), + metadata: T.nilable(T::Hash[Symbol, String]) + ).returns(T.attached_class) + end + def self.new( + # The text contents of the message. + content:, + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. + role:, + # A list of files attached to the message, and the tools they should be added to. + attachments: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil + ) + end + + sig do + override.returns( + { + content: + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Content::Variants, + role: + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Role::OrSymbol, + attachments: + T.nilable( + T::Array[ + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment + ] + ), + metadata: T.nilable(T::Hash[Symbol, String]) + } + ) + end + def to_hash + end + + # The text contents of the message. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[ + OpenAI::Beta::Threads::MessageContentPartParam::Variants + ] + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Content::Variants + ] + ) + end + def self.variants + end + + MessageContentPartParamArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + union: OpenAI::Beta::Threads::MessageContentPartParam + ], + OpenAI::Internal::Type::Converter + ) + end + + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. 
+ module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Role + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + + class Attachment < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the file to attach to the message. + sig { returns(T.nilable(String)) } + attr_reader :file_id + + sig { params(file_id: String).void } + attr_writer :file_id + + # The tools to add this file to. + sig do + returns( + T.nilable( + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch + ) + ] + ) + ) + end + attr_reader :tools + + sig do + params( + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch::OrHash + ) + ] + ).void + end + attr_writer :tools + + sig do + params( + file_id: String, + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch::OrHash + ) + ] + ).returns(T.attached_class) + end + def self.new( + # The ID of the file to attach to the message. + file_id: nil, + # The tools to add this file to. + tools: nil + ) + end + + sig do + override.returns( + { + file_id: String, + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch + ) + ] + } + ) + end + def to_hash + end + + module Tool + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::CodeInterpreterTool, + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch + ) + end + + class FileSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch, + OpenAI::Internal::AnyHash + ) + end + + # The type of tool being defined: `file_search` + sig { returns(Symbol) } + attr_accessor :type + + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The type of tool being defined: `file_search` + type: :file_search + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::Variants + ] + ) + end + def self.variants + end + end + end + end + + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. 
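+ # Override sketch:
+ #
+ #   model: "gpt-4o"   # or a ChatModel symbol such as :"gpt-4o-mini"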
+ module Model + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias { T.any(String, OpenAI::ChatModel::TaggedSymbol) } + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::RunCreateParams::Model::Variants + ] + ) + end + def self.variants + end + end + + class TruncationStrategy < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy, + OpenAI::Internal::AnyHash + ) + end + + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. + sig do + returns( + OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::Type::OrSymbol + ) + end + attr_accessor :type + + # The number of most recent messages from the thread when constructing the context + # for the run. + sig { returns(T.nilable(Integer)) } + attr_accessor :last_messages + + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. + sig do + params( + type: + OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::Type::OrSymbol, + last_messages: T.nilable(Integer) + ).returns(T.attached_class) + end + def self.new( + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. + type:, + # The number of most recent messages from the thread when constructing the context + # for the run. + last_messages: nil + ) + end + + sig do + override.returns( + { + type: + OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::Type::OrSymbol, + last_messages: T.nilable(Integer) + } + ) + end + def to_hash + end + + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. 
+ module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::Type::TaggedSymbol + ) + LAST_MESSAGES = + T.let( + :last_messages, + OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/run_list_params.rbi b/rbi/openai/models/beta/threads/run_list_params.rbi new file mode 100644 index 00000000..b5cefecb --- /dev/null +++ b/rbi/openai/models/beta/threads/run_list_params.rbi @@ -0,0 +1,143 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class RunListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::RunListParams, + OpenAI::Internal::AnyHash + ) + end + + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + sig { returns(T.nilable(String)) } + attr_reader :before + + sig { params(before: String).void } + attr_writer :before + + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + sig do + returns( + T.nilable(OpenAI::Beta::Threads::RunListParams::Order::OrSymbol) + ) + end + attr_reader :order + + sig do + params( + order: OpenAI::Beta::Threads::RunListParams::Order::OrSymbol + ).void + end + attr_writer :order + + sig do + params( + after: String, + before: String, + limit: Integer, + order: OpenAI::Beta::Threads::RunListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. 
For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + before: String, + limit: Integer, + order: OpenAI::Beta::Threads::RunListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::Threads::RunListParams::Order) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = + T.let( + :asc, + OpenAI::Beta::Threads::RunListParams::Order::TaggedSymbol + ) + DESC = + T.let( + :desc, + OpenAI::Beta::Threads::RunListParams::Order::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::RunListParams::Order::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/run_retrieve_params.rbi b/rbi/openai/models/beta/threads/run_retrieve_params.rbi new file mode 100644 index 00000000..0d257698 --- /dev/null +++ b/rbi/openai/models/beta/threads/run_retrieve_params.rbi @@ -0,0 +1,42 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class RunRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::RunRetrieveParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :thread_id + + sig do + params( + thread_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(thread_id:, request_options: {}) + end + + sig do + override.returns( + { thread_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/run_status.rbi b/rbi/openai/models/beta/threads/run_status.rbi new file mode 100644 index 00000000..ed3ecdb2 --- /dev/null +++ b/rbi/openai/models/beta/threads/run_status.rbi @@ -0,0 +1,50 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + # The status of the run, which can be either `queued`, `in_progress`, + # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + # `incomplete`, or `expired`. 
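+ # A polling sketch (assumes `retrieve` re-fetches the run; names are illustrative):
+ #
+ #   terminal = %i[cancelled failed completed incomplete expired]
+ #   until terminal.include?(run.status)
+ #     sleep(1)
+ #     run = client.beta.threads.runs.retrieve(run.id, thread_id: run.thread_id)
+ #   end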
+ module RunStatus + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::Beta::Threads::RunStatus) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + QUEUED = + T.let(:queued, OpenAI::Beta::Threads::RunStatus::TaggedSymbol) + IN_PROGRESS = + T.let(:in_progress, OpenAI::Beta::Threads::RunStatus::TaggedSymbol) + REQUIRES_ACTION = + T.let( + :requires_action, + OpenAI::Beta::Threads::RunStatus::TaggedSymbol + ) + CANCELLING = + T.let(:cancelling, OpenAI::Beta::Threads::RunStatus::TaggedSymbol) + CANCELLED = + T.let(:cancelled, OpenAI::Beta::Threads::RunStatus::TaggedSymbol) + FAILED = + T.let(:failed, OpenAI::Beta::Threads::RunStatus::TaggedSymbol) + COMPLETED = + T.let(:completed, OpenAI::Beta::Threads::RunStatus::TaggedSymbol) + INCOMPLETE = + T.let(:incomplete, OpenAI::Beta::Threads::RunStatus::TaggedSymbol) + EXPIRED = + T.let(:expired, OpenAI::Beta::Threads::RunStatus::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::Beta::Threads::RunStatus::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/run_submit_tool_outputs_params.rbi b/rbi/openai/models/beta/threads/run_submit_tool_outputs_params.rbi new file mode 100644 index 00000000..490d18a2 --- /dev/null +++ b/rbi/openai/models/beta/threads/run_submit_tool_outputs_params.rbi @@ -0,0 +1,111 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class RunSubmitToolOutputsParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::RunSubmitToolOutputsParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :thread_id + + # A list of tools for which the outputs are being submitted. + sig do + returns( + T::Array[ + OpenAI::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput + ] + ) + end + attr_accessor :tool_outputs + + sig do + params( + thread_id: String, + tool_outputs: + T::Array[ + OpenAI::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput::OrHash + ], + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + thread_id:, + # A list of tools for which the outputs are being submitted. + tool_outputs:, + request_options: {} + ) + end + + sig do + override.returns( + { + thread_id: String, + tool_outputs: + T::Array[ + OpenAI::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput + ], + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + class ToolOutput < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput, + OpenAI::Internal::AnyHash + ) + end + + # The output of the tool call to be submitted to continue the run. + sig { returns(T.nilable(String)) } + attr_reader :output + + sig { params(output: String).void } + attr_writer :output + + # The ID of the tool call in the `required_action` object within the run object + # the output is being submitted for. + sig { returns(T.nilable(String)) } + attr_reader :tool_call_id + + sig { params(tool_call_id: String).void } + attr_writer :tool_call_id + + sig do + params(output: String, tool_call_id: String).returns( + T.attached_class + ) + end + def self.new( + # The output of the tool call to be submitted to continue the run. 
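+ # A submission sketch (the resource call shape is an assumption for illustration):
+ #
+ #   client.beta.threads.runs.submit_tool_outputs(
+ #     run.id,
+ #     thread_id: run.thread_id,
+ #     tool_outputs: [{tool_call_id: call.id, output: result.to_json}]
+ #   )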
+ output: nil, + # The ID of the tool call in the `required_action` object within the run object + # the output is being submitted for. + tool_call_id: nil + ) + end + + sig { override.returns({ output: String, tool_call_id: String }) } + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/run_update_params.rbi b/rbi/openai/models/beta/threads/run_update_params.rbi new file mode 100644 index 00000000..f92b8cea --- /dev/null +++ b/rbi/openai/models/beta/threads/run_update_params.rbi @@ -0,0 +1,66 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class RunUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::RunUpdateParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :thread_id + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + sig do + params( + thread_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + thread_id:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + thread_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/code_interpreter_logs.rbi b/rbi/openai/models/beta/threads/runs/code_interpreter_logs.rbi new file mode 100644 index 00000000..06eb6486 --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/code_interpreter_logs.rbi @@ -0,0 +1,58 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + module Runs + class CodeInterpreterLogs < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::CodeInterpreterLogs, + OpenAI::Internal::AnyHash + ) + end + + # The index of the output in the outputs array. + sig { returns(Integer) } + attr_accessor :index + + # Always `logs`. + sig { returns(Symbol) } + attr_accessor :type + + # The text output from the Code Interpreter tool call. + sig { returns(T.nilable(String)) } + attr_reader :logs + + sig { params(logs: String).void } + attr_writer :logs + + # Text output from the Code Interpreter tool call as part of a run step. + sig do + params(index: Integer, logs: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The index of the output in the outputs array. + index:, + # The text output from the Code Interpreter tool call. + logs: nil, + # Always `logs`. 
+ type: :logs + ) + end + + sig do + override.returns({ index: Integer, type: Symbol, logs: String }) + end + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/code_interpreter_output_image.rbi b/rbi/openai/models/beta/threads/runs/code_interpreter_output_image.rbi new file mode 100644 index 00000000..52c083be --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/code_interpreter_output_image.rbi @@ -0,0 +1,106 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + module Runs + class CodeInterpreterOutputImage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage, + OpenAI::Internal::AnyHash + ) + end + + # The index of the output in the outputs array. + sig { returns(Integer) } + attr_accessor :index + + # Always `image`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + returns( + T.nilable( + OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage::Image + ) + ) + end + attr_reader :image + + sig do + params( + image: + OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage::Image::OrHash + ).void + end + attr_writer :image + + sig do + params( + index: Integer, + image: + OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage::Image::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The index of the output in the outputs array. + index:, + image: nil, + # Always `image`. + type: :image + ) + end + + sig do + override.returns( + { + index: Integer, + type: Symbol, + image: + OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage::Image + } + ) + end + def to_hash + end + + class Image < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage::Image, + OpenAI::Internal::AnyHash + ) + end + + # The [file](https://platform.openai.com/docs/api-reference/files) ID of the + # image. + sig { returns(T.nilable(String)) } + attr_reader :file_id + + sig { params(file_id: String).void } + attr_writer :file_id + + sig { params(file_id: String).returns(T.attached_class) } + def self.new( + # The [file](https://platform.openai.com/docs/api-reference/files) ID of the + # image. + file_id: nil + ) + end + + sig { override.returns({ file_id: String }) } + def to_hash + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi b/rbi/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi new file mode 100644 index 00000000..1b4be0d3 --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi @@ -0,0 +1,281 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + module Runs + class CodeInterpreterToolCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the tool call. + sig { returns(String) } + attr_accessor :id + + # The Code Interpreter tool call definition. + sig do + returns( + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter + ) + end + attr_reader :code_interpreter + + sig do + params( + code_interpreter: + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::OrHash + ).void + end + attr_writer :code_interpreter + + # The type of tool call. This is always going to be `code_interpreter` for this + # type of tool call. 
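+ # An output-reading sketch (duck-typed for brevity; outputs are a union of
+ # `logs` and `image` variants):
+ #
+ #   call.code_interpreter.outputs.each do |output|
+ #     puts output.logs if output.respond_to?(:logs)
+ #   end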
+            sig { returns(Symbol) }
+            attr_accessor :type
+
+            # Details of the Code Interpreter tool call the run step was involved in.
+            sig do
+              params(
+                id: String,
+                code_interpreter:
+                  OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::OrHash,
+                type: Symbol
+              ).returns(T.attached_class)
+            end
+            def self.new(
+              # The ID of the tool call.
+              id:,
+              # The Code Interpreter tool call definition.
+              code_interpreter:,
+              # The type of tool call. This is always going to be `code_interpreter` for this
+              # type of tool call.
+              type: :code_interpreter
+            )
+            end
+
+            sig do
+              override.returns(
+                {
+                  id: String,
+                  code_interpreter:
+                    OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter,
+                  type: Symbol
+                }
+              )
+            end
+            def to_hash
+            end
+
+            class CodeInterpreter < OpenAI::Internal::Type::BaseModel
+              OrHash =
+                T.type_alias do
+                  T.any(
+                    OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter,
+                    OpenAI::Internal::AnyHash
+                  )
+                end
+
+              # The input to the Code Interpreter tool call.
+              sig { returns(String) }
+              attr_accessor :input
+
+              # The outputs from the Code Interpreter tool call. Code Interpreter can output one
+              # or more items, including text (`logs`) or images (`image`). Each of these is
+              # represented by a different object type.
+              sig do
+                returns(
+                  T::Array[
+                    OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Variants
+                  ]
+                )
+              end
+              attr_accessor :outputs
+
+              # The Code Interpreter tool call definition.
+              sig do
+                params(
+                  input: String,
+                  outputs:
+                    T::Array[
+                      T.any(
+                        OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs::OrHash,
+                        OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::OrHash
+                      )
+                    ]
+                ).returns(T.attached_class)
+              end
+              def self.new(
+                # The input to the Code Interpreter tool call.
+                input:,
+                # The outputs from the Code Interpreter tool call. Code Interpreter can output one
+                # or more items, including text (`logs`) or images (`image`). Each of these is
+                # represented by a different object type.
+                outputs:
+              )
+              end
+
+              sig do
+                override.returns(
+                  {
+                    input: String,
+                    outputs:
+                      T::Array[
+                        OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Variants
+                      ]
+                  }
+                )
+              end
+              def to_hash
+              end
+
+              # Text output from the Code Interpreter tool call as part of a run step.
+              module Output
+                extend OpenAI::Internal::Type::Union
+
+                Variants =
+                  T.type_alias do
+                    T.any(
+                      OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs,
+                      OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image
+                    )
+                  end
+
+                class Logs < OpenAI::Internal::Type::BaseModel
+                  OrHash =
+                    T.type_alias do
+                      T.any(
+                        OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs,
+                        OpenAI::Internal::AnyHash
+                      )
+                    end
+
+                  # The text output from the Code Interpreter tool call.
+                  sig { returns(String) }
+                  attr_accessor :logs
+
+                  # Always `logs`.
+                  sig { returns(Symbol) }
+                  attr_accessor :type
+
+                  # Text output from the Code Interpreter tool call as part of a run step.
+                  sig do
+                    params(logs: String, type: Symbol).returns(T.attached_class)
+                  end
+                  def self.new(
+                    # The text output from the Code Interpreter tool call.
+                    logs:,
+                    # Always `logs`.
+ type: :logs + ) + end + + sig { override.returns({ logs: String, type: Symbol }) } + def to_hash + end + end + + class Image < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image + ) + end + attr_reader :image + + sig do + params( + image: + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image::OrHash + ).void + end + attr_writer :image + + # Always `image`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + image: + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + image:, + # Always `image`. + type: :image + ) + end + + sig do + override.returns( + { + image: + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image, + type: Symbol + } + ) + end + def to_hash + end + + class Image < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image, + OpenAI::Internal::AnyHash + ) + end + + # The [file](https://platform.openai.com/docs/api-reference/files) ID of the + # image. + sig { returns(String) } + attr_accessor :file_id + + sig { params(file_id: String).returns(T.attached_class) } + def self.new( + # The [file](https://platform.openai.com/docs/api-reference/files) ID of the + # image. + file_id: + ) + end + + sig { override.returns({ file_id: String }) } + def to_hash + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Variants + ] + ) + end + def self.variants + end + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi b/rbi/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi new file mode 100644 index 00000000..03693d1b --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi @@ -0,0 +1,196 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + module Runs + class CodeInterpreterToolCallDelta < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta, + OpenAI::Internal::AnyHash + ) + end + + # The index of the tool call in the tool calls array. + sig { returns(Integer) } + attr_accessor :index + + # The type of tool call. This is always going to be `code_interpreter` for this + # type of tool call. + sig { returns(Symbol) } + attr_accessor :type + + # The ID of the tool call. + sig { returns(T.nilable(String)) } + attr_reader :id + + sig { params(id: String).void } + attr_writer :id + + # The Code Interpreter tool call definition. + sig do + returns( + T.nilable( + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter + ) + ) + end + attr_reader :code_interpreter + + sig do + params( + code_interpreter: + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter::OrHash + ).void + end + attr_writer :code_interpreter + + # Details of the Code Interpreter tool call the run step was involved in. 
+            sig do
+              params(
+                index: Integer,
+                id: String,
+                code_interpreter:
+                  OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter::OrHash,
+                type: Symbol
+              ).returns(T.attached_class)
+            end
+            def self.new(
+              # The index of the tool call in the tool calls array.
+              index:,
+              # The ID of the tool call.
+              id: nil,
+              # The Code Interpreter tool call definition.
+              code_interpreter: nil,
+              # The type of tool call. This is always going to be `code_interpreter` for this
+              # type of tool call.
+              type: :code_interpreter
+            )
+            end
+
+            sig do
+              override.returns(
+                {
+                  index: Integer,
+                  type: Symbol,
+                  id: String,
+                  code_interpreter:
+                    OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter
+                }
+              )
+            end
+            def to_hash
+            end
+
+            class CodeInterpreter < OpenAI::Internal::Type::BaseModel
+              OrHash =
+                T.type_alias do
+                  T.any(
+                    OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter,
+                    OpenAI::Internal::AnyHash
+                  )
+                end
+
+              # The input to the Code Interpreter tool call.
+              sig { returns(T.nilable(String)) }
+              attr_reader :input
+
+              sig { params(input: String).void }
+              attr_writer :input
+
+              # The outputs from the Code Interpreter tool call. Code Interpreter can output one
+              # or more items, including text (`logs`) or images (`image`). Each of these is
+              # represented by a different object type.
+              sig do
+                returns(
+                  T.nilable(
+                    T::Array[
+                      OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter::Output::Variants
+                    ]
+                  )
+                )
+              end
+              attr_reader :outputs
+
+              sig do
+                params(
+                  outputs:
+                    T::Array[
+                      T.any(
+                        OpenAI::Beta::Threads::Runs::CodeInterpreterLogs::OrHash,
+                        OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage::OrHash
+                      )
+                    ]
+                ).void
+              end
+              attr_writer :outputs
+
+              # The Code Interpreter tool call definition.
+              sig do
+                params(
+                  input: String,
+                  outputs:
+                    T::Array[
+                      T.any(
+                        OpenAI::Beta::Threads::Runs::CodeInterpreterLogs::OrHash,
+                        OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage::OrHash
+                      )
+                    ]
+                ).returns(T.attached_class)
+              end
+              def self.new(
+                # The input to the Code Interpreter tool call.
+                input: nil,
+                # The outputs from the Code Interpreter tool call. Code Interpreter can output one
+                # or more items, including text (`logs`) or images (`image`). Each of these is
+                # represented by a different object type.
+                outputs: nil
+              )
+              end
+
+              sig do
+                override.returns(
+                  {
+                    input: String,
+                    outputs:
+                      T::Array[
+                        OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter::Output::Variants
+                      ]
+                  }
+                )
+              end
+              def to_hash
+              end
+
+              # Text output from the Code Interpreter tool call as part of a run step.
+ module Output + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::CodeInterpreterLogs, + OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter::Output::Variants + ] + ) + end + def self.variants + end + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/file_search_tool_call.rbi b/rbi/openai/models/beta/threads/runs/file_search_tool_call.rbi new file mode 100644 index 00000000..343bec61 --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/file_search_tool_call.rbi @@ -0,0 +1,432 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + module Runs + class FileSearchToolCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::FileSearchToolCall, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the tool call object. + sig { returns(String) } + attr_accessor :id + + # For now, this is always going to be an empty object. + sig do + returns( + OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch + ) + end + attr_reader :file_search + + sig do + params( + file_search: + OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::OrHash + ).void + end + attr_writer :file_search + + # The type of tool call. This is always going to be `file_search` for this type of + # tool call. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + id: String, + file_search: + OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the tool call object. + id:, + # For now, this is always going to be an empty object. + file_search:, + # The type of tool call. This is always going to be `file_search` for this type of + # tool call. + type: :file_search + ) + end + + sig do + override.returns( + { + id: String, + file_search: + OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch, + type: Symbol + } + ) + end + def to_hash + end + + class FileSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch, + OpenAI::Internal::AnyHash + ) + end + + # The ranking options for the file search. + sig do + returns( + T.nilable( + OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions + ) + ) + end + attr_reader :ranking_options + + sig do + params( + ranking_options: + OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::OrHash + ).void + end + attr_writer :ranking_options + + # The results of the file search. + sig do + returns( + T.nilable( + T::Array[ + OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result + ] + ) + ) + end + attr_reader :results + + sig do + params( + results: + T::Array[ + OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::OrHash + ] + ).void + end + attr_writer :results + + # For now, this is always going to be an empty object. + sig do + params( + ranking_options: + OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::OrHash, + results: + T::Array[ + OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::OrHash + ] + ).returns(T.attached_class) + end + def self.new( + # The ranking options for the file search. 
+                ranking_options: nil,
+                # The results of the file search.
+                results: nil
+              )
+              end
+
+              sig do
+                override.returns(
+                  {
+                    ranking_options:
+                      OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions,
+                    results:
+                      T::Array[
+                        OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result
+                      ]
+                  }
+                )
+              end
+              def to_hash
+              end
+
+              class RankingOptions < OpenAI::Internal::Type::BaseModel
+                OrHash =
+                  T.type_alias do
+                    T.any(
+                      OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions,
+                      OpenAI::Internal::AnyHash
+                    )
+                  end
+
+                # The ranker to use for the file search. If not specified, the `auto` ranker
+                # will be used.
+                sig do
+                  returns(
+                    OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker::TaggedSymbol
+                  )
+                end
+                attr_accessor :ranker
+
+                # The score threshold for the file search. The value must be a floating point
+                # number between 0 and 1.
+                sig { returns(Float) }
+                attr_accessor :score_threshold
+
+                # The ranking options for the file search.
+                sig do
+                  params(
+                    ranker:
+                      OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker::OrSymbol,
+                    score_threshold: Float
+                  ).returns(T.attached_class)
+                end
+                def self.new(
+                  # The ranker to use for the file search. If not specified, the `auto` ranker
+                  # will be used.
+                  ranker:,
+                  # The score threshold for the file search. The value must be a floating point
+                  # number between 0 and 1.
+                  score_threshold:
+                )
+                end
+
+                sig do
+                  override.returns(
+                    {
+                      ranker:
+                        OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker::TaggedSymbol,
+                      score_threshold: Float
+                    }
+                  )
+                end
+                def to_hash
+                end
+
+                # The ranker to use for the file search. If not specified, the `auto` ranker
+                # will be used.
+                module Ranker
+                  extend OpenAI::Internal::Type::Enum
+
+                  TaggedSymbol =
+                    T.type_alias do
+                      T.all(
+                        Symbol,
+                        OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker
+                      )
+                    end
+                  OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+                  AUTO =
+                    T.let(
+                      :auto,
+                      OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker::TaggedSymbol
+                    )
+                  DEFAULT_2024_08_21 =
+                    T.let(
+                      :default_2024_08_21,
+                      OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker::TaggedSymbol
+                    )
+
+                  sig do
+                    override.returns(
+                      T::Array[
+                        OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker::TaggedSymbol
+                      ]
+                    )
+                  end
+                  def self.values
+                  end
+                end
+              end
+
+              class Result < OpenAI::Internal::Type::BaseModel
+                OrHash =
+                  T.type_alias do
+                    T.any(
+                      OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result,
+                      OpenAI::Internal::AnyHash
+                    )
+                  end
+
+                # The ID of the file that the result was found in.
+                sig { returns(String) }
+                attr_accessor :file_id
+
+                # The name of the file that the result was found in.
+                sig { returns(String) }
+                attr_accessor :file_name
+
+                # The score of the result. The value must be a floating point number between 0
+                # and 1.
+                sig { returns(Float) }
+                attr_accessor :score
+
+                # The content of the result that was found. The content is only included if
+                # requested via the include query parameter.
+                sig do
+                  returns(
+                    T.nilable(
+                      T::Array[
+                        OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content
+                      ]
+                    )
+                  )
+                end
+                attr_reader :content
+
+                sig do
+                  params(
+                    content:
+                      T::Array[
+                        OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::OrHash
+                      ]
+                  ).void
+                end
+                attr_writer :content
+
+                # A result instance of the file search.
+                sig do
+                  params(
+                    file_id: String,
+                    file_name: String,
+                    score: Float,
+                    content:
+                      T::Array[
+                        OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::OrHash
+                      ]
+                  ).returns(T.attached_class)
+                end
+                def self.new(
+                  # The ID of the file that the result was found in.
+                  file_id:,
+                  # The name of the file that the result was found in.
+                  file_name:,
+                  # The score of the result. The value must be a floating point number between 0
+                  # and 1.
+                  score:,
+                  # The content of the result that was found. The content is only included if
+                  # requested via the include query parameter.
+                  content: nil
+                )
+                end
+
+                sig do
+                  override.returns(
+                    {
+                      file_id: String,
+                      file_name: String,
+                      score: Float,
+                      content:
+                        T::Array[
+                          OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content
+                        ]
+                    }
+                  )
+                end
+                def to_hash
+                end
+
+                class Content < OpenAI::Internal::Type::BaseModel
+                  OrHash =
+                    T.type_alias do
+                      T.any(
+                        OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content,
+                        OpenAI::Internal::AnyHash
+                      )
+                    end
+
+                  # The text content of the file.
+                  sig { returns(T.nilable(String)) }
+                  attr_reader :text
+
+                  sig { params(text: String).void }
+                  attr_writer :text
+
+                  # The type of the content.
+                  sig do
+                    returns(
+                      T.nilable(
+                        OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type::TaggedSymbol
+                      )
+                    )
+                  end
+                  attr_reader :type
+
+                  sig do
+                    params(
+                      type:
+                        OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type::OrSymbol
+                    ).void
+                  end
+                  attr_writer :type
+
+                  sig do
+                    params(
+                      text: String,
+                      type:
+                        OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type::OrSymbol
+                    ).returns(T.attached_class)
+                  end
+                  def self.new(
+                    # The text content of the file.
+                    text: nil,
+                    # The type of the content.
+                    type: nil
+                  )
+                  end
+
+                  sig do
+                    override.returns(
+                      {
+                        text: String,
+                        type:
+                          OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type::TaggedSymbol
+                      }
+                    )
+                  end
+                  def to_hash
+                  end
+
+                  # The type of the content.
+ module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi b/rbi/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi new file mode 100644 index 00000000..78d5bb30 --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi @@ -0,0 +1,75 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + module Runs + class FileSearchToolCallDelta < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::FileSearchToolCallDelta, + OpenAI::Internal::AnyHash + ) + end + + # For now, this is always going to be an empty object. + sig { returns(T.anything) } + attr_accessor :file_search + + # The index of the tool call in the tool calls array. + sig { returns(Integer) } + attr_accessor :index + + # The type of tool call. This is always going to be `file_search` for this type of + # tool call. + sig { returns(Symbol) } + attr_accessor :type + + # The ID of the tool call object. + sig { returns(T.nilable(String)) } + attr_reader :id + + sig { params(id: String).void } + attr_writer :id + + sig do + params( + file_search: T.anything, + index: Integer, + id: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # For now, this is always going to be an empty object. + file_search:, + # The index of the tool call in the tool calls array. + index:, + # The ID of the tool call object. + id: nil, + # The type of tool call. This is always going to be `file_search` for this type of + # tool call. + type: :file_search + ) + end + + sig do + override.returns( + { + file_search: T.anything, + index: Integer, + type: Symbol, + id: String + } + ) + end + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/function_tool_call.rbi b/rbi/openai/models/beta/threads/runs/function_tool_call.rbi new file mode 100644 index 00000000..55ee8104 --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/function_tool_call.rbi @@ -0,0 +1,128 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + module Runs + class FunctionToolCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::FunctionToolCall, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the tool call object. + sig { returns(String) } + attr_accessor :id + + # The definition of the function that was called. + sig do + returns(OpenAI::Beta::Threads::Runs::FunctionToolCall::Function) + end + attr_reader :function + + sig do + params( + function: + OpenAI::Beta::Threads::Runs::FunctionToolCall::Function::OrHash + ).void + end + attr_writer :function + + # The type of tool call. This is always going to be `function` for this type of + # tool call. 
+ sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + id: String, + function: + OpenAI::Beta::Threads::Runs::FunctionToolCall::Function::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the tool call object. + id:, + # The definition of the function that was called. + function:, + # The type of tool call. This is always going to be `function` for this type of + # tool call. + type: :function + ) + end + + sig do + override.returns( + { + id: String, + function: + OpenAI::Beta::Threads::Runs::FunctionToolCall::Function, + type: Symbol + } + ) + end + def to_hash + end + + class Function < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::FunctionToolCall::Function, + OpenAI::Internal::AnyHash + ) + end + + # The arguments passed to the function. + sig { returns(String) } + attr_accessor :arguments + + # The name of the function. + sig { returns(String) } + attr_accessor :name + + # The output of the function. This will be `null` if the outputs have not been + # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # yet. + sig { returns(T.nilable(String)) } + attr_accessor :output + + # The definition of the function that was called. + sig do + params( + arguments: String, + name: String, + output: T.nilable(String) + ).returns(T.attached_class) + end + def self.new( + # The arguments passed to the function. + arguments:, + # The name of the function. + name:, + # The output of the function. This will be `null` if the outputs have not been + # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # yet. + output: + ) + end + + sig do + override.returns( + { arguments: String, name: String, output: T.nilable(String) } + ) + end + def to_hash + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/function_tool_call_delta.rbi b/rbi/openai/models/beta/threads/runs/function_tool_call_delta.rbi new file mode 100644 index 00000000..73ec53a9 --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/function_tool_call_delta.rbi @@ -0,0 +1,149 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + module Runs + class FunctionToolCallDelta < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::FunctionToolCallDelta, + OpenAI::Internal::AnyHash + ) + end + + # The index of the tool call in the tool calls array. + sig { returns(Integer) } + attr_accessor :index + + # The type of tool call. This is always going to be `function` for this type of + # tool call. + sig { returns(Symbol) } + attr_accessor :type + + # The ID of the tool call object. + sig { returns(T.nilable(String)) } + attr_reader :id + + sig { params(id: String).void } + attr_writer :id + + # The definition of the function that was called. + sig do + returns( + T.nilable( + OpenAI::Beta::Threads::Runs::FunctionToolCallDelta::Function + ) + ) + end + attr_reader :function + + sig do + params( + function: + OpenAI::Beta::Threads::Runs::FunctionToolCallDelta::Function::OrHash + ).void + end + attr_writer :function + + sig do + params( + index: Integer, + id: String, + function: + OpenAI::Beta::Threads::Runs::FunctionToolCallDelta::Function::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The index of the tool call in the tool calls array. + index:, + # The ID of the tool call object. 
+ id: nil, + # The definition of the function that was called. + function: nil, + # The type of tool call. This is always going to be `function` for this type of + # tool call. + type: :function + ) + end + + sig do + override.returns( + { + index: Integer, + type: Symbol, + id: String, + function: + OpenAI::Beta::Threads::Runs::FunctionToolCallDelta::Function + } + ) + end + def to_hash + end + + class Function < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::FunctionToolCallDelta::Function, + OpenAI::Internal::AnyHash + ) + end + + # The arguments passed to the function. + sig { returns(T.nilable(String)) } + attr_reader :arguments + + sig { params(arguments: String).void } + attr_writer :arguments + + # The name of the function. + sig { returns(T.nilable(String)) } + attr_reader :name + + sig { params(name: String).void } + attr_writer :name + + # The output of the function. This will be `null` if the outputs have not been + # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # yet. + sig { returns(T.nilable(String)) } + attr_accessor :output + + # The definition of the function that was called. + sig do + params( + arguments: String, + name: String, + output: T.nilable(String) + ).returns(T.attached_class) + end + def self.new( + # The arguments passed to the function. + arguments: nil, + # The name of the function. + name: nil, + # The output of the function. This will be `null` if the outputs have not been + # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # yet. + output: nil + ) + end + + sig do + override.returns( + { arguments: String, name: String, output: T.nilable(String) } + ) + end + def to_hash + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/message_creation_step_details.rbi b/rbi/openai/models/beta/threads/runs/message_creation_step_details.rbi new file mode 100644 index 00000000..1c11f5d0 --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/message_creation_step_details.rbi @@ -0,0 +1,92 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + module Runs + class MessageCreationStepDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::MessageCreationStepDetails, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + OpenAI::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation + ) + end + attr_reader :message_creation + + sig do + params( + message_creation: + OpenAI::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation::OrHash + ).void + end + attr_writer :message_creation + + # Always `message_creation`. + sig { returns(Symbol) } + attr_accessor :type + + # Details of the message creation by the run step. + sig do + params( + message_creation: + OpenAI::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + message_creation:, + # Always `message_creation`. 
+ type: :message_creation + ) + end + + sig do + override.returns( + { + message_creation: + OpenAI::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation, + type: Symbol + } + ) + end + def to_hash + end + + class MessageCreation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the message that was created by this run step. + sig { returns(String) } + attr_accessor :message_id + + sig { params(message_id: String).returns(T.attached_class) } + def self.new( + # The ID of the message that was created by this run step. + message_id: + ) + end + + sig { override.returns({ message_id: String }) } + def to_hash + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/run_step.rbi b/rbi/openai/models/beta/threads/runs/run_step.rbi new file mode 100644 index 00000000..1a87ede6 --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/run_step.rbi @@ -0,0 +1,485 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + RunStep = Runs::RunStep + + module Runs + class RunStep < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::RunStep, + OpenAI::Internal::AnyHash + ) + end + + # The identifier of the run step, which can be referenced in API endpoints. + sig { returns(String) } + attr_accessor :id + + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) + # associated with the run step. + sig { returns(String) } + attr_accessor :assistant_id + + # The Unix timestamp (in seconds) for when the run step was cancelled. + sig { returns(T.nilable(Integer)) } + attr_accessor :cancelled_at + + # The Unix timestamp (in seconds) for when the run step completed. + sig { returns(T.nilable(Integer)) } + attr_accessor :completed_at + + # The Unix timestamp (in seconds) for when the run step was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The Unix timestamp (in seconds) for when the run step expired. A step is + # considered expired if the parent run is expired. + sig { returns(T.nilable(Integer)) } + attr_accessor :expired_at + + # The Unix timestamp (in seconds) for when the run step failed. + sig { returns(T.nilable(Integer)) } + attr_accessor :failed_at + + # The last error associated with this run step. Will be `null` if there are no + # errors. + sig do + returns( + T.nilable(OpenAI::Beta::Threads::Runs::RunStep::LastError) + ) + end + attr_reader :last_error + + sig do + params( + last_error: + T.nilable( + OpenAI::Beta::Threads::Runs::RunStep::LastError::OrHash + ) + ).void + end + attr_writer :last_error + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The object type, which is always `thread.run.step`. + sig { returns(Symbol) } + attr_accessor :object + + # The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that + # this run step is a part of. 
+ sig { returns(String) } + attr_accessor :run_id + + # The status of the run step, which can be either `in_progress`, `cancelled`, + # `failed`, `completed`, or `expired`. + sig do + returns( + OpenAI::Beta::Threads::Runs::RunStep::Status::TaggedSymbol + ) + end + attr_accessor :status + + # The details of the run step. + sig do + returns( + OpenAI::Beta::Threads::Runs::RunStep::StepDetails::Variants + ) + end + attr_accessor :step_details + + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # that was run. + sig { returns(String) } + attr_accessor :thread_id + + # The type of run step, which can be either `message_creation` or `tool_calls`. + sig do + returns(OpenAI::Beta::Threads::Runs::RunStep::Type::TaggedSymbol) + end + attr_accessor :type + + # Usage statistics related to the run step. This value will be `null` while the + # run step's status is `in_progress`. + sig do + returns(T.nilable(OpenAI::Beta::Threads::Runs::RunStep::Usage)) + end + attr_reader :usage + + sig do + params( + usage: + T.nilable(OpenAI::Beta::Threads::Runs::RunStep::Usage::OrHash) + ).void + end + attr_writer :usage + + # Represents a step in execution of a run. + sig do + params( + id: String, + assistant_id: String, + cancelled_at: T.nilable(Integer), + completed_at: T.nilable(Integer), + created_at: Integer, + expired_at: T.nilable(Integer), + failed_at: T.nilable(Integer), + last_error: + T.nilable( + OpenAI::Beta::Threads::Runs::RunStep::LastError::OrHash + ), + metadata: T.nilable(T::Hash[Symbol, String]), + run_id: String, + status: OpenAI::Beta::Threads::Runs::RunStep::Status::OrSymbol, + step_details: + T.any( + OpenAI::Beta::Threads::Runs::MessageCreationStepDetails::OrHash, + OpenAI::Beta::Threads::Runs::ToolCallsStepDetails::OrHash + ), + thread_id: String, + type: OpenAI::Beta::Threads::Runs::RunStep::Type::OrSymbol, + usage: + T.nilable( + OpenAI::Beta::Threads::Runs::RunStep::Usage::OrHash + ), + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The identifier of the run step, which can be referenced in API endpoints. + id:, + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) + # associated with the run step. + assistant_id:, + # The Unix timestamp (in seconds) for when the run step was cancelled. + cancelled_at:, + # The Unix timestamp (in seconds) for when the run step completed. + completed_at:, + # The Unix timestamp (in seconds) for when the run step was created. + created_at:, + # The Unix timestamp (in seconds) for when the run step expired. A step is + # considered expired if the parent run is expired. + expired_at:, + # The Unix timestamp (in seconds) for when the run step failed. + failed_at:, + # The last error associated with this run step. Will be `null` if there are no + # errors. + last_error:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + # The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that + # this run step is a part of. + run_id:, + # The status of the run step, which can be either `in_progress`, `cancelled`, + # `failed`, `completed`, or `expired`. + status:, + # The details of the run step. 
+ step_details:, + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # that was run. + thread_id:, + # The type of run step, which can be either `message_creation` or `tool_calls`. + type:, + # Usage statistics related to the run step. This value will be `null` while the + # run step's status is `in_progress`. + usage:, + # The object type, which is always `thread.run.step`. + object: :"thread.run.step" + ) + end + + sig do + override.returns( + { + id: String, + assistant_id: String, + cancelled_at: T.nilable(Integer), + completed_at: T.nilable(Integer), + created_at: Integer, + expired_at: T.nilable(Integer), + failed_at: T.nilable(Integer), + last_error: + T.nilable(OpenAI::Beta::Threads::Runs::RunStep::LastError), + metadata: T.nilable(T::Hash[Symbol, String]), + object: Symbol, + run_id: String, + status: + OpenAI::Beta::Threads::Runs::RunStep::Status::TaggedSymbol, + step_details: + OpenAI::Beta::Threads::Runs::RunStep::StepDetails::Variants, + thread_id: String, + type: + OpenAI::Beta::Threads::Runs::RunStep::Type::TaggedSymbol, + usage: T.nilable(OpenAI::Beta::Threads::Runs::RunStep::Usage) + } + ) + end + def to_hash + end + + class LastError < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::RunStep::LastError, + OpenAI::Internal::AnyHash + ) + end + + # One of `server_error` or `rate_limit_exceeded`. + sig do + returns( + OpenAI::Beta::Threads::Runs::RunStep::LastError::Code::TaggedSymbol + ) + end + attr_accessor :code + + # A human-readable description of the error. + sig { returns(String) } + attr_accessor :message + + # The last error associated with this run step. Will be `null` if there are no + # errors. + sig do + params( + code: + OpenAI::Beta::Threads::Runs::RunStep::LastError::Code::OrSymbol, + message: String + ).returns(T.attached_class) + end + def self.new( + # One of `server_error` or `rate_limit_exceeded`. + code:, + # A human-readable description of the error. + message: + ) + end + + sig do + override.returns( + { + code: + OpenAI::Beta::Threads::Runs::RunStep::LastError::Code::TaggedSymbol, + message: String + } + ) + end + def to_hash + end + + # One of `server_error` or `rate_limit_exceeded`. + module Code + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Beta::Threads::Runs::RunStep::LastError::Code + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SERVER_ERROR = + T.let( + :server_error, + OpenAI::Beta::Threads::Runs::RunStep::LastError::Code::TaggedSymbol + ) + RATE_LIMIT_EXCEEDED = + T.let( + :rate_limit_exceeded, + OpenAI::Beta::Threads::Runs::RunStep::LastError::Code::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::Runs::RunStep::LastError::Code::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + # The status of the run step, which can be either `in_progress`, `cancelled`, + # `failed`, `completed`, or `expired`. 
+ module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::Threads::Runs::RunStep::Status) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Beta::Threads::Runs::RunStep::Status::TaggedSymbol + ) + CANCELLED = + T.let( + :cancelled, + OpenAI::Beta::Threads::Runs::RunStep::Status::TaggedSymbol + ) + FAILED = + T.let( + :failed, + OpenAI::Beta::Threads::Runs::RunStep::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Beta::Threads::Runs::RunStep::Status::TaggedSymbol + ) + EXPIRED = + T.let( + :expired, + OpenAI::Beta::Threads::Runs::RunStep::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::Runs::RunStep::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The details of the run step. + module StepDetails + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::MessageCreationStepDetails, + OpenAI::Beta::Threads::Runs::ToolCallsStepDetails + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::Runs::RunStep::StepDetails::Variants + ] + ) + end + def self.variants + end + end + + # The type of run step, which can be either `message_creation` or `tool_calls`. + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::Threads::Runs::RunStep::Type) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MESSAGE_CREATION = + T.let( + :message_creation, + OpenAI::Beta::Threads::Runs::RunStep::Type::TaggedSymbol + ) + TOOL_CALLS = + T.let( + :tool_calls, + OpenAI::Beta::Threads::Runs::RunStep::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::Runs::RunStep::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + + class Usage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::RunStep::Usage, + OpenAI::Internal::AnyHash + ) + end + + # Number of completion tokens used over the course of the run step. + sig { returns(Integer) } + attr_accessor :completion_tokens + + # Number of prompt tokens used over the course of the run step. + sig { returns(Integer) } + attr_accessor :prompt_tokens + + # Total number of tokens used (prompt + completion). + sig { returns(Integer) } + attr_accessor :total_tokens + + # Usage statistics related to the run step. This value will be `null` while the + # run step's status is `in_progress`. + sig do + params( + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # Number of completion tokens used over the course of the run step. + completion_tokens:, + # Number of prompt tokens used over the course of the run step. + prompt_tokens:, + # Total number of tokens used (prompt + completion). 
+ total_tokens: + ) + end + + sig do + override.returns( + { + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + } + ) + end + def to_hash + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/run_step_delta.rbi b/rbi/openai/models/beta/threads/runs/run_step_delta.rbi new file mode 100644 index 00000000..ba659f76 --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/run_step_delta.rbi @@ -0,0 +1,94 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + RunStepDelta = Runs::RunStepDelta + + module Runs + class RunStepDelta < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::RunStepDelta, + OpenAI::Internal::AnyHash + ) + end + + # The details of the run step. + sig do + returns( + T.nilable( + OpenAI::Beta::Threads::Runs::RunStepDelta::StepDetails::Variants + ) + ) + end + attr_reader :step_details + + sig do + params( + step_details: + T.any( + OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta::OrHash, + OpenAI::Beta::Threads::Runs::ToolCallDeltaObject::OrHash + ) + ).void + end + attr_writer :step_details + + # The delta containing the fields that have changed on the run step. + sig do + params( + step_details: + T.any( + OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta::OrHash, + OpenAI::Beta::Threads::Runs::ToolCallDeltaObject::OrHash + ) + ).returns(T.attached_class) + end + def self.new( + # The details of the run step. + step_details: nil + ) + end + + sig do + override.returns( + { + step_details: + OpenAI::Beta::Threads::Runs::RunStepDelta::StepDetails::Variants + } + ) + end + def to_hash + end + + # The details of the run step. + module StepDetails + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta, + OpenAI::Beta::Threads::Runs::ToolCallDeltaObject + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::Runs::RunStepDelta::StepDetails::Variants + ] + ) + end + def self.variants + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/run_step_delta_event.rbi b/rbi/openai/models/beta/threads/runs/run_step_delta_event.rbi new file mode 100644 index 00000000..d3a77c15 --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/run_step_delta_event.rbi @@ -0,0 +1,73 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + RunStepDeltaEvent = Runs::RunStepDeltaEvent + + module Runs + class RunStepDeltaEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::RunStepDeltaEvent, + OpenAI::Internal::AnyHash + ) + end + + # The identifier of the run step, which can be referenced in API endpoints. + sig { returns(String) } + attr_accessor :id + + # The delta containing the fields that have changed on the run step. + sig { returns(OpenAI::Beta::Threads::Runs::RunStepDelta) } + attr_reader :delta + + sig do + params( + delta: OpenAI::Beta::Threads::Runs::RunStepDelta::OrHash + ).void + end + attr_writer :delta + + # The object type, which is always `thread.run.step.delta`. + sig { returns(Symbol) } + attr_accessor :object + + # Represents a run step delta i.e. any changed fields on a run step during + # streaming. 
+ sig do + params( + id: String, + delta: OpenAI::Beta::Threads::Runs::RunStepDelta::OrHash, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The identifier of the run step, which can be referenced in API endpoints. + id:, + # The delta containing the fields that have changed on the run step. + delta:, + # The object type, which is always `thread.run.step.delta`. + object: :"thread.run.step.delta" + ) + end + + sig do + override.returns( + { + id: String, + delta: OpenAI::Beta::Threads::Runs::RunStepDelta, + object: Symbol + } + ) + end + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/run_step_delta_message_delta.rbi b/rbi/openai/models/beta/threads/runs/run_step_delta_message_delta.rbi new file mode 100644 index 00000000..b0304be4 --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/run_step_delta_message_delta.rbi @@ -0,0 +1,99 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + RunStepDeltaMessageDelta = Runs::RunStepDeltaMessageDelta + + module Runs + class RunStepDeltaMessageDelta < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta, + OpenAI::Internal::AnyHash + ) + end + + # Always `message_creation`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + returns( + T.nilable( + OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation + ) + ) + end + attr_reader :message_creation + + sig do + params( + message_creation: + OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation::OrHash + ).void + end + attr_writer :message_creation + + # Details of the message creation by the run step. + sig do + params( + message_creation: + OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + message_creation: nil, + # Always `message_creation`. + type: :message_creation + ) + end + + sig do + override.returns( + { + type: Symbol, + message_creation: + OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation + } + ) + end + def to_hash + end + + class MessageCreation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the message that was created by this run step. + sig { returns(T.nilable(String)) } + attr_reader :message_id + + sig { params(message_id: String).void } + attr_writer :message_id + + sig { params(message_id: String).returns(T.attached_class) } + def self.new( + # The ID of the message that was created by this run step. 
+ message_id: nil + ) + end + + sig { override.returns({ message_id: String }) } + def to_hash + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/run_step_include.rbi b/rbi/openai/models/beta/threads/runs/run_step_include.rbi new file mode 100644 index 00000000..1b0fdb68 --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/run_step_include.rbi @@ -0,0 +1,39 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + RunStepInclude = Runs::RunStepInclude + + module Runs + module RunStepInclude + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Beta::Threads::Runs::RunStepInclude) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + STEP_DETAILS_TOOL_CALLS_FILE_SEARCH_RESULTS_CONTENT = + T.let( + :"step_details.tool_calls[*].file_search.results[*].content", + OpenAI::Beta::Threads::Runs::RunStepInclude::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::Runs::RunStepInclude::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/step_list_params.rbi b/rbi/openai/models/beta/threads/runs/step_list_params.rbi new file mode 100644 index 00000000..618a115d --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/step_list_params.rbi @@ -0,0 +1,203 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + module Runs + class StepListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::StepListParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :thread_id + + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + sig { returns(T.nilable(String)) } + attr_reader :before + + sig { params(before: String).void } + attr_writer :before + + # A list of additional fields to include in the response. Currently the only + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. + sig do + returns( + T.nilable( + T::Array[ + OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol + ] + ) + ) + end + attr_reader :include + + sig do + params( + include: + T::Array[ + OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol + ] + ).void + end + attr_writer :include + + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. 
+ sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + sig do + returns( + T.nilable( + OpenAI::Beta::Threads::Runs::StepListParams::Order::OrSymbol + ) + ) + end + attr_reader :order + + sig do + params( + order: + OpenAI::Beta::Threads::Runs::StepListParams::Order::OrSymbol + ).void + end + attr_writer :order + + sig do + params( + thread_id: String, + after: String, + before: String, + include: + T::Array[ + OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol + ], + limit: Integer, + order: + OpenAI::Beta::Threads::Runs::StepListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + thread_id:, + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # A list of additional fields to include in the response. Currently the only + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. + include: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + thread_id: String, + after: String, + before: String, + include: + T::Array[ + OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol + ], + limit: Integer, + order: + OpenAI::Beta::Threads::Runs::StepListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. 
+ module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Beta::Threads::Runs::StepListParams::Order + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = + T.let( + :asc, + OpenAI::Beta::Threads::Runs::StepListParams::Order::TaggedSymbol + ) + DESC = + T.let( + :desc, + OpenAI::Beta::Threads::Runs::StepListParams::Order::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Beta::Threads::Runs::StepListParams::Order::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/step_retrieve_params.rbi b/rbi/openai/models/beta/threads/runs/step_retrieve_params.rbi new file mode 100644 index 00000000..c1a7ae02 --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/step_retrieve_params.rbi @@ -0,0 +1,100 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + module Runs + class StepRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::StepRetrieveParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :thread_id + + sig { returns(String) } + attr_accessor :run_id + + # A list of additional fields to include in the response. Currently the only + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. + sig do + returns( + T.nilable( + T::Array[ + OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol + ] + ) + ) + end + attr_reader :include + + sig do + params( + include: + T::Array[ + OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol + ] + ).void + end + attr_writer :include + + sig do + params( + thread_id: String, + run_id: String, + include: + T::Array[ + OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol + ], + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + thread_id:, + run_id:, + # A list of additional fields to include in the response. Currently the only + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. + include: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + thread_id: String, + run_id: String, + include: + T::Array[ + OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol + ], + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/tool_call.rbi b/rbi/openai/models/beta/threads/runs/tool_call.rbi new file mode 100644 index 00000000..a202bb18 --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/tool_call.rbi @@ -0,0 +1,33 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + module Runs + # Details of the Code Interpreter tool call the run step was involved in. 
+ module ToolCall
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall,
+ OpenAI::Beta::Threads::Runs::FileSearchToolCall,
+ OpenAI::Beta::Threads::Runs::FunctionToolCall
+ )
+ end
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::Beta::Threads::Runs::ToolCall::Variants]
+ )
+ end
+ def self.variants
+ end
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/rbi/openai/models/beta/threads/runs/tool_call_delta.rbi b/rbi/openai/models/beta/threads/runs/tool_call_delta.rbi
new file mode 100644
index 00000000..f76f8eaa
--- /dev/null
+++ b/rbi/openai/models/beta/threads/runs/tool_call_delta.rbi
@@ -0,0 +1,33 @@
+# typed: strong
+
+module OpenAI
+ module Models
+ module Beta
+ module Threads
+ module Runs
+ # Details of a tool call delta the run step was involved in: a
+ # `code_interpreter`, `file_search`, or `function` tool call delta.
+ module ToolCallDelta
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta,
+ OpenAI::Beta::Threads::Runs::FileSearchToolCallDelta,
+ OpenAI::Beta::Threads::Runs::FunctionToolCallDelta
+ )
+ end
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::Beta::Threads::Runs::ToolCallDelta::Variants]
+ )
+ end
+ def self.variants
+ end
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/rbi/openai/models/beta/threads/runs/tool_call_delta_object.rbi b/rbi/openai/models/beta/threads/runs/tool_call_delta_object.rbi
new file mode 100644
index 00000000..0dcb1dac
--- /dev/null
+++ b/rbi/openai/models/beta/threads/runs/tool_call_delta_object.rbi
@@ -0,0 +1,89 @@
+# typed: strong
+
+module OpenAI
+ module Models
+ module Beta
+ module Threads
+ module Runs
+ class ToolCallDeltaObject < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::Beta::Threads::Runs::ToolCallDeltaObject,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # Always `tool_calls`.
+ sig { returns(Symbol) }
+ attr_accessor :type
+
+ # An array of tool calls the run step was involved in. These can be associated
+ # with one of three types of tools: `code_interpreter`, `file_search`, or
+ # `function`.
+ sig do
+ returns(
+ T.nilable(
+ T::Array[OpenAI::Beta::Threads::Runs::ToolCallDelta::Variants]
+ )
+ )
+ end
+ attr_reader :tool_calls
+
+ sig do
+ params(
+ tool_calls:
+ T::Array[
+ T.any(
+ OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::OrHash,
+ OpenAI::Beta::Threads::Runs::FileSearchToolCallDelta::OrHash,
+ OpenAI::Beta::Threads::Runs::FunctionToolCallDelta::OrHash
+ )
+ ]
+ ).void
+ end
+ attr_writer :tool_calls
+
+ # Details of the tool call.
+ sig do
+ params(
+ tool_calls:
+ T::Array[
+ T.any(
+ OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::OrHash,
+ OpenAI::Beta::Threads::Runs::FileSearchToolCallDelta::OrHash,
+ OpenAI::Beta::Threads::Runs::FunctionToolCallDelta::OrHash
+ )
+ ],
+ type: Symbol
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # An array of tool calls the run step was involved in. These can be associated
+ # with one of three types of tools: `code_interpreter`, `file_search`, or
+ # `function`.
+ tool_calls: nil,
+ # Always `tool_calls`.
+ type: :tool_calls + ) + end + + sig do + override.returns( + { + type: Symbol, + tool_calls: + T::Array[ + OpenAI::Beta::Threads::Runs::ToolCallDelta::Variants + ] + } + ) + end + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/runs/tool_calls_step_details.rbi b/rbi/openai/models/beta/threads/runs/tool_calls_step_details.rbi new file mode 100644 index 00000000..cda8ec9b --- /dev/null +++ b/rbi/openai/models/beta/threads/runs/tool_calls_step_details.rbi @@ -0,0 +1,69 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + module Runs + class ToolCallsStepDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::Runs::ToolCallsStepDetails, + OpenAI::Internal::AnyHash + ) + end + + # An array of tool calls the run step was involved in. These can be associated + # with one of three types of tools: `code_interpreter`, `file_search`, or + # `function`. + sig do + returns(T::Array[OpenAI::Beta::Threads::Runs::ToolCall::Variants]) + end + attr_accessor :tool_calls + + # Always `tool_calls`. + sig { returns(Symbol) } + attr_accessor :type + + # Details of the tool call. + sig do + params( + tool_calls: + T::Array[ + T.any( + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::OrHash, + OpenAI::Beta::Threads::Runs::FileSearchToolCall::OrHash, + OpenAI::Beta::Threads::Runs::FunctionToolCall::OrHash + ) + ], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # An array of tool calls the run step was involved in. These can be associated + # with one of three types of tools: `code_interpreter`, `file_search`, or + # `function`. + tool_calls:, + # Always `tool_calls`. + type: :tool_calls + ) + end + + sig do + override.returns( + { + tool_calls: + T::Array[OpenAI::Beta::Threads::Runs::ToolCall::Variants], + type: Symbol + } + ) + end + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/text.rbi b/rbi/openai/models/beta/threads/text.rbi new file mode 100644 index 00000000..e36d074a --- /dev/null +++ b/rbi/openai/models/beta/threads/text.rbi @@ -0,0 +1,54 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class Text < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Beta::Threads::Text, OpenAI::Internal::AnyHash) + end + + sig { returns(T::Array[OpenAI::Beta::Threads::Annotation::Variants]) } + attr_accessor :annotations + + # The data that makes up the text. + sig { returns(String) } + attr_accessor :value + + sig do + params( + annotations: + T::Array[ + T.any( + OpenAI::Beta::Threads::FileCitationAnnotation::OrHash, + OpenAI::Beta::Threads::FilePathAnnotation::OrHash + ) + ], + value: String + ).returns(T.attached_class) + end + def self.new( + annotations:, + # The data that makes up the text. 
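+ # Citation markers embedded in the value (e.g. 【12†source】) line up with
+ # entries in `annotations`; an illustrative one-liner that strips them:
+ #
+ #   text.annotations.reduce(text.value) { |v, a| v.sub(a.text, "") }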
+ value: + ) + end + + sig do + override.returns( + { + annotations: + T::Array[OpenAI::Beta::Threads::Annotation::Variants], + value: String + } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/text_content_block.rbi b/rbi/openai/models/beta/threads/text_content_block.rbi new file mode 100644 index 00000000..81340c03 --- /dev/null +++ b/rbi/openai/models/beta/threads/text_content_block.rbi @@ -0,0 +1,51 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class TextContentBlock < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::TextContentBlock, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(OpenAI::Beta::Threads::Text) } + attr_reader :text + + sig { params(text: OpenAI::Beta::Threads::Text::OrHash).void } + attr_writer :text + + # Always `text`. + sig { returns(Symbol) } + attr_accessor :type + + # The text content that is part of a message. + sig do + params( + text: OpenAI::Beta::Threads::Text::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + text:, + # Always `text`. + type: :text + ) + end + + sig do + override.returns( + { text: OpenAI::Beta::Threads::Text, type: Symbol } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/text_content_block_param.rbi b/rbi/openai/models/beta/threads/text_content_block_param.rbi new file mode 100644 index 00000000..4cdfec42 --- /dev/null +++ b/rbi/openai/models/beta/threads/text_content_block_param.rbi @@ -0,0 +1,41 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class TextContentBlockParam < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Beta::Threads::TextContentBlockParam, + OpenAI::Internal::AnyHash + ) + end + + # Text content to be sent to the model + sig { returns(String) } + attr_accessor :text + + # Always `text`. + sig { returns(Symbol) } + attr_accessor :type + + # The text content that is part of a message. + sig { params(text: String, type: Symbol).returns(T.attached_class) } + def self.new( + # Text content to be sent to the model + text:, + # Always `text`. + type: :text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/beta/threads/text_delta.rbi b/rbi/openai/models/beta/threads/text_delta.rbi new file mode 100644 index 00000000..f784ef87 --- /dev/null +++ b/rbi/openai/models/beta/threads/text_delta.rbi @@ -0,0 +1,76 @@ +# typed: strong + +module OpenAI + module Models + module Beta + module Threads + class TextDelta < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Beta::Threads::TextDelta, OpenAI::Internal::AnyHash) + end + + sig do + returns( + T.nilable( + T::Array[OpenAI::Beta::Threads::AnnotationDelta::Variants] + ) + ) + end + attr_reader :annotations + + sig do + params( + annotations: + T::Array[ + T.any( + OpenAI::Beta::Threads::FileCitationDeltaAnnotation::OrHash, + OpenAI::Beta::Threads::FilePathDeltaAnnotation::OrHash + ) + ] + ).void + end + attr_writer :annotations + + # The data that makes up the text. 
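+ #
+ # Illustrative: streamed deltas are typically concatenated to rebuild the
+ # full text, e.g.
+ #
+ #   buffer = +""
+ #   buffer << delta.value.to_s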
+ sig { returns(T.nilable(String)) }
+ attr_reader :value
+
+ sig { params(value: String).void }
+ attr_writer :value
+
+ sig do
+ params(
+ annotations:
+ T::Array[
+ T.any(
+ OpenAI::Beta::Threads::FileCitationDeltaAnnotation::OrHash,
+ OpenAI::Beta::Threads::FilePathDeltaAnnotation::OrHash
+ )
+ ],
+ value: String
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ annotations: nil,
+ # The data that makes up the text.
+ value: nil
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ annotations:
+ T::Array[OpenAI::Beta::Threads::AnnotationDelta::Variants],
+ value: String
+ }
+ )
+ end
+ def to_hash
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/rbi/openai/models/beta/threads/text_delta_block.rbi b/rbi/openai/models/beta/threads/text_delta_block.rbi
new file mode 100644
index 00000000..1d6a884a
--- /dev/null
+++ b/rbi/openai/models/beta/threads/text_delta_block.rbi
@@ -0,0 +1,62 @@
+# typed: strong
+
+module OpenAI
+ module Models
+ module Beta
+ module Threads
+ class TextDeltaBlock < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::Beta::Threads::TextDeltaBlock,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # The index of the content part in the message.
+ sig { returns(Integer) }
+ attr_accessor :index
+
+ # Always `text`.
+ sig { returns(Symbol) }
+ attr_accessor :type
+
+ sig { returns(T.nilable(OpenAI::Beta::Threads::TextDelta)) }
+ attr_reader :text
+
+ sig { params(text: OpenAI::Beta::Threads::TextDelta::OrHash).void }
+ attr_writer :text
+
+ # The text content that is part of a message.
+ sig do
+ params(
+ index: Integer,
+ text: OpenAI::Beta::Threads::TextDelta::OrHash,
+ type: Symbol
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # The index of the content part in the message.
+ index:,
+ text: nil,
+ # Always `text`.
+ type: :text
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ index: Integer,
+ type: Symbol,
+ text: OpenAI::Beta::Threads::TextDelta
+ }
+ )
+ end
+ def to_hash
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/rbi/openai/models/chat/chat_completion.rbi b/rbi/openai/models/chat/chat_completion.rbi
new file mode 100644
index 00000000..c29271e7
--- /dev/null
+++ b/rbi/openai/models/chat/chat_completion.rbi
@@ -0,0 +1,426 @@
+# typed: strong
+
+module OpenAI
+ module Models
+ ChatCompletion = Chat::ChatCompletion
+
+ module Chat
+ class ChatCompletion < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(OpenAI::Chat::ChatCompletion, OpenAI::Internal::AnyHash)
+ end
+
+ # A unique identifier for the chat completion.
+ sig { returns(String) }
+ attr_accessor :id
+
+ # A list of chat completion choices. Can contain more than one choice if `n`
+ # is greater than 1.
+ sig { returns(T::Array[OpenAI::Chat::ChatCompletion::Choice]) }
+ attr_accessor :choices
+
+ # The Unix timestamp (in seconds) of when the chat completion was created.
+ sig { returns(Integer) }
+ attr_accessor :created
+
+ # The model used for the chat completion.
+ sig { returns(String) }
+ attr_accessor :model
+
+ # The object type, which is always `chat.completion`.
+ sig { returns(Symbol) }
+ attr_accessor :object
+
+ # Specifies the processing type used for serving the request.
+ #
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
+ # will be processed with the corresponding service tier.
+ # - When not set, the default behavior is 'auto'.
+ #
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
+ sig do
+ returns(
+ T.nilable(OpenAI::Chat::ChatCompletion::ServiceTier::TaggedSymbol)
+ )
+ end
+ attr_accessor :service_tier
+
+ # This fingerprint represents the backend configuration that the model runs with.
+ #
+ # Can be used in conjunction with the `seed` request parameter to understand when
+ # backend changes have been made that might impact determinism.
+ sig { returns(T.nilable(String)) }
+ attr_reader :system_fingerprint
+
+ sig { params(system_fingerprint: String).void }
+ attr_writer :system_fingerprint
+
+ # Usage statistics for the completion request.
+ sig { returns(T.nilable(OpenAI::CompletionUsage)) }
+ attr_reader :usage
+
+ sig { params(usage: OpenAI::CompletionUsage::OrHash).void }
+ attr_writer :usage
+
+ # Represents a chat completion response returned by the model, based on the
+ # provided input.
+ sig do
+ params(
+ id: String,
+ choices: T::Array[OpenAI::Chat::ChatCompletion::Choice::OrHash],
+ created: Integer,
+ model: String,
+ service_tier:
+ T.nilable(OpenAI::Chat::ChatCompletion::ServiceTier::OrSymbol),
+ system_fingerprint: String,
+ usage: OpenAI::CompletionUsage::OrHash,
+ object: Symbol
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # A unique identifier for the chat completion.
+ id:,
+ # A list of chat completion choices. Can contain more than one choice if `n`
+ # is greater than 1.
+ choices:,
+ # The Unix timestamp (in seconds) of when the chat completion was created.
+ created:,
+ # The model used for the chat completion.
+ model:,
+ # Specifies the processing type used for serving the request.
+ #
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
+ # will be processed with the corresponding service tier.
+ # - When not set, the default behavior is 'auto'.
+ #
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
+ service_tier: nil,
+ # This fingerprint represents the backend configuration that the model runs with.
+ #
+ # Can be used in conjunction with the `seed` request parameter to understand when
+ # backend changes have been made that might impact determinism.
+ system_fingerprint: nil,
+ # Usage statistics for the completion request.
+ usage: nil,
+ # The object type, which is always `chat.completion`.
+ object: :"chat.completion" + ) + end + + sig do + override.returns( + { + id: String, + choices: T::Array[OpenAI::Chat::ChatCompletion::Choice], + created: Integer, + model: String, + object: Symbol, + service_tier: + T.nilable( + OpenAI::Chat::ChatCompletion::ServiceTier::TaggedSymbol + ), + system_fingerprint: String, + usage: OpenAI::CompletionUsage + } + ) + end + def to_hash + end + + class Choice < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletion::Choice, + OpenAI::Internal::AnyHash + ) + end + + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. + sig do + returns( + OpenAI::Chat::ChatCompletion::Choice::FinishReason::TaggedSymbol + ) + end + attr_accessor :finish_reason + + # The index of the choice in the list of choices. + sig { returns(Integer) } + attr_accessor :index + + # Log probability information for the choice. + sig do + returns(T.nilable(OpenAI::Chat::ChatCompletion::Choice::Logprobs)) + end + attr_reader :logprobs + + sig do + params( + logprobs: + T.nilable( + OpenAI::Chat::ChatCompletion::Choice::Logprobs::OrHash + ) + ).void + end + attr_writer :logprobs + + # A chat completion message generated by the model. + sig { returns(OpenAI::Chat::ChatCompletionMessage) } + attr_reader :message + + sig do + params(message: OpenAI::Chat::ChatCompletionMessage::OrHash).void + end + attr_writer :message + + sig do + params( + finish_reason: + OpenAI::Chat::ChatCompletion::Choice::FinishReason::OrSymbol, + index: Integer, + logprobs: + T.nilable( + OpenAI::Chat::ChatCompletion::Choice::Logprobs::OrHash + ), + message: OpenAI::Chat::ChatCompletionMessage::OrHash + ).returns(T.attached_class) + end + def self.new( + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. + finish_reason:, + # The index of the choice in the list of choices. + index:, + # Log probability information for the choice. + logprobs:, + # A chat completion message generated by the model. + message: + ) + end + + sig do + override.returns( + { + finish_reason: + OpenAI::Chat::ChatCompletion::Choice::FinishReason::TaggedSymbol, + index: Integer, + logprobs: + T.nilable(OpenAI::Chat::ChatCompletion::Choice::Logprobs), + message: OpenAI::Chat::ChatCompletionMessage + } + ) + end + def to_hash + end + + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. 
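+ #
+ # An illustrative sketch (assumes `completion` is a parsed response;
+ # `dispatch_tools` is a hypothetical helper):
+ #
+ #   choice = completion.choices.fetch(0)
+ #   case choice.finish_reason
+ #   in :tool_calls
+ #     dispatch_tools(choice.message.tool_calls)
+ #   in :length
+ #     warn "output truncated; consider raising max_completion_tokens"
+ #   else
+ #     puts choice.message.content
+ #   end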
+ module FinishReason + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Chat::ChatCompletion::Choice::FinishReason + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + STOP = + T.let( + :stop, + OpenAI::Chat::ChatCompletion::Choice::FinishReason::TaggedSymbol + ) + LENGTH = + T.let( + :length, + OpenAI::Chat::ChatCompletion::Choice::FinishReason::TaggedSymbol + ) + TOOL_CALLS = + T.let( + :tool_calls, + OpenAI::Chat::ChatCompletion::Choice::FinishReason::TaggedSymbol + ) + CONTENT_FILTER = + T.let( + :content_filter, + OpenAI::Chat::ChatCompletion::Choice::FinishReason::TaggedSymbol + ) + FUNCTION_CALL = + T.let( + :function_call, + OpenAI::Chat::ChatCompletion::Choice::FinishReason::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletion::Choice::FinishReason::TaggedSymbol + ] + ) + end + def self.values + end + end + + class Logprobs < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletion::Choice::Logprobs, + OpenAI::Internal::AnyHash + ) + end + + # A list of message content tokens with log probability information. + sig do + returns( + T.nilable(T::Array[OpenAI::Chat::ChatCompletionTokenLogprob]) + ) + end + attr_accessor :content + + # A list of message refusal tokens with log probability information. + sig do + returns( + T.nilable(T::Array[OpenAI::Chat::ChatCompletionTokenLogprob]) + ) + end + attr_accessor :refusal + + # Log probability information for the choice. + sig do + params( + content: + T.nilable( + T::Array[OpenAI::Chat::ChatCompletionTokenLogprob::OrHash] + ), + refusal: + T.nilable( + T::Array[OpenAI::Chat::ChatCompletionTokenLogprob::OrHash] + ) + ).returns(T.attached_class) + end + def self.new( + # A list of message content tokens with log probability information. + content:, + # A list of message refusal tokens with log probability information. + refusal: + ) + end + + sig do + override.returns( + { + content: + T.nilable( + T::Array[OpenAI::Chat::ChatCompletionTokenLogprob] + ), + refusal: + T.nilable( + T::Array[OpenAI::Chat::ChatCompletionTokenLogprob] + ) + } + ) + end + def to_hash + end + end + end + + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. 
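+ #
+ # Illustrative: since the tier actually applied can differ from the one
+ # requested, callers that care should read it back, e.g.
+ #
+ #   completion.service_tier #=> :default, :flex, etc., or nil when unset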
+ module ServiceTier + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Chat::ChatCompletion::ServiceTier) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Chat::ChatCompletion::ServiceTier::TaggedSymbol + ) + DEFAULT = + T.let( + :default, + OpenAI::Chat::ChatCompletion::ServiceTier::TaggedSymbol + ) + FLEX = + T.let( + :flex, + OpenAI::Chat::ChatCompletion::ServiceTier::TaggedSymbol + ) + SCALE = + T.let( + :scale, + OpenAI::Chat::ChatCompletion::ServiceTier::TaggedSymbol + ) + PRIORITY = + T.let( + :priority, + OpenAI::Chat::ChatCompletion::ServiceTier::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Chat::ChatCompletion::ServiceTier::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_allowed_tool_choice.rbi b/rbi/openai/models/chat/chat_completion_allowed_tool_choice.rbi new file mode 100644 index 00000000..17d1cecc --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_allowed_tool_choice.rbi @@ -0,0 +1,60 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionAllowedToolChoice = Chat::ChatCompletionAllowedToolChoice + + module Chat + class ChatCompletionAllowedToolChoice < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionAllowedToolChoice, + OpenAI::Internal::AnyHash + ) + end + + # Constrains the tools available to the model to a pre-defined set. + sig { returns(OpenAI::Chat::ChatCompletionAllowedTools) } + attr_reader :allowed_tools + + sig do + params( + allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools::OrHash + ).void + end + attr_writer :allowed_tools + + # Allowed tool configuration type. Always `allowed_tools`. + sig { returns(Symbol) } + attr_accessor :type + + # Constrains the tools available to the model to a pre-defined set. + sig do + params( + allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Constrains the tools available to the model to a pre-defined set. + allowed_tools:, + # Allowed tool configuration type. Always `allowed_tools`. + type: :allowed_tools + ) + end + + sig do + override.returns( + { + allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_allowed_tools.rbi b/rbi/openai/models/chat/chat_completion_allowed_tools.rbi new file mode 100644 index 00000000..6dbb2e20 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_allowed_tools.rbi @@ -0,0 +1,118 @@ +# typed: strong + +module OpenAI + module Models + module Chat + class ChatCompletionAllowedTools < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionAllowedTools, + OpenAI::Internal::AnyHash + ) + end + + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + sig do + returns(OpenAI::Chat::ChatCompletionAllowedTools::Mode::OrSymbol) + end + attr_accessor :mode + + # A list of tool definitions that the model should be allowed to call. 
+ # + # For the Chat Completions API, the list of tool definitions might look like: + # + # ```json + # [ + # { "type": "function", "function": { "name": "get_weather" } }, + # { "type": "function", "function": { "name": "get_time" } } + # ] + # ``` + sig { returns(T::Array[T::Hash[Symbol, T.anything]]) } + attr_accessor :tools + + # Constrains the tools available to the model to a pre-defined set. + sig do + params( + mode: OpenAI::Chat::ChatCompletionAllowedTools::Mode::OrSymbol, + tools: T::Array[T::Hash[Symbol, T.anything]] + ).returns(T.attached_class) + end + def self.new( + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + mode:, + # A list of tool definitions that the model should be allowed to call. + # + # For the Chat Completions API, the list of tool definitions might look like: + # + # ```json + # [ + # { "type": "function", "function": { "name": "get_weather" } }, + # { "type": "function", "function": { "name": "get_time" } } + # ] + # ``` + tools: + ) + end + + sig do + override.returns( + { + mode: OpenAI::Chat::ChatCompletionAllowedTools::Mode::OrSymbol, + tools: T::Array[T::Hash[Symbol, T.anything]] + } + ) + end + def to_hash + end + + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + module Mode + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Chat::ChatCompletionAllowedTools::Mode) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Chat::ChatCompletionAllowedTools::Mode::TaggedSymbol + ) + REQUIRED = + T.let( + :required, + OpenAI::Chat::ChatCompletionAllowedTools::Mode::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionAllowedTools::Mode::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi b/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi new file mode 100644 index 00000000..59c05235 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi @@ -0,0 +1,322 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionAssistantMessageParam = + Chat::ChatCompletionAssistantMessageParam + + module Chat + class ChatCompletionAssistantMessageParam < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionAssistantMessageParam, + OpenAI::Internal::AnyHash + ) + end + + # The role of the messages author, in this case `assistant`. + sig { returns(Symbol) } + attr_accessor :role + + # Data about a previous audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). + sig do + returns( + T.nilable(OpenAI::Chat::ChatCompletionAssistantMessageParam::Audio) + ) + end + attr_reader :audio + + sig do + params( + audio: + T.nilable( + OpenAI::Chat::ChatCompletionAssistantMessageParam::Audio::OrHash + ) + ).void + end + attr_writer :audio + + # The contents of the assistant message. Required unless `tool_calls` or + # `function_call` is specified. 
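+ #
+ # Illustrative: `content` may be a plain string or an array of text/refusal
+ # parts, so either of the following type-checks:
+ #
+ #   OpenAI::Chat::ChatCompletionAssistantMessageParam.new(content: "Done.")
+ #   OpenAI::Chat::ChatCompletionAssistantMessageParam.new(
+ #     content: [OpenAI::Chat::ChatCompletionContentPartText.new(text: "Done.")]
+ #   )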
+ sig do + returns( + T.nilable( + OpenAI::Chat::ChatCompletionAssistantMessageParam::Content::Variants + ) + ) + end + attr_accessor :content + + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. + sig do + returns( + T.nilable( + OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall + ) + ) + end + attr_reader :function_call + + sig do + params( + function_call: + T.nilable( + OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall::OrHash + ) + ).void + end + attr_writer :function_call + + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. + sig { returns(T.nilable(String)) } + attr_reader :name + + sig { params(name: String).void } + attr_writer :name + + # The refusal message by the assistant. + sig { returns(T.nilable(String)) } + attr_accessor :refusal + + # The tool calls generated by the model, such as function calls. + sig do + returns( + T.nilable( + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall, + OpenAI::Chat::ChatCompletionMessageCustomToolCall + ) + ] + ) + ) + end + attr_reader :tool_calls + + sig do + params( + tool_calls: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::OrHash, + OpenAI::Chat::ChatCompletionMessageCustomToolCall::OrHash + ) + ] + ).void + end + attr_writer :tool_calls + + # Messages sent by the model in response to user messages. + sig do + params( + audio: + T.nilable( + OpenAI::Chat::ChatCompletionAssistantMessageParam::Audio::OrHash + ), + content: + T.nilable( + OpenAI::Chat::ChatCompletionAssistantMessageParam::Content::Variants + ), + function_call: + T.nilable( + OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall::OrHash + ), + name: String, + refusal: T.nilable(String), + tool_calls: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::OrHash, + OpenAI::Chat::ChatCompletionMessageCustomToolCall::OrHash + ) + ], + role: Symbol + ).returns(T.attached_class) + end + def self.new( + # Data about a previous audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). + audio: nil, + # The contents of the assistant message. Required unless `tool_calls` or + # `function_call` is specified. + content: nil, + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. + function_call: nil, + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. + name: nil, + # The refusal message by the assistant. + refusal: nil, + # The tool calls generated by the model, such as function calls. + tool_calls: nil, + # The role of the messages author, in this case `assistant`. 
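+ #
+ # Taken together, an illustrative replay of an assistant turn that invoked a
+ # function tool (hypothetical call ID and arguments):
+ #
+ #   OpenAI::Chat::ChatCompletionAssistantMessageParam.new(
+ #     tool_calls: [
+ #       {
+ #         id: "call_abc123",
+ #         type: :function,
+ #         function: {name: "get_weather", arguments: "{\"city\":\"Paris\"}"}
+ #       }
+ #     ]
+ #   )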
+ role: :assistant + ) + end + + sig do + override.returns( + { + role: Symbol, + audio: + T.nilable( + OpenAI::Chat::ChatCompletionAssistantMessageParam::Audio + ), + content: + T.nilable( + OpenAI::Chat::ChatCompletionAssistantMessageParam::Content::Variants + ), + function_call: + T.nilable( + OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall + ), + name: String, + refusal: T.nilable(String), + tool_calls: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall, + OpenAI::Chat::ChatCompletionMessageCustomToolCall + ) + ] + } + ) + end + def to_hash + end + + class Audio < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionAssistantMessageParam::Audio, + OpenAI::Internal::AnyHash + ) + end + + # Unique identifier for a previous audio response from the model. + sig { returns(String) } + attr_accessor :id + + # Data about a previous audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). + sig { params(id: String).returns(T.attached_class) } + def self.new( + # Unique identifier for a previous audio response from the model. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + + # The contents of the assistant message. Required unless `tool_calls` or + # `function_call` is specified. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[ + OpenAI::Chat::ChatCompletionAssistantMessageParam::Content::ArrayOfContentPart::Variants + ] + ) + end + + # Learn about + # [text inputs](https://platform.openai.com/docs/guides/text-generation). + module ArrayOfContentPart + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionContentPartText, + OpenAI::Chat::ChatCompletionContentPartRefusal + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionAssistantMessageParam::Content::ArrayOfContentPart::Variants + ] + ) + end + def self.variants + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionAssistantMessageParam::Content::Variants + ] + ) + end + def self.variants + end + + ArrayOfContentPartArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + union: + OpenAI::Chat::ChatCompletionAssistantMessageParam::Content::ArrayOfContentPart + ], + OpenAI::Internal::Type::Converter + ) + end + + class FunctionCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall, + OpenAI::Internal::AnyHash + ) + end + + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. + sig { returns(String) } + attr_accessor :arguments + + # The name of the function to call. + sig { returns(String) } + attr_accessor :name + + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. + sig do + params(arguments: String, name: String).returns(T.attached_class) + end + def self.new( + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. 
Validate the
+ # arguments in your code before calling your function.
+ arguments:,
+ # The name of the function to call.
+ name:
+ )
+ end
+
+ sig { override.returns({ arguments: String, name: String }) }
+ def to_hash
+ end
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/rbi/openai/models/chat/chat_completion_audio.rbi b/rbi/openai/models/chat/chat_completion_audio.rbi
new file mode 100644
index 00000000..59c400f9
--- /dev/null
+++ b/rbi/openai/models/chat/chat_completion_audio.rbi
@@ -0,0 +1,72 @@
+# typed: strong
+
+module OpenAI
+ module Models
+ ChatCompletionAudio = Chat::ChatCompletionAudio
+
+ module Chat
+ class ChatCompletionAudio < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(OpenAI::Chat::ChatCompletionAudio, OpenAI::Internal::AnyHash)
+ end
+
+ # Unique identifier for this audio response.
+ sig { returns(String) }
+ attr_accessor :id
+
+ # Base64 encoded audio bytes generated by the model, in the format specified in
+ # the request.
+ sig { returns(String) }
+ attr_accessor :data
+
+ # The Unix timestamp (in seconds) for when this audio response will no longer be
+ # accessible on the server for use in multi-turn conversations.
+ sig { returns(Integer) }
+ attr_accessor :expires_at
+
+ # Transcript of the audio generated by the model.
+ sig { returns(String) }
+ attr_accessor :transcript
+
+ # If the audio output modality is requested, this object contains data about the
+ # audio response from the model.
+ # [Learn more](https://platform.openai.com/docs/guides/audio).
+ sig do
+ params(
+ id: String,
+ data: String,
+ expires_at: Integer,
+ transcript: String
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # Unique identifier for this audio response.
+ id:,
+ # Base64 encoded audio bytes generated by the model, in the format specified in
+ # the request.
+ data:,
+ # The Unix timestamp (in seconds) for when this audio response will no longer be
+ # accessible on the server for use in multi-turn conversations.
+ expires_at:,
+ # Transcript of the audio generated by the model.
+ transcript:
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ id: String,
+ data: String,
+ expires_at: Integer,
+ transcript: String
+ }
+ )
+ end
+ def to_hash
+ end
+ end
+ end
+ end
+end
diff --git a/rbi/openai/models/chat/chat_completion_audio_param.rbi b/rbi/openai/models/chat/chat_completion_audio_param.rbi
new file mode 100644
index 00000000..6299321c
--- /dev/null
+++ b/rbi/openai/models/chat/chat_completion_audio_param.rbi
@@ -0,0 +1,198 @@
+# typed: strong
+
+module OpenAI
+ module Models
+ ChatCompletionAudioParam = Chat::ChatCompletionAudioParam
+
+ module Chat
+ class ChatCompletionAudioParam < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::Chat::ChatCompletionAudioParam,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # Specifies the output audio format. Must be one of `wav`, `aac`, `mp3`,
+ # `flac`, `opus`, or `pcm16`.
+ sig do
+ returns(OpenAI::Chat::ChatCompletionAudioParam::Format::OrSymbol)
+ end
+ attr_accessor :format_
+
+ # The voice the model uses to respond. Supported voices are `alloy`, `ash`,
+ # `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`.
+ sig do
+ returns(
+ T.any(
+ String,
+ OpenAI::Chat::ChatCompletionAudioParam::Voice::OrSymbol
+ )
+ )
+ end
+ attr_accessor :voice
+
+ # Parameters for audio output. Required when audio output is requested with
+ # `modalities: ["audio"]`.
+ # [Learn more](https://platform.openai.com/docs/guides/audio).
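+ #
+ # An illustrative sketch (assumes an audio-capable model); note the trailing
+ # underscore on `format_`, this SDK's convention for names that would clash
+ # with Ruby built-ins such as `Kernel#format`:
+ #
+ #   OpenAI::Chat::ChatCompletionAudioParam.new(format_: :wav, voice: :alloy)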
+ sig do
+ params(
+ format_: OpenAI::Chat::ChatCompletionAudioParam::Format::OrSymbol,
+ voice:
+ T.any(
+ String,
+ OpenAI::Chat::ChatCompletionAudioParam::Voice::OrSymbol
+ )
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # Specifies the output audio format. Must be one of `wav`, `aac`, `mp3`,
+ # `flac`, `opus`, or `pcm16`.
+ format_:,
+ # The voice the model uses to respond. Supported voices are `alloy`, `ash`,
+ # `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`.
+ voice:
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ format_: OpenAI::Chat::ChatCompletionAudioParam::Format::OrSymbol,
+ voice:
+ T.any(
+ String,
+ OpenAI::Chat::ChatCompletionAudioParam::Voice::OrSymbol
+ )
+ }
+ )
+ end
+ def to_hash
+ end
+
+ # Specifies the output audio format. Must be one of `wav`, `aac`, `mp3`,
+ # `flac`, `opus`, or `pcm16`.
+ module Format
+ extend OpenAI::Internal::Type::Enum
+
+ TaggedSymbol =
+ T.type_alias do
+ T.all(Symbol, OpenAI::Chat::ChatCompletionAudioParam::Format)
+ end
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ WAV =
+ T.let(
+ :wav,
+ OpenAI::Chat::ChatCompletionAudioParam::Format::TaggedSymbol
+ )
+ AAC =
+ T.let(
+ :aac,
+ OpenAI::Chat::ChatCompletionAudioParam::Format::TaggedSymbol
+ )
+ MP3 =
+ T.let(
+ :mp3,
+ OpenAI::Chat::ChatCompletionAudioParam::Format::TaggedSymbol
+ )
+ FLAC =
+ T.let(
+ :flac,
+ OpenAI::Chat::ChatCompletionAudioParam::Format::TaggedSymbol
+ )
+ OPUS =
+ T.let(
+ :opus,
+ OpenAI::Chat::ChatCompletionAudioParam::Format::TaggedSymbol
+ )
+ PCM16 =
+ T.let(
+ :pcm16,
+ OpenAI::Chat::ChatCompletionAudioParam::Format::TaggedSymbol
+ )
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::Chat::ChatCompletionAudioParam::Format::TaggedSymbol
+ ]
+ )
+ end
+ def self.values
+ end
+ end
+
+ # The voice the model uses to respond. Supported voices are `alloy`, `ash`,
+ # `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`.
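+ #
+ # Illustrative: `Voice` is a union of plain strings and enum symbols, so
+ # both of these type-check:
+ #
+ #   OpenAI::Chat::ChatCompletionAudioParam.new(format_: :mp3, voice: :verse)
+ #   OpenAI::Chat::ChatCompletionAudioParam.new(format_: :mp3, voice: "verse")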
+ module Voice
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ String,
+ OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
+ )
+ end
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::Chat::ChatCompletionAudioParam::Voice::Variants]
+ )
+ end
+ def self.variants
+ end
+
+ TaggedSymbol =
+ T.type_alias do
+ T.all(Symbol, OpenAI::Chat::ChatCompletionAudioParam::Voice)
+ end
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ ALLOY =
+ T.let(
+ :alloy,
+ OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
+ )
+ ASH =
+ T.let(
+ :ash,
+ OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
+ )
+ BALLAD =
+ T.let(
+ :ballad,
+ OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
+ )
+ CORAL =
+ T.let(
+ :coral,
+ OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
+ )
+ ECHO =
+ T.let(
+ :echo,
+ OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
+ )
+ SAGE =
+ T.let(
+ :sage,
+ OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
+ )
+ SHIMMER =
+ T.let(
+ :shimmer,
+ OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
+ )
+ VERSE =
+ T.let(
+ :verse,
+ OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
+ )
+ end
+ end
+ end
+ end
+end
diff --git a/rbi/openai/models/chat/chat_completion_chunk.rbi b/rbi/openai/models/chat/chat_completion_chunk.rbi
new file mode 100644
index 00000000..f309f2e3
--- /dev/null
+++ b/rbi/openai/models/chat/chat_completion_chunk.rbi
@@ -0,0 +1,848 @@
+# typed: strong
+
+module OpenAI
+ module Models
+ ChatCompletionChunk = Chat::ChatCompletionChunk
+
+ module Chat
+ class ChatCompletionChunk < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(OpenAI::Chat::ChatCompletionChunk, OpenAI::Internal::AnyHash)
+ end
+
+ # A unique identifier for the chat completion. Each chunk has the same ID.
+ sig { returns(String) }
+ attr_accessor :id
+
+ # A list of chat completion choices. Can contain more than one element if `n`
+ # is greater than 1. Can also be empty for the last chunk if you set
+ # `stream_options: {"include_usage": true}`.
+ sig { returns(T::Array[OpenAI::Chat::ChatCompletionChunk::Choice]) }
+ attr_accessor :choices
+
+ # The Unix timestamp (in seconds) of when the chat completion was created. Each
+ # chunk has the same timestamp.
+ sig { returns(Integer) }
+ attr_accessor :created
+
+ # The model used to generate the completion.
+ sig { returns(String) }
+ attr_accessor :model
+
+ # The object type, which is always `chat.completion.chunk`.
+ sig { returns(Symbol) }
+ attr_accessor :object
+
+ # Specifies the processing type used for serving the request.
+ #
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
+ # will be processed with the corresponding service tier.
+ # - When not set, the default behavior is 'auto'.
+ #
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
+ sig do
+ returns(
+ T.nilable(
+ OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol
+ )
+ )
+ end
+ attr_accessor :service_tier
+
+ # This fingerprint represents the backend configuration that the model runs with.
+ # Can be used in conjunction with the `seed` request parameter to understand when
+ # backend changes have been made that might impact determinism.
+ sig { returns(T.nilable(String)) }
+ attr_reader :system_fingerprint
+
+ sig { params(system_fingerprint: String).void }
+ attr_writer :system_fingerprint
+
+ # An optional field that will only be present when you set
+ # `stream_options: {"include_usage": true}` in your request. When present, it
+ # contains a null value **except for the last chunk** which contains the token
+ # usage statistics for the entire request.
+ #
+ # **NOTE:** If the stream is interrupted or cancelled, you may not receive the
+ # final usage chunk which contains the total token usage for the request.
+ sig { returns(T.nilable(OpenAI::CompletionUsage)) }
+ attr_reader :usage
+
+ sig { params(usage: T.nilable(OpenAI::CompletionUsage::OrHash)).void }
+ attr_writer :usage
+
+ # Represents a streamed chunk of a chat completion response returned by the model,
+ # based on the provided input.
+ # [Learn more](https://platform.openai.com/docs/guides/streaming-responses).
+ sig do
+ params(
+ id: String,
+ choices:
+ T::Array[OpenAI::Chat::ChatCompletionChunk::Choice::OrHash],
+ created: Integer,
+ model: String,
+ service_tier:
+ T.nilable(
+ OpenAI::Chat::ChatCompletionChunk::ServiceTier::OrSymbol
+ ),
+ system_fingerprint: String,
+ usage: T.nilable(OpenAI::CompletionUsage::OrHash),
+ object: Symbol
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # A unique identifier for the chat completion. Each chunk has the same ID.
+ id:,
+ # A list of chat completion choices. Can contain more than one element if `n`
+ # is greater than 1. Can also be empty for the last chunk if you set
+ # `stream_options: {"include_usage": true}`.
+ choices:,
+ # The Unix timestamp (in seconds) of when the chat completion was created. Each
+ # chunk has the same timestamp.
+ created:,
+ # The model used to generate the completion.
+ model:,
+ # Specifies the processing type used for serving the request.
+ #
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # '[priority](https://openai.com/api-priority-processing/)', then the request
+ # will be processed with the corresponding service tier.
+ # - When not set, the default behavior is 'auto'.
+ #
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
+ service_tier: nil,
+ # This fingerprint represents the backend configuration that the model runs with.
+ # Can be used in conjunction with the `seed` request parameter to understand when
+ # backend changes have been made that might impact determinism.
+ system_fingerprint: nil,
+ # An optional field that will only be present when you set
+ # `stream_options: {"include_usage": true}` in your request.
When present, it + # contains a null value **except for the last chunk** which contains the token + # usage statistics for the entire request. + # + # **NOTE:** If the stream is interrupted or cancelled, you may not receive the + # final usage chunk which contains the total token usage for the request. + usage: nil, + # The object type, which is always `chat.completion.chunk`. + object: :"chat.completion.chunk" + ) + end + + sig do + override.returns( + { + id: String, + choices: T::Array[OpenAI::Chat::ChatCompletionChunk::Choice], + created: Integer, + model: String, + object: Symbol, + service_tier: + T.nilable( + OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol + ), + system_fingerprint: String, + usage: T.nilable(OpenAI::CompletionUsage) + } + ) + end + def to_hash + end + + class Choice < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionChunk::Choice, + OpenAI::Internal::AnyHash + ) + end + + # A chat completion delta generated by streamed model responses. + sig { returns(OpenAI::Chat::ChatCompletionChunk::Choice::Delta) } + attr_reader :delta + + sig do + params( + delta: OpenAI::Chat::ChatCompletionChunk::Choice::Delta::OrHash + ).void + end + attr_writer :delta + + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. + sig do + returns( + T.nilable( + OpenAI::Chat::ChatCompletionChunk::Choice::FinishReason::TaggedSymbol + ) + ) + end + attr_accessor :finish_reason + + # The index of the choice in the list of choices. + sig { returns(Integer) } + attr_accessor :index + + # Log probability information for the choice. + sig do + returns( + T.nilable(OpenAI::Chat::ChatCompletionChunk::Choice::Logprobs) + ) + end + attr_reader :logprobs + + sig do + params( + logprobs: + T.nilable( + OpenAI::Chat::ChatCompletionChunk::Choice::Logprobs::OrHash + ) + ).void + end + attr_writer :logprobs + + sig do + params( + delta: OpenAI::Chat::ChatCompletionChunk::Choice::Delta::OrHash, + finish_reason: + T.nilable( + OpenAI::Chat::ChatCompletionChunk::Choice::FinishReason::OrSymbol + ), + index: Integer, + logprobs: + T.nilable( + OpenAI::Chat::ChatCompletionChunk::Choice::Logprobs::OrHash + ) + ).returns(T.attached_class) + end + def self.new( + # A chat completion delta generated by streamed model responses. + delta:, + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. + finish_reason:, + # The index of the choice in the list of choices. + index:, + # Log probability information for the choice. 
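+ # Illustrative (assumes logprobs were requested): per-token data can be read
+ # as
+ #
+ #   choice.logprobs&.content&.each do |tok|
+ #     printf("%-12s %.4f\n", tok.token, tok.logprob)
+ #   end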
+ logprobs: nil + ) + end + + sig do + override.returns( + { + delta: OpenAI::Chat::ChatCompletionChunk::Choice::Delta, + finish_reason: + T.nilable( + OpenAI::Chat::ChatCompletionChunk::Choice::FinishReason::TaggedSymbol + ), + index: Integer, + logprobs: + T.nilable(OpenAI::Chat::ChatCompletionChunk::Choice::Logprobs) + } + ) + end + def to_hash + end + + class Delta < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionChunk::Choice::Delta, + OpenAI::Internal::AnyHash + ) + end + + # The contents of the chunk message. + sig { returns(T.nilable(String)) } + attr_accessor :content + + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. + sig do + returns( + T.nilable( + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall + ) + ) + end + attr_reader :function_call + + sig do + params( + function_call: + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall::OrHash + ).void + end + attr_writer :function_call + + # The refusal message generated by the model. + sig { returns(T.nilable(String)) } + attr_accessor :refusal + + # The role of the author of this message. + sig do + returns( + T.nilable( + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::Role::TaggedSymbol + ) + ) + end + attr_reader :role + + sig do + params( + role: + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::Role::OrSymbol + ).void + end + attr_writer :role + + sig do + returns( + T.nilable( + T::Array[ + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall + ] + ) + ) + end + attr_reader :tool_calls + + sig do + params( + tool_calls: + T::Array[ + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::OrHash + ] + ).void + end + attr_writer :tool_calls + + # A chat completion delta generated by streamed model responses. + sig do + params( + content: T.nilable(String), + function_call: + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall::OrHash, + refusal: T.nilable(String), + role: + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::Role::OrSymbol, + tool_calls: + T::Array[ + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::OrHash + ] + ).returns(T.attached_class) + end + def self.new( + # The contents of the chunk message. + content: nil, + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. + function_call: nil, + # The refusal message generated by the model. + refusal: nil, + # The role of the author of this message. + role: nil, + tool_calls: nil + ) + end + + sig do + override.returns( + { + content: T.nilable(String), + function_call: + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall, + refusal: T.nilable(String), + role: + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::Role::TaggedSymbol, + tool_calls: + T::Array[ + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall + ] + } + ) + end + def to_hash + end + + class FunctionCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall, + OpenAI::Internal::AnyHash + ) + end + + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. 
+ sig { returns(T.nilable(String)) } + attr_reader :arguments + + sig { params(arguments: String).void } + attr_writer :arguments + + # The name of the function to call. + sig { returns(T.nilable(String)) } + attr_reader :name + + sig { params(name: String).void } + attr_writer :name + + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. + sig do + params(arguments: String, name: String).returns( + T.attached_class + ) + end + def self.new( + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. + arguments: nil, + # The name of the function to call. + name: nil + ) + end + + sig { override.returns({ arguments: String, name: String }) } + def to_hash + end + end + + # The role of the author of this message. + module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::Role + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + DEVELOPER = + T.let( + :developer, + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::Role::TaggedSymbol + ) + SYSTEM = + T.let( + :system, + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::Role::TaggedSymbol + ) + USER = + T.let( + :user, + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::Role::TaggedSymbol + ) + TOOL = + T.let( + :tool, + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + + class ToolCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(Integer) } + attr_accessor :index + + # The ID of the tool call. + sig { returns(T.nilable(String)) } + attr_reader :id + + sig { params(id: String).void } + attr_writer :id + + sig do + returns( + T.nilable( + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function + ) + ) + end + attr_reader :function + + sig do + params( + function: + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function::OrHash + ).void + end + attr_writer :function + + # The type of the tool. Currently, only `function` is supported. + sig do + returns( + T.nilable( + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type::TaggedSymbol + ) + ) + end + attr_reader :type + + sig do + params( + type: + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type::OrSymbol + ).void + end + attr_writer :type + + sig do + params( + index: Integer, + id: String, + function: + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function::OrHash, + type: + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type::OrSymbol + ).returns(T.attached_class) + end + def self.new( + index:, + # The ID of the tool call. + id: nil, + function: nil, + # The type of the tool. Currently, only `function` is supported. 
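+ #
+ # Illustrative sketch: streamed tool calls arrive as fragments keyed by
+ # `index`, with `arguments` accumulating across chunks (assumes a `calls`
+ # Hash that persists for the life of the stream):
+ #
+ #   (calls[tool_call.index] ||= +"") << tool_call.function&.arguments.to_s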
+ type: nil + ) + end + + sig do + override.returns( + { + index: Integer, + id: String, + function: + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function, + type: + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type::TaggedSymbol + } + ) + end + def to_hash + end + + class Function < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function, + OpenAI::Internal::AnyHash + ) + end + + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. + sig { returns(T.nilable(String)) } + attr_reader :arguments + + sig { params(arguments: String).void } + attr_writer :arguments + + # The name of the function to call. + sig { returns(T.nilable(String)) } + attr_reader :name + + sig { params(name: String).void } + attr_writer :name + + sig do + params(arguments: String, name: String).returns( + T.attached_class + ) + end + def self.new( + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. + arguments: nil, + # The name of the function to call. + name: nil + ) + end + + sig { override.returns({ arguments: String, name: String }) } + def to_hash + end + end + + # The type of the tool. Currently, only `function` is supported. + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + FUNCTION = + T.let( + :function, + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. 
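+ # As a non-normative usage sketch (variable and handler names assumed), a
+ # streaming consumer might branch on the final chunk's value:
+ #
+ #   case chunk.choices.first&.finish_reason
+ #   when :tool_calls then dispatch_tools(pending_tool_calls)
+ #   when :length then warn("output truncated at the token limit")
+ #   end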
+ module FinishReason + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Chat::ChatCompletionChunk::Choice::FinishReason + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + STOP = + T.let( + :stop, + OpenAI::Chat::ChatCompletionChunk::Choice::FinishReason::TaggedSymbol + ) + LENGTH = + T.let( + :length, + OpenAI::Chat::ChatCompletionChunk::Choice::FinishReason::TaggedSymbol + ) + TOOL_CALLS = + T.let( + :tool_calls, + OpenAI::Chat::ChatCompletionChunk::Choice::FinishReason::TaggedSymbol + ) + CONTENT_FILTER = + T.let( + :content_filter, + OpenAI::Chat::ChatCompletionChunk::Choice::FinishReason::TaggedSymbol + ) + FUNCTION_CALL = + T.let( + :function_call, + OpenAI::Chat::ChatCompletionChunk::Choice::FinishReason::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionChunk::Choice::FinishReason::TaggedSymbol + ] + ) + end + def self.values + end + end + + class Logprobs < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionChunk::Choice::Logprobs, + OpenAI::Internal::AnyHash + ) + end + + # A list of message content tokens with log probability information. + sig do + returns( + T.nilable(T::Array[OpenAI::Chat::ChatCompletionTokenLogprob]) + ) + end + attr_accessor :content + + # A list of message refusal tokens with log probability information. + sig do + returns( + T.nilable(T::Array[OpenAI::Chat::ChatCompletionTokenLogprob]) + ) + end + attr_accessor :refusal + + # Log probability information for the choice. + sig do + params( + content: + T.nilable( + T::Array[OpenAI::Chat::ChatCompletionTokenLogprob::OrHash] + ), + refusal: + T.nilable( + T::Array[OpenAI::Chat::ChatCompletionTokenLogprob::OrHash] + ) + ).returns(T.attached_class) + end + def self.new( + # A list of message content tokens with log probability information. + content:, + # A list of message refusal tokens with log probability information. + refusal: + ) + end + + sig do + override.returns( + { + content: + T.nilable( + T::Array[OpenAI::Chat::ChatCompletionTokenLogprob] + ), + refusal: + T.nilable( + T::Array[OpenAI::Chat::ChatCompletionTokenLogprob] + ) + } + ) + end + def to_hash + end + end + end + + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. 
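+ # Illustration only (request plumbing assumed elsewhere): a request made
+ # with `service_tier: :flex` may still be served under a different tier, so
+ # check the echoed value on the chunk:
+ #
+ #   chunk.service_tier # e.g. :default, despite requesting :flex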
+ module ServiceTier + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Chat::ChatCompletionChunk::ServiceTier) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol + ) + DEFAULT = + T.let( + :default, + OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol + ) + FLEX = + T.let( + :flex, + OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol + ) + SCALE = + T.let( + :scale, + OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol + ) + PRIORITY = + T.let( + :priority, + OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_content_part.rbi b/rbi/openai/models/chat/chat_completion_content_part.rbi new file mode 100644 index 00000000..33230ab9 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_content_part.rbi @@ -0,0 +1,141 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionContentPart = Chat::ChatCompletionContentPart + + module Chat + # Learn about + # [text inputs](https://platform.openai.com/docs/guides/text-generation). + module ChatCompletionContentPart + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionContentPartText, + OpenAI::Chat::ChatCompletionContentPartImage, + OpenAI::Chat::ChatCompletionContentPartInputAudio, + OpenAI::Chat::ChatCompletionContentPart::File + ) + end + + class File < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionContentPart::File, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(OpenAI::Chat::ChatCompletionContentPart::File::File) } + attr_reader :file + + sig do + params( + file: OpenAI::Chat::ChatCompletionContentPart::File::File::OrHash + ).void + end + attr_writer :file + + # The type of the content part. Always `file`. + sig { returns(Symbol) } + attr_accessor :type + + # Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text + # generation. + sig do + params( + file: OpenAI::Chat::ChatCompletionContentPart::File::File::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + file:, + # The type of the content part. Always `file`. + type: :file + ) + end + + sig do + override.returns( + { + file: OpenAI::Chat::ChatCompletionContentPart::File::File, + type: Symbol + } + ) + end + def to_hash + end + + class File < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionContentPart::File::File, + OpenAI::Internal::AnyHash + ) + end + + # The base64 encoded file data, used when passing the file to the model as a + # string. + sig { returns(T.nilable(String)) } + attr_reader :file_data + + sig { params(file_data: String).void } + attr_writer :file_data + + # The ID of an uploaded file to use as input. + sig { returns(T.nilable(String)) } + attr_reader :file_id + + sig { params(file_id: String).void } + attr_writer :file_id + + # The name of the file, used when passing the file to the model as a string. 
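+ # A hedged construction sketch for the whole file part (the path and
+ # filename are invented):
+ #
+ #   OpenAI::Chat::ChatCompletionContentPart::File.new(
+ #     file: {
+ #       file_data: Base64.strict_encode64(File.binread("report.pdf")),
+ #       filename: "report.pdf"
+ #     }
+ #   )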
+ sig { returns(T.nilable(String)) } + attr_reader :filename + + sig { params(filename: String).void } + attr_writer :filename + + sig do + params( + file_data: String, + file_id: String, + filename: String + ).returns(T.attached_class) + end + def self.new( + # The base64 encoded file data, used when passing the file to the model as a + # string. + file_data: nil, + # The ID of an uploaded file to use as input. + file_id: nil, + # The name of the file, used when passing the file to the model as a string. + filename: nil + ) + end + + sig do + override.returns( + { file_data: String, file_id: String, filename: String } + ) + end + def to_hash + end + end + end + + sig do + override.returns( + T::Array[OpenAI::Chat::ChatCompletionContentPart::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_content_part_image.rbi b/rbi/openai/models/chat/chat_completion_content_part_image.rbi new file mode 100644 index 00000000..5c2ff978 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_content_part_image.rbi @@ -0,0 +1,162 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionContentPartImage = Chat::ChatCompletionContentPartImage + + module Chat + class ChatCompletionContentPartImage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionContentPartImage, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(OpenAI::Chat::ChatCompletionContentPartImage::ImageURL) } + attr_reader :image_url + + sig do + params( + image_url: + OpenAI::Chat::ChatCompletionContentPartImage::ImageURL::OrHash + ).void + end + attr_writer :image_url + + # The type of the content part. + sig { returns(Symbol) } + attr_accessor :type + + # Learn about [image inputs](https://platform.openai.com/docs/guides/vision). + sig do + params( + image_url: + OpenAI::Chat::ChatCompletionContentPartImage::ImageURL::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + image_url:, + # The type of the content part. + type: :image_url + ) + end + + sig do + override.returns( + { + image_url: OpenAI::Chat::ChatCompletionContentPartImage::ImageURL, + type: Symbol + } + ) + end + def to_hash + end + + class ImageURL < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionContentPartImage::ImageURL, + OpenAI::Internal::AnyHash + ) + end + + # Either a URL of the image or the base64 encoded image data. + sig { returns(String) } + attr_accessor :url + + # Specifies the detail level of the image. Learn more in the + # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). + sig do + returns( + T.nilable( + OpenAI::Chat::ChatCompletionContentPartImage::ImageURL::Detail::OrSymbol + ) + ) + end + attr_reader :detail + + sig do + params( + detail: + OpenAI::Chat::ChatCompletionContentPartImage::ImageURL::Detail::OrSymbol + ).void + end + attr_writer :detail + + sig do + params( + url: String, + detail: + OpenAI::Chat::ChatCompletionContentPartImage::ImageURL::Detail::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # Either a URL of the image or the base64 encoded image data. + url:, + # Specifies the detail level of the image. Learn more in the + # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). 
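+ # For example (a non-authoritative sketch; the URL is a placeholder):
+ #
+ #   OpenAI::Chat::ChatCompletionContentPartImage::ImageURL.new(
+ #     url: "https://example.com/photo.jpg",
+ #     detail: :low # trade fidelity for speed and fewer tokens
+ #   )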
+ detail: nil + ) + end + + sig do + override.returns( + { + url: String, + detail: + OpenAI::Chat::ChatCompletionContentPartImage::ImageURL::Detail::OrSymbol + } + ) + end + def to_hash + end + + # Specifies the detail level of the image. Learn more in the + # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). + module Detail + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Chat::ChatCompletionContentPartImage::ImageURL::Detail + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Chat::ChatCompletionContentPartImage::ImageURL::Detail::TaggedSymbol + ) + LOW = + T.let( + :low, + OpenAI::Chat::ChatCompletionContentPartImage::ImageURL::Detail::TaggedSymbol + ) + HIGH = + T.let( + :high, + OpenAI::Chat::ChatCompletionContentPartImage::ImageURL::Detail::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionContentPartImage::ImageURL::Detail::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_content_part_input_audio.rbi b/rbi/openai/models/chat/chat_completion_content_part_input_audio.rbi new file mode 100644 index 00000000..addd6b5b --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_content_part_input_audio.rbi @@ -0,0 +1,148 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionContentPartInputAudio = + Chat::ChatCompletionContentPartInputAudio + + module Chat + class ChatCompletionContentPartInputAudio < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionContentPartInputAudio, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns(OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio) + end + attr_reader :input_audio + + sig do + params( + input_audio: + OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio::OrHash + ).void + end + attr_writer :input_audio + + # The type of the content part. Always `input_audio`. + sig { returns(Symbol) } + attr_accessor :type + + # Learn about [audio inputs](https://platform.openai.com/docs/guides/audio). + sig do + params( + input_audio: + OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + input_audio:, + # The type of the content part. Always `input_audio`. + type: :input_audio + ) + end + + sig do + override.returns( + { + input_audio: + OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio, + type: Symbol + } + ) + end + def to_hash + end + + class InputAudio < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio, + OpenAI::Internal::AnyHash + ) + end + + # Base64 encoded audio data. + sig { returns(String) } + attr_accessor :data + + # The format of the encoded audio data. Currently supports "wav" and "mp3". + sig do + returns( + OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio::Format::OrSymbol + ) + end + attr_accessor :format_ + + sig do + params( + data: String, + format_: + OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio::Format::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # Base64 encoded audio data. + data:, + # The format of the encoded audio data. Currently supports "wav" and "mp3". 
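+ # A hedged sketch (the clip path is invented); note the SDK spells the
+ # keyword `format_`:
+ #
+ #   OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio.new(
+ #     data: Base64.strict_encode64(File.binread("clip.wav")),
+ #     format_: :wav
+ #   )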
+ format_: + ) + end + + sig do + override.returns( + { + data: String, + format_: + OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio::Format::OrSymbol + } + ) + end + def to_hash + end + + # The format of the encoded audio data. Currently supports "wav" and "mp3". + module Format + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio::Format + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + WAV = + T.let( + :wav, + OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio::Format::TaggedSymbol + ) + MP3 = + T.let( + :mp3, + OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio::Format::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio::Format::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_content_part_refusal.rbi b/rbi/openai/models/chat/chat_completion_content_part_refusal.rbi new file mode 100644 index 00000000..8e6e0c55 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_content_part_refusal.rbi @@ -0,0 +1,40 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionContentPartRefusal = Chat::ChatCompletionContentPartRefusal + + module Chat + class ChatCompletionContentPartRefusal < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionContentPartRefusal, + OpenAI::Internal::AnyHash + ) + end + + # The refusal message generated by the model. + sig { returns(String) } + attr_accessor :refusal + + # The type of the content part. + sig { returns(Symbol) } + attr_accessor :type + + sig { params(refusal: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The refusal message generated by the model. + refusal:, + # The type of the content part. + type: :refusal + ) + end + + sig { override.returns({ refusal: String, type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_content_part_text.rbi b/rbi/openai/models/chat/chat_completion_content_part_text.rbi new file mode 100644 index 00000000..c11d4932 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_content_part_text.rbi @@ -0,0 +1,42 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionContentPartText = Chat::ChatCompletionContentPartText + + module Chat + class ChatCompletionContentPartText < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionContentPartText, + OpenAI::Internal::AnyHash + ) + end + + # The text content. + sig { returns(String) } + attr_accessor :text + + # The type of the content part. + sig { returns(Symbol) } + attr_accessor :type + + # Learn about + # [text inputs](https://platform.openai.com/docs/guides/text-generation). + sig { params(text: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The text content. + text:, + # The type of the content part. 
+ type: :text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_custom_tool.rbi b/rbi/openai/models/chat/chat_completion_custom_tool.rbi new file mode 100644 index 00000000..2ce1f84a --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_custom_tool.rbi @@ -0,0 +1,335 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionCustomTool = Chat::ChatCompletionCustomTool + + module Chat + class ChatCompletionCustomTool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionCustomTool, + OpenAI::Internal::AnyHash + ) + end + + # Properties of the custom tool. + sig { returns(OpenAI::Chat::ChatCompletionCustomTool::Custom) } + attr_reader :custom + + sig do + params( + custom: OpenAI::Chat::ChatCompletionCustomTool::Custom::OrHash + ).void + end + attr_writer :custom + + # The type of the custom tool. Always `custom`. + sig { returns(Symbol) } + attr_accessor :type + + # A custom tool that processes input using a specified format. + sig do + params( + custom: OpenAI::Chat::ChatCompletionCustomTool::Custom::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Properties of the custom tool. + custom:, + # The type of the custom tool. Always `custom`. + type: :custom + ) + end + + sig do + override.returns( + { + custom: OpenAI::Chat::ChatCompletionCustomTool::Custom, + type: Symbol + } + ) + end + def to_hash + end + + class Custom < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom, + OpenAI::Internal::AnyHash + ) + end + + # The name of the custom tool, used to identify it in tool calls. + sig { returns(String) } + attr_accessor :name + + # Optional description of the custom tool, used to provide more context. + sig { returns(T.nilable(String)) } + attr_reader :description + + sig { params(description: String).void } + attr_writer :description + + # The input format for the custom tool. Default is unconstrained text. + sig do + returns( + T.nilable( + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar + ) + ) + ) + end + attr_reader :format_ + + sig do + params( + format_: + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text::OrHash, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::OrHash + ) + ).void + end + attr_writer :format_ + + # Properties of the custom tool. + sig do + params( + name: String, + description: String, + format_: + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text::OrHash, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::OrHash + ) + ).returns(T.attached_class) + end + def self.new( + # The name of the custom tool, used to identify it in tool calls. + name:, + # Optional description of the custom tool, used to provide more context. + description: nil, + # The input format for the custom tool. Default is unconstrained text. + format_: nil + ) + end + + sig do + override.returns( + { + name: String, + description: String, + format_: + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar + ) + } + ) + end + def to_hash + end + + # The input format for the custom tool. Default is unconstrained text. 
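+ # A hedged sketch of a custom tool definition (the names are invented);
+ # `format_` may be omitted for plain text, or given as a text or grammar
+ # variant:
+ #
+ #   OpenAI::Chat::ChatCompletionCustomTool::Custom.new(
+ #     name: "sql_runner",
+ #     description: "Runs a read-only SQL query",
+ #     format_: { type: :text }
+ #   )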
+ module Format + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar + ) + end + + class Text < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text, + OpenAI::Internal::AnyHash + ) + end + + # Unconstrained text format. Always `text`. + sig { returns(Symbol) } + attr_accessor :type + + # Unconstrained free-form text. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # Unconstrained text format. Always `text`. + type: :text + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + class Grammar < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar, + OpenAI::Internal::AnyHash + ) + end + + # Your chosen grammar. + sig do + returns( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar + ) + end + attr_reader :grammar + + sig do + params( + grammar: + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::OrHash + ).void + end + attr_writer :grammar + + # Grammar format. Always `grammar`. + sig { returns(Symbol) } + attr_accessor :type + + # A grammar defined by the user. + sig do + params( + grammar: + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Your chosen grammar. + grammar:, + # Grammar format. Always `grammar`. + type: :grammar + ) + end + + sig do + override.returns( + { + grammar: + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar, + type: Symbol + } + ) + end + def to_hash + end + + class Grammar < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar, + OpenAI::Internal::AnyHash + ) + end + + # The grammar definition. + sig { returns(String) } + attr_accessor :definition + + # The syntax of the grammar definition. One of `lark` or `regex`. + sig do + returns( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax::OrSymbol + ) + end + attr_accessor :syntax + + # Your chosen grammar. + sig do + params( + definition: String, + syntax: + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # The grammar definition. + definition:, + # The syntax of the grammar definition. One of `lark` or `regex`. + syntax: + ) + end + + sig do + override.returns( + { + definition: String, + syntax: + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax::OrSymbol + } + ) + end + def to_hash + end + + # The syntax of the grammar definition. One of `lark` or `regex`. 
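+ # For illustration (the pattern is invented), a regex-syntax grammar:
+ #
+ #   OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar.new(
+ #     definition: "\\d{3}-\\d{4}",
+ #     syntax: :regex
+ #   )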
+ module Syntax + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LARK = + T.let( + :lark, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax::TaggedSymbol + ) + REGEX = + T.let( + :regex, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Variants + ] + ) + end + def self.variants + end + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_deleted.rbi b/rbi/openai/models/chat/chat_completion_deleted.rbi new file mode 100644 index 00000000..9edfdd85 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_deleted.rbi @@ -0,0 +1,52 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionDeleted = Chat::ChatCompletionDeleted + + module Chat + class ChatCompletionDeleted < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionDeleted, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the chat completion that was deleted. + sig { returns(String) } + attr_accessor :id + + # Whether the chat completion was deleted. + sig { returns(T::Boolean) } + attr_accessor :deleted + + # The type of object being deleted. + sig { returns(Symbol) } + attr_accessor :object + + sig do + params(id: String, deleted: T::Boolean, object: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The ID of the chat completion that was deleted. + id:, + # Whether the chat completion was deleted. + deleted:, + # The type of object being deleted. + object: :"chat.completion.deleted" + ) + end + + sig do + override.returns({ id: String, deleted: T::Boolean, object: Symbol }) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_developer_message_param.rbi b/rbi/openai/models/chat/chat_completion_developer_message_param.rbi new file mode 100644 index 00000000..79d40f0c --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_developer_message_param.rbi @@ -0,0 +1,106 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionDeveloperMessageParam = + Chat::ChatCompletionDeveloperMessageParam + + module Chat + class ChatCompletionDeveloperMessageParam < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionDeveloperMessageParam, + OpenAI::Internal::AnyHash + ) + end + + # The contents of the developer message. + sig do + returns( + OpenAI::Chat::ChatCompletionDeveloperMessageParam::Content::Variants + ) + end + attr_accessor :content + + # The role of the messages author, in this case `developer`. + sig { returns(Symbol) } + attr_accessor :role + + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. + sig { returns(T.nilable(String)) } + attr_reader :name + + sig { params(name: String).void } + attr_writer :name + + # Developer-provided instructions that the model should follow, regardless of + # messages sent by the user. 
With o1 models and newer, `developer` messages + # replace the previous `system` messages. + sig do + params( + content: + OpenAI::Chat::ChatCompletionDeveloperMessageParam::Content::Variants, + name: String, + role: Symbol + ).returns(T.attached_class) + end + def self.new( + # The contents of the developer message. + content:, + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. + name: nil, + # The role of the messages author, in this case `developer`. + role: :developer + ) + end + + sig do + override.returns( + { + content: + OpenAI::Chat::ChatCompletionDeveloperMessageParam::Content::Variants, + role: Symbol, + name: String + } + ) + end + def to_hash + end + + # The contents of the developer message. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[OpenAI::Chat::ChatCompletionContentPartText] + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionDeveloperMessageParam::Content::Variants + ] + ) + end + def self.variants + end + + ChatCompletionContentPartTextArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + OpenAI::Chat::ChatCompletionContentPartText + ], + OpenAI::Internal::Type::Converter + ) + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_function_call_option.rbi b/rbi/openai/models/chat/chat_completion_function_call_option.rbi new file mode 100644 index 00000000..bcccb328 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_function_call_option.rbi @@ -0,0 +1,36 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionFunctionCallOption = Chat::ChatCompletionFunctionCallOption + + module Chat + class ChatCompletionFunctionCallOption < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionFunctionCallOption, + OpenAI::Internal::AnyHash + ) + end + + # The name of the function to call. + sig { returns(String) } + attr_accessor :name + + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. + sig { params(name: String).returns(T.attached_class) } + def self.new( + # The name of the function to call. + name: + ) + end + + sig { override.returns({ name: String }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_function_message_param.rbi b/rbi/openai/models/chat/chat_completion_function_message_param.rbi new file mode 100644 index 00000000..3b7e878a --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_function_message_param.rbi @@ -0,0 +1,57 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionFunctionMessageParam = + Chat::ChatCompletionFunctionMessageParam + + module Chat + class ChatCompletionFunctionMessageParam < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionFunctionMessageParam, + OpenAI::Internal::AnyHash + ) + end + + # The contents of the function message. + sig { returns(T.nilable(String)) } + attr_accessor :content + + # The name of the function to call. + sig { returns(String) } + attr_accessor :name + + # The role of the messages author, in this case `function`. + sig { returns(Symbol) } + attr_accessor :role + + sig do + params( + content: T.nilable(String), + name: String, + role: Symbol + ).returns(T.attached_class) + end + def self.new( + # The contents of the function message. 
+ content:, + # The name of the function to call. + name:, + # The role of the messages author, in this case `function`. + role: :function + ) + end + + sig do + override.returns( + { content: T.nilable(String), name: String, role: Symbol } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_function_tool.rbi b/rbi/openai/models/chat/chat_completion_function_tool.rbi new file mode 100644 index 00000000..b8fa3e81 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_function_tool.rbi @@ -0,0 +1,51 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionFunctionTool = Chat::ChatCompletionFunctionTool + + module Chat + class ChatCompletionFunctionTool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionFunctionTool, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(OpenAI::FunctionDefinition) } + attr_reader :function + + sig { params(function: OpenAI::FunctionDefinition::OrHash).void } + attr_writer :function + + # The type of the tool. Currently, only `function` is supported. + sig { returns(Symbol) } + attr_accessor :type + + # A function tool that can be used to generate a response. + sig do + params( + function: OpenAI::FunctionDefinition::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + function:, + # The type of the tool. Currently, only `function` is supported. + type: :function + ) + end + + sig do + override.returns( + { function: OpenAI::FunctionDefinition, type: Symbol } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_message.rbi b/rbi/openai/models/chat/chat_completion_message.rbi new file mode 100644 index 00000000..2f30fe03 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_message.rbi @@ -0,0 +1,318 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionMessage = Chat::ChatCompletionMessage + + module Chat + class ChatCompletionMessage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionMessage, + OpenAI::Internal::AnyHash + ) + end + + # The contents of the message. + sig { returns(T.nilable(String)) } + attr_accessor :content + + # The refusal message generated by the model. + sig { returns(T.nilable(String)) } + attr_accessor :refusal + + # The role of the author of this message. + sig { returns(Symbol) } + attr_accessor :role + + # Annotations for the message, when applicable, as when using the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + sig do + returns( + T.nilable(T::Array[OpenAI::Chat::ChatCompletionMessage::Annotation]) + ) + end + attr_reader :annotations + + sig do + params( + annotations: + T::Array[OpenAI::Chat::ChatCompletionMessage::Annotation::OrHash] + ).void + end + attr_writer :annotations + + # If the audio output modality is requested, this object contains data about the + # audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). + sig { returns(T.nilable(OpenAI::Chat::ChatCompletionAudio)) } + attr_reader :audio + + sig do + params( + audio: T.nilable(OpenAI::Chat::ChatCompletionAudio::OrHash) + ).void + end + attr_writer :audio + + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. 
+ sig do + returns(T.nilable(OpenAI::Chat::ChatCompletionMessage::FunctionCall)) + end + attr_reader :function_call + + sig do + params( + function_call: + OpenAI::Chat::ChatCompletionMessage::FunctionCall::OrHash + ).void + end + attr_writer :function_call + + # The tool calls generated by the model, such as function calls. + sig do + returns( + T.nilable( + T::Array[OpenAI::Chat::ChatCompletionMessageToolCall::Variants] + ) + ) + end + attr_reader :tool_calls + + sig do + params( + tool_calls: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::OrHash, + OpenAI::Chat::ChatCompletionMessageCustomToolCall::OrHash + ) + ] + ).void + end + attr_writer :tool_calls + + # A chat completion message generated by the model. + sig do + params( + content: T.nilable(String), + refusal: T.nilable(String), + annotations: + T::Array[OpenAI::Chat::ChatCompletionMessage::Annotation::OrHash], + audio: T.nilable(OpenAI::Chat::ChatCompletionAudio::OrHash), + function_call: + OpenAI::Chat::ChatCompletionMessage::FunctionCall::OrHash, + tool_calls: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::OrHash, + OpenAI::Chat::ChatCompletionMessageCustomToolCall::OrHash + ) + ], + role: Symbol + ).returns(T.attached_class) + end + def self.new( + # The contents of the message. + content:, + # The refusal message generated by the model. + refusal:, + # Annotations for the message, when applicable, as when using the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + annotations: nil, + # If the audio output modality is requested, this object contains data about the + # audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). + audio: nil, + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. + function_call: nil, + # The tool calls generated by the model, such as function calls. + tool_calls: nil, + # The role of the author of this message. + role: :assistant + ) + end + + sig do + override.returns( + { + content: T.nilable(String), + refusal: T.nilable(String), + role: Symbol, + annotations: + T::Array[OpenAI::Chat::ChatCompletionMessage::Annotation], + audio: T.nilable(OpenAI::Chat::ChatCompletionAudio), + function_call: OpenAI::Chat::ChatCompletionMessage::FunctionCall, + tool_calls: + T::Array[OpenAI::Chat::ChatCompletionMessageToolCall::Variants] + } + ) + end + def to_hash + end + + class Annotation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionMessage::Annotation, + OpenAI::Internal::AnyHash + ) + end + + # The type of the URL citation. Always `url_citation`. + sig { returns(Symbol) } + attr_accessor :type + + # A URL citation when using web search. + sig do + returns( + OpenAI::Chat::ChatCompletionMessage::Annotation::URLCitation + ) + end + attr_reader :url_citation + + sig do + params( + url_citation: + OpenAI::Chat::ChatCompletionMessage::Annotation::URLCitation::OrHash + ).void + end + attr_writer :url_citation + + # A URL citation when using web search. + sig do + params( + url_citation: + OpenAI::Chat::ChatCompletionMessage::Annotation::URLCitation::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # A URL citation when using web search. + url_citation:, + # The type of the URL citation. Always `url_citation`. 
+ type: :url_citation + ) + end + + sig do + override.returns( + { + type: Symbol, + url_citation: + OpenAI::Chat::ChatCompletionMessage::Annotation::URLCitation + } + ) + end + def to_hash + end + + class URLCitation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionMessage::Annotation::URLCitation, + OpenAI::Internal::AnyHash + ) + end + + # The index of the last character of the URL citation in the message. + sig { returns(Integer) } + attr_accessor :end_index + + # The index of the first character of the URL citation in the message. + sig { returns(Integer) } + attr_accessor :start_index + + # The title of the web resource. + sig { returns(String) } + attr_accessor :title + + # The URL of the web resource. + sig { returns(String) } + attr_accessor :url + + # A URL citation when using web search. + sig do + params( + end_index: Integer, + start_index: Integer, + title: String, + url: String + ).returns(T.attached_class) + end + def self.new( + # The index of the last character of the URL citation in the message. + end_index:, + # The index of the first character of the URL citation in the message. + start_index:, + # The title of the web resource. + title:, + # The URL of the web resource. + url: + ) + end + + sig do + override.returns( + { + end_index: Integer, + start_index: Integer, + title: String, + url: String + } + ) + end + def to_hash + end + end + end + + class FunctionCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionMessage::FunctionCall, + OpenAI::Internal::AnyHash + ) + end + + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. + sig { returns(String) } + attr_accessor :arguments + + # The name of the function to call. + sig { returns(String) } + attr_accessor :name + + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. + sig do + params(arguments: String, name: String).returns(T.attached_class) + end + def self.new( + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. + arguments:, + # The name of the function to call. + name: + ) + end + + sig { override.returns({ arguments: String, name: String }) } + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_message_custom_tool_call.rbi b/rbi/openai/models/chat/chat_completion_message_custom_tool_call.rbi new file mode 100644 index 00000000..77a50d60 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_message_custom_tool_call.rbi @@ -0,0 +1,105 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionMessageCustomToolCall = + Chat::ChatCompletionMessageCustomToolCall + + module Chat + class ChatCompletionMessageCustomToolCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionMessageCustomToolCall, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the tool call. 
+ sig { returns(String) } + attr_accessor :id + + # The custom tool that the model called. + sig do + returns(OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom) + end + attr_reader :custom + + sig do + params( + custom: + OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom::OrHash + ).void + end + attr_writer :custom + + # The type of the tool. Always `custom`. + sig { returns(Symbol) } + attr_accessor :type + + # A call to a custom tool created by the model. + sig do + params( + id: String, + custom: + OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the tool call. + id:, + # The custom tool that the model called. + custom:, + # The type of the tool. Always `custom`. + type: :custom + ) + end + + sig do + override.returns( + { + id: String, + custom: OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom, + type: Symbol + } + ) + end + def to_hash + end + + class Custom < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom, + OpenAI::Internal::AnyHash + ) + end + + # The input for the custom tool call generated by the model. + sig { returns(String) } + attr_accessor :input + + # The name of the custom tool to call. + sig { returns(String) } + attr_accessor :name + + # The custom tool that the model called. + sig { params(input: String, name: String).returns(T.attached_class) } + def self.new( + # The input for the custom tool call generated by the model. + input:, + # The name of the custom tool to call. + name: + ) + end + + sig { override.returns({ input: String, name: String }) } + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_message_function_tool_call.rbi b/rbi/openai/models/chat/chat_completion_message_function_tool_call.rbi new file mode 100644 index 00000000..a6d11892 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_message_function_tool_call.rbi @@ -0,0 +1,114 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionMessageFunctionToolCall = + Chat::ChatCompletionMessageFunctionToolCall + + module Chat + class ChatCompletionMessageFunctionToolCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the tool call. + sig { returns(String) } + attr_accessor :id + + # The function that the model called. + sig do + returns(OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function) + end + attr_reader :function + + sig do + params( + function: + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function::OrHash + ).void + end + attr_writer :function + + # The type of the tool. Currently, only `function` is supported. + sig { returns(Symbol) } + attr_accessor :type + + # A call to a function tool created by the model. + sig do + params( + id: String, + function: + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the tool call. + id:, + # The function that the model called. + function:, + # The type of the tool. Currently, only `function` is supported. 
+ type: :function + ) + end + + sig do + override.returns( + { + id: String, + function: + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function, + type: Symbol + } + ) + end + def to_hash + end + + class Function < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function, + OpenAI::Internal::AnyHash + ) + end + + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. + sig { returns(String) } + attr_accessor :arguments + + # The name of the function to call. + sig { returns(String) } + attr_accessor :name + + # The function that the model called. + sig do + params(arguments: String, name: String).returns(T.attached_class) + end + def self.new( + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. + arguments:, + # The name of the function to call. + name: + ) + end + + sig { override.returns({ arguments: String, name: String }) } + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_message_param.rbi b/rbi/openai/models/chat/chat_completion_message_param.rbi new file mode 100644 index 00000000..ae3e1358 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_message_param.rbi @@ -0,0 +1,36 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionMessageParam = Chat::ChatCompletionMessageParam + + module Chat + # Developer-provided instructions that the model should follow, regardless of + # messages sent by the user. With o1 models and newer, `developer` messages + # replace the previous `system` messages. + module ChatCompletionMessageParam + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionDeveloperMessageParam, + OpenAI::Chat::ChatCompletionSystemMessageParam, + OpenAI::Chat::ChatCompletionUserMessageParam, + OpenAI::Chat::ChatCompletionAssistantMessageParam, + OpenAI::Chat::ChatCompletionToolMessageParam, + OpenAI::Chat::ChatCompletionFunctionMessageParam + ) + end + + sig do + override.returns( + T::Array[OpenAI::Chat::ChatCompletionMessageParam::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_message_tool_call.rbi b/rbi/openai/models/chat/chat_completion_message_tool_call.rbi new file mode 100644 index 00000000..6c99f830 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_message_tool_call.rbi @@ -0,0 +1,30 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionMessageToolCall = Chat::ChatCompletionMessageToolCall + + module Chat + # A call to a function tool created by the model. 
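+ # Note that this union covers both variants below; a hedged dispatch
+ # sketch (handler names are assumptions):
+ #
+ #   case tool_call
+ #   when OpenAI::Chat::ChatCompletionMessageFunctionToolCall
+ #     run_function(tool_call.function.name, tool_call.function.arguments)
+ #   when OpenAI::Chat::ChatCompletionMessageCustomToolCall
+ #     run_custom(tool_call.custom.name, tool_call.custom.input)
+ #   end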
+ module ChatCompletionMessageToolCall + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall, + OpenAI::Chat::ChatCompletionMessageCustomToolCall + ) + end + + sig do + override.returns( + T::Array[OpenAI::Chat::ChatCompletionMessageToolCall::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_modality.rbi b/rbi/openai/models/chat/chat_completion_modality.rbi new file mode 100644 index 00000000..e65aca82 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_modality.rbi @@ -0,0 +1,29 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionModality = Chat::ChatCompletionModality + + module Chat + module ChatCompletionModality + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::Chat::ChatCompletionModality) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = T.let(:text, OpenAI::Chat::ChatCompletionModality::TaggedSymbol) + AUDIO = + T.let(:audio, OpenAI::Chat::ChatCompletionModality::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::Chat::ChatCompletionModality::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi b/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi new file mode 100644 index 00000000..f85abf64 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi @@ -0,0 +1,86 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionNamedToolChoice = Chat::ChatCompletionNamedToolChoice + + module Chat + class ChatCompletionNamedToolChoice < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionNamedToolChoice, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(OpenAI::Chat::ChatCompletionNamedToolChoice::Function) } + attr_reader :function + + sig do + params( + function: + OpenAI::Chat::ChatCompletionNamedToolChoice::Function::OrHash + ).void + end + attr_writer :function + + # For function calling, the type is always `function`. + sig { returns(Symbol) } + attr_accessor :type + + # Specifies a tool the model should use. Use to force the model to call a specific + # function. + sig do + params( + function: + OpenAI::Chat::ChatCompletionNamedToolChoice::Function::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + function:, + # For function calling, the type is always `function`. + type: :function + ) + end + + sig do + override.returns( + { + function: OpenAI::Chat::ChatCompletionNamedToolChoice::Function, + type: Symbol + } + ) + end + def to_hash + end + + class Function < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionNamedToolChoice::Function, + OpenAI::Internal::AnyHash + ) + end + + # The name of the function to call. + sig { returns(String) } + attr_accessor :name + + sig { params(name: String).returns(T.attached_class) } + def self.new( + # The name of the function to call. 
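+ # (Illustration, not normative; the function name is invented:) at the
+ # request level this typically appears as
+ #
+ #   tool_choice: { type: :function, function: { name: "get_weather" } }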
+ name: + ) + end + + sig { override.returns({ name: String }) } + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_named_tool_choice_custom.rbi b/rbi/openai/models/chat/chat_completion_named_tool_choice_custom.rbi new file mode 100644 index 00000000..0c4ba4f2 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_named_tool_choice_custom.rbi @@ -0,0 +1,89 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionNamedToolChoiceCustom = + Chat::ChatCompletionNamedToolChoiceCustom + + module Chat + class ChatCompletionNamedToolChoiceCustom < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns(OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom) + end + attr_reader :custom + + sig do + params( + custom: + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom::OrHash + ).void + end + attr_writer :custom + + # For custom tool calling, the type is always `custom`. + sig { returns(Symbol) } + attr_accessor :type + + # Specifies a tool the model should use. Use to force the model to call a specific + # custom tool. + sig do + params( + custom: + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + custom:, + # For custom tool calling, the type is always `custom`. + type: :custom + ) + end + + sig do + override.returns( + { + custom: OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom, + type: Symbol + } + ) + end + def to_hash + end + + class Custom < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom, + OpenAI::Internal::AnyHash + ) + end + + # The name of the custom tool to call. + sig { returns(String) } + attr_accessor :name + + sig { params(name: String).returns(T.attached_class) } + def self.new( + # The name of the custom tool to call. + name: + ) + end + + sig { override.returns({ name: String }) } + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_prediction_content.rbi b/rbi/openai/models/chat/chat_completion_prediction_content.rbi new file mode 100644 index 00000000..e6fde114 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_prediction_content.rbi @@ -0,0 +1,99 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionPredictionContent = Chat::ChatCompletionPredictionContent + + module Chat + class ChatCompletionPredictionContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionPredictionContent, + OpenAI::Internal::AnyHash + ) + end + + # The content that should be matched when generating a model response. If + # generated tokens would match this content, the entire model response can be + # returned much more quickly. + sig do + returns( + OpenAI::Chat::ChatCompletionPredictionContent::Content::Variants + ) + end + attr_accessor :content + + # The type of the predicted content you want to provide. This type is currently + # always `content`. + sig { returns(Symbol) } + attr_accessor :type + + # Static predicted output content, such as the content of a text file that is + # being regenerated. 
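+ # A minimal sketch under assumed inputs (the path is invented); `type`
+ # defaults to :content and can be omitted:
+ #
+ #   OpenAI::Chat::ChatCompletionPredictionContent.new(
+ #     content: File.read("app/models/user.rb")
+ #   )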
+ sig do + params( + content: + OpenAI::Chat::ChatCompletionPredictionContent::Content::Variants, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The content that should be matched when generating a model response. If + # generated tokens would match this content, the entire model response can be + # returned much more quickly. + content:, + # The type of the predicted content you want to provide. This type is currently + # always `content`. + type: :content + ) + end + + sig do + override.returns( + { + content: + OpenAI::Chat::ChatCompletionPredictionContent::Content::Variants, + type: Symbol + } + ) + end + def to_hash + end + + # The content that should be matched when generating a model response. If + # generated tokens would match this content, the entire model response can be + # returned much more quickly. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[OpenAI::Chat::ChatCompletionContentPartText] + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionPredictionContent::Content::Variants + ] + ) + end + def self.variants + end + + ChatCompletionContentPartTextArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + OpenAI::Chat::ChatCompletionContentPartText + ], + OpenAI::Internal::Type::Converter + ) + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_reasoning_effort.rbi b/rbi/openai/models/chat/chat_completion_reasoning_effort.rbi new file mode 100644 index 00000000..87be1185 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_reasoning_effort.rbi @@ -0,0 +1,11 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionReasoningEffort = Chat::ChatCompletionReasoningEffort + + module Chat + ChatCompletionReasoningEffort = OpenAI::Models::ReasoningEffort + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_role.rbi b/rbi/openai/models/chat/chat_completion_role.rbi new file mode 100644 index 00000000..63f5fc3d --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_role.rbi @@ -0,0 +1,36 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionRole = Chat::ChatCompletionRole + + module Chat + # The role of the author of a message + module ChatCompletionRole + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::Chat::ChatCompletionRole) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + DEVELOPER = + T.let(:developer, OpenAI::Chat::ChatCompletionRole::TaggedSymbol) + SYSTEM = T.let(:system, OpenAI::Chat::ChatCompletionRole::TaggedSymbol) + USER = T.let(:user, OpenAI::Chat::ChatCompletionRole::TaggedSymbol) + ASSISTANT = + T.let(:assistant, OpenAI::Chat::ChatCompletionRole::TaggedSymbol) + TOOL = T.let(:tool, OpenAI::Chat::ChatCompletionRole::TaggedSymbol) + FUNCTION = + T.let(:function, OpenAI::Chat::ChatCompletionRole::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::Chat::ChatCompletionRole::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_store_message.rbi b/rbi/openai/models/chat/chat_completion_store_message.rbi new file mode 100644 index 00000000..dee06a9d --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_store_message.rbi @@ -0,0 +1,100 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionStoreMessage = Chat::ChatCompletionStoreMessage + + module Chat + class ChatCompletionStoreMessage < 
OpenAI::Models::Chat::ChatCompletionMessage + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionStoreMessage, + OpenAI::Internal::AnyHash + ) + end + + # The identifier of the chat message. + sig { returns(String) } + attr_accessor :id + + # If a content parts array was provided, this is an array of `text` and + # `image_url` parts. Otherwise, null. + sig do + returns( + T.nilable( + T::Array[ + OpenAI::Chat::ChatCompletionStoreMessage::ContentPart::Variants + ] + ) + ) + end + attr_accessor :content_parts + + # A chat completion message generated by the model. + sig do + params( + id: String, + content_parts: + T.nilable( + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionContentPartText::OrHash, + OpenAI::Chat::ChatCompletionContentPartImage::OrHash + ) + ] + ) + ).returns(T.attached_class) + end + def self.new( + # The identifier of the chat message. + id:, + # If a content parts array was provided, this is an array of `text` and + # `image_url` parts. Otherwise, null. + content_parts: nil + ) + end + + sig do + override.returns( + { + id: String, + content_parts: + T.nilable( + T::Array[ + OpenAI::Chat::ChatCompletionStoreMessage::ContentPart::Variants + ] + ) + } + ) + end + def to_hash + end + + # Learn about + # [text inputs](https://platform.openai.com/docs/guides/text-generation). + module ContentPart + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionContentPartText, + OpenAI::Chat::ChatCompletionContentPartImage + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionStoreMessage::ContentPart::Variants + ] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_stream_options.rbi b/rbi/openai/models/chat/chat_completion_stream_options.rbi new file mode 100644 index 00000000..e970e19e --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_stream_options.rbi @@ -0,0 +1,78 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionStreamOptions = Chat::ChatCompletionStreamOptions + + module Chat + class ChatCompletionStreamOptions < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionStreamOptions, + OpenAI::Internal::AnyHash + ) + end + + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + sig { returns(T.nilable(T::Boolean)) } + attr_reader :include_obfuscation + + sig { params(include_obfuscation: T::Boolean).void } + attr_writer :include_obfuscation + + # If set, an additional chunk will be streamed before the `data: [DONE]` message. + # The `usage` field on this chunk shows the token usage statistics for the entire + # request, and the `choices` field will always be an empty array. + # + # All other chunks will also include a `usage` field, but with a null value. + # **NOTE:** If the stream is interrupted, you may not receive the final usage + # chunk which contains the total token usage for the request. 
+ sig { returns(T.nilable(T::Boolean)) } + attr_reader :include_usage + + sig { params(include_usage: T::Boolean).void } + attr_writer :include_usage + + # Options for streaming response. Only set this when you set `stream: true`. + sig do + params( + include_obfuscation: T::Boolean, + include_usage: T::Boolean + ).returns(T.attached_class) + end + def self.new( + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + include_obfuscation: nil, + # If set, an additional chunk will be streamed before the `data: [DONE]` message. + # The `usage` field on this chunk shows the token usage statistics for the entire + # request, and the `choices` field will always be an empty array. + # + # All other chunks will also include a `usage` field, but with a null value. + # **NOTE:** If the stream is interrupted, you may not receive the final usage + # chunk which contains the total token usage for the request. + include_usage: nil + ) + end + + sig do + override.returns( + { include_obfuscation: T::Boolean, include_usage: T::Boolean } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_system_message_param.rbi b/rbi/openai/models/chat/chat_completion_system_message_param.rbi new file mode 100644 index 00000000..9a4f0597 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_system_message_param.rbi @@ -0,0 +1,105 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionSystemMessageParam = Chat::ChatCompletionSystemMessageParam + + module Chat + class ChatCompletionSystemMessageParam < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionSystemMessageParam, + OpenAI::Internal::AnyHash + ) + end + + # The contents of the system message. + sig do + returns( + OpenAI::Chat::ChatCompletionSystemMessageParam::Content::Variants + ) + end + attr_accessor :content + + # The role of the messages author, in this case `system`. + sig { returns(Symbol) } + attr_accessor :role + + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. + sig { returns(T.nilable(String)) } + attr_reader :name + + sig { params(name: String).void } + attr_writer :name + + # Developer-provided instructions that the model should follow, regardless of + # messages sent by the user. With o1 models and newer, use `developer` messages + # for this purpose instead. + sig do + params( + content: + OpenAI::Chat::ChatCompletionSystemMessageParam::Content::Variants, + name: String, + role: Symbol + ).returns(T.attached_class) + end + def self.new( + # The contents of the system message. + content:, + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. + name: nil, + # The role of the messages author, in this case `system`. 
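+          # (As a sketch, callers typically pass the equivalent hash form, e.g.
+          # `{role: :system, content: "You are a terse assistant."}`, which the SDK
+          # coerces into this model; the instruction text is illustrative only.)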
+ role: :system + ) + end + + sig do + override.returns( + { + content: + OpenAI::Chat::ChatCompletionSystemMessageParam::Content::Variants, + role: Symbol, + name: String + } + ) + end + def to_hash + end + + # The contents of the system message. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[OpenAI::Chat::ChatCompletionContentPartText] + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionSystemMessageParam::Content::Variants + ] + ) + end + def self.variants + end + + ChatCompletionContentPartTextArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + OpenAI::Chat::ChatCompletionContentPartText + ], + OpenAI::Internal::Type::Converter + ) + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_token_logprob.rbi b/rbi/openai/models/chat/chat_completion_token_logprob.rbi new file mode 100644 index 00000000..0cca0836 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_token_logprob.rbi @@ -0,0 +1,151 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionTokenLogprob = Chat::ChatCompletionTokenLogprob + + module Chat + class ChatCompletionTokenLogprob < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionTokenLogprob, + OpenAI::Internal::AnyHash + ) + end + + # The token. + sig { returns(String) } + attr_accessor :token + + # A list of integers representing the UTF-8 bytes representation of the token. + # Useful in instances where characters are represented by multiple tokens and + # their byte representations must be combined to generate the correct text + # representation. Can be `null` if there is no bytes representation for the token. + sig { returns(T.nilable(T::Array[Integer])) } + attr_accessor :bytes + + # The log probability of this token, if it is within the top 20 most likely + # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + # unlikely. + sig { returns(Float) } + attr_accessor :logprob + + # List of the most likely tokens and their log probability, at this token + # position. In rare cases, there may be fewer than the number of requested + # `top_logprobs` returned. + sig do + returns( + T::Array[OpenAI::Chat::ChatCompletionTokenLogprob::TopLogprob] + ) + end + attr_accessor :top_logprobs + + sig do + params( + token: String, + bytes: T.nilable(T::Array[Integer]), + logprob: Float, + top_logprobs: + T::Array[ + OpenAI::Chat::ChatCompletionTokenLogprob::TopLogprob::OrHash + ] + ).returns(T.attached_class) + end + def self.new( + # The token. + token:, + # A list of integers representing the UTF-8 bytes representation of the token. + # Useful in instances where characters are represented by multiple tokens and + # their byte representations must be combined to generate the correct text + # representation. Can be `null` if there is no bytes representation for the token. + bytes:, + # The log probability of this token, if it is within the top 20 most likely + # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + # unlikely. + logprob:, + # List of the most likely tokens and their log probability, at this token + # position. In rare cases, there may be fewer than the number of requested + # `top_logprobs` returned. 
+ top_logprobs: + ) + end + + sig do + override.returns( + { + token: String, + bytes: T.nilable(T::Array[Integer]), + logprob: Float, + top_logprobs: + T::Array[OpenAI::Chat::ChatCompletionTokenLogprob::TopLogprob] + } + ) + end + def to_hash + end + + class TopLogprob < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionTokenLogprob::TopLogprob, + OpenAI::Internal::AnyHash + ) + end + + # The token. + sig { returns(String) } + attr_accessor :token + + # A list of integers representing the UTF-8 bytes representation of the token. + # Useful in instances where characters are represented by multiple tokens and + # their byte representations must be combined to generate the correct text + # representation. Can be `null` if there is no bytes representation for the token. + sig { returns(T.nilable(T::Array[Integer])) } + attr_accessor :bytes + + # The log probability of this token, if it is within the top 20 most likely + # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + # unlikely. + sig { returns(Float) } + attr_accessor :logprob + + sig do + params( + token: String, + bytes: T.nilable(T::Array[Integer]), + logprob: Float + ).returns(T.attached_class) + end + def self.new( + # The token. + token:, + # A list of integers representing the UTF-8 bytes representation of the token. + # Useful in instances where characters are represented by multiple tokens and + # their byte representations must be combined to generate the correct text + # representation. Can be `null` if there is no bytes representation for the token. + bytes:, + # The log probability of this token, if it is within the top 20 most likely + # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + # unlikely. + logprob: + ) + end + + sig do + override.returns( + { + token: String, + bytes: T.nilable(T::Array[Integer]), + logprob: Float + } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_tool.rbi b/rbi/openai/models/chat/chat_completion_tool.rbi new file mode 100644 index 00000000..4f687406 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_tool.rbi @@ -0,0 +1,28 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionTool = Chat::ChatCompletionTool + + module Chat + # A function tool that can be used to generate a response. + module ChatCompletionTool + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionFunctionTool, + OpenAI::Chat::ChatCompletionCustomTool + ) + end + + sig do + override.returns(T::Array[OpenAI::Chat::ChatCompletionTool::Variants]) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi b/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi new file mode 100644 index 00000000..df0f67a7 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi @@ -0,0 +1,79 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionToolChoiceOption = Chat::ChatCompletionToolChoiceOption + + module Chat + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. 
Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + # + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. + module ChatCompletionToolChoiceOption + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::TaggedSymbol, + OpenAI::Chat::ChatCompletionAllowedToolChoice, + OpenAI::Chat::ChatCompletionNamedToolChoice, + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom + ) + end + + # `none` means the model will not call any tool and instead generates a message. + # `auto` means the model can pick between generating a message or calling one or + # more tools. `required` means the model must call one or more tools. + module Auto + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Chat::ChatCompletionToolChoiceOption::Auto) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + NONE = + T.let( + :none, + OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::TaggedSymbol + ) + REQUIRED = + T.let( + :required, + OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::TaggedSymbol + ] + ) + end + def self.values + end + end + + sig do + override.returns( + T::Array[OpenAI::Chat::ChatCompletionToolChoiceOption::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_tool_message_param.rbi b/rbi/openai/models/chat/chat_completion_tool_message_param.rbi new file mode 100644 index 00000000..c0ba7e87 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_tool_message_param.rbi @@ -0,0 +1,97 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionToolMessageParam = Chat::ChatCompletionToolMessageParam + + module Chat + class ChatCompletionToolMessageParam < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionToolMessageParam, + OpenAI::Internal::AnyHash + ) + end + + # The contents of the tool message. + sig do + returns( + OpenAI::Chat::ChatCompletionToolMessageParam::Content::Variants + ) + end + attr_accessor :content + + # The role of the messages author, in this case `tool`. + sig { returns(Symbol) } + attr_accessor :role + + # Tool call that this message is responding to. + sig { returns(String) } + attr_accessor :tool_call_id + + sig do + params( + content: + OpenAI::Chat::ChatCompletionToolMessageParam::Content::Variants, + tool_call_id: String, + role: Symbol + ).returns(T.attached_class) + end + def self.new( + # The contents of the tool message. + content:, + # Tool call that this message is responding to. + tool_call_id:, + # The role of the messages author, in this case `tool`. + role: :tool + ) + end + + sig do + override.returns( + { + content: + OpenAI::Chat::ChatCompletionToolMessageParam::Content::Variants, + role: Symbol, + tool_call_id: String + } + ) + end + def to_hash + end + + # The contents of the tool message. 
+ module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[OpenAI::Chat::ChatCompletionContentPartText] + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionToolMessageParam::Content::Variants + ] + ) + end + def self.variants + end + + ChatCompletionContentPartTextArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + OpenAI::Chat::ChatCompletionContentPartText + ], + OpenAI::Internal::Type::Converter + ) + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_user_message_param.rbi b/rbi/openai/models/chat/chat_completion_user_message_param.rbi new file mode 100644 index 00000000..f0fdf008 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_user_message_param.rbi @@ -0,0 +1,104 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionUserMessageParam = Chat::ChatCompletionUserMessageParam + + module Chat + class ChatCompletionUserMessageParam < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionUserMessageParam, + OpenAI::Internal::AnyHash + ) + end + + # The contents of the user message. + sig do + returns( + OpenAI::Chat::ChatCompletionUserMessageParam::Content::Variants + ) + end + attr_accessor :content + + # The role of the messages author, in this case `user`. + sig { returns(Symbol) } + attr_accessor :role + + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. + sig { returns(T.nilable(String)) } + attr_reader :name + + sig { params(name: String).void } + attr_writer :name + + # Messages sent by an end user, containing prompts or additional context + # information. + sig do + params( + content: + OpenAI::Chat::ChatCompletionUserMessageParam::Content::Variants, + name: String, + role: Symbol + ).returns(T.attached_class) + end + def self.new( + # The contents of the user message. + content:, + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. + name: nil, + # The role of the messages author, in this case `user`. + role: :user + ) + end + + sig do + override.returns( + { + content: + OpenAI::Chat::ChatCompletionUserMessageParam::Content::Variants, + role: Symbol, + name: String + } + ) + end + def to_hash + end + + # The contents of the user message. 
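+        #
+        # Either variant is accepted, as sketched below (the image URL is a
+        # placeholder):
+        #
+        #   "Describe this image"
+        #
+        #   [
+        #     {type: :text, text: "Describe this image"},
+        #     {type: :image_url, image_url: {url: "https://example.com/cat.png"}}
+        #   ]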
+ module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[OpenAI::Chat::ChatCompletionContentPart::Variants] + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionUserMessageParam::Content::Variants + ] + ) + end + def self.variants + end + + ChatCompletionContentPartArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + union: OpenAI::Chat::ChatCompletionContentPart + ], + OpenAI::Internal::Type::Converter + ) + end + end + end + end +end diff --git a/rbi/openai/models/chat/completion_create_params.rbi b/rbi/openai/models/chat/completion_create_params.rbi new file mode 100644 index 00000000..9d7cf007 --- /dev/null +++ b/rbi/openai/models/chat/completion_create_params.rbi @@ -0,0 +1,1474 @@ +# typed: strong + +module OpenAI + module Models + module Chat + class CompletionCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::CompletionCreateParams, + OpenAI::Internal::AnyHash + ) + end + + # A list of messages comprising the conversation so far. Depending on the + # [model](https://platform.openai.com/docs/models) you use, different message + # types (modalities) are supported, like + # [text](https://platform.openai.com/docs/guides/text-generation), + # [images](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio). + sig do + returns( + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionDeveloperMessageParam, + OpenAI::Chat::ChatCompletionSystemMessageParam, + OpenAI::Chat::ChatCompletionUserMessageParam, + OpenAI::Chat::ChatCompletionAssistantMessageParam, + OpenAI::Chat::ChatCompletionToolMessageParam, + OpenAI::Chat::ChatCompletionFunctionMessageParam + ) + ] + ) + end + attr_accessor :messages + + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. + sig { returns(T.any(String, OpenAI::ChatModel::OrSymbol)) } + attr_accessor :model + + # Parameters for audio output. Required when audio output is requested with + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). + sig { returns(T.nilable(OpenAI::Chat::ChatCompletionAudioParam)) } + attr_reader :audio + + sig do + params( + audio: T.nilable(OpenAI::Chat::ChatCompletionAudioParam::OrHash) + ).void + end + attr_writer :audio + + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. + sig { returns(T.nilable(Float)) } + attr_accessor :frequency_penalty + + # Deprecated in favor of `tool_choice`. + # + # Controls which (if any) function is called by the model. + # + # `none` means the model will not call a function and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling a + # function. + # + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. + # + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. 
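+        #
+        # (A hedged legacy sketch, with an illustrative function name:
+        # `function_call: {name: "my_function"}`; new code should prefer
+        # `tool_choice`.)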
+ sig do + returns( + T.nilable( + T.any( + OpenAI::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode::OrSymbol, + OpenAI::Chat::ChatCompletionFunctionCallOption + ) + ) + ) + end + attr_reader :function_call + + sig do + params( + function_call: + T.any( + OpenAI::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode::OrSymbol, + OpenAI::Chat::ChatCompletionFunctionCallOption::OrHash + ) + ).void + end + attr_writer :function_call + + # Deprecated in favor of `tools`. + # + # A list of functions the model may generate JSON inputs for. + sig do + returns( + T.nilable(T::Array[OpenAI::Chat::CompletionCreateParams::Function]) + ) + end + attr_reader :functions + + sig do + params( + functions: + T::Array[OpenAI::Chat::CompletionCreateParams::Function::OrHash] + ).void + end + attr_writer :functions + + # Modify the likelihood of specified tokens appearing in the completion. + # + # Accepts a JSON object that maps tokens (specified by their token ID in the + # tokenizer) to an associated bias value from -100 to 100. Mathematically, the + # bias is added to the logits generated by the model prior to sampling. The exact + # effect will vary per model, but values between -1 and 1 should decrease or + # increase likelihood of selection; values like -100 or 100 should result in a ban + # or exclusive selection of the relevant token. + sig { returns(T.nilable(T::Hash[Symbol, Integer])) } + attr_accessor :logit_bias + + # Whether to return log probabilities of the output tokens or not. If true, + # returns the log probabilities of each output token returned in the `content` of + # `message`. + sig { returns(T.nilable(T::Boolean)) } + attr_accessor :logprobs + + # An upper bound for the number of tokens that can be generated for a completion, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + sig { returns(T.nilable(Integer)) } + attr_accessor :max_completion_tokens + + # The maximum number of [tokens](/tokenizer) that can be generated in the chat + # completion. This value can be used to control + # [costs](https://openai.com/api/pricing/) for text generated via API. + # + # This value is now deprecated in favor of `max_completion_tokens`, and is not + # compatible with + # [o-series models](https://platform.openai.com/docs/guides/reasoning). + sig { returns(T.nilable(Integer)) } + attr_accessor :max_tokens + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # Output types that you would like the model to generate. Most models are capable + # of generating text, which is the default: + # + # `["text"]` + # + # The `gpt-4o-audio-preview` model can also be used to + # [generate audio](https://platform.openai.com/docs/guides/audio). To request that + # this model generate both text and audio responses, you can use: + # + # `["text", "audio"]` + sig do + returns( + T.nilable( + T::Array[OpenAI::Chat::CompletionCreateParams::Modality::OrSymbol] + ) + ) + end + attr_accessor :modalities + + # How many chat completion choices to generate for each input message. 
Note that + # you will be charged based on the number of generated tokens across all of the + # choices. Keep `n` as `1` to minimize costs. + sig { returns(T.nilable(Integer)) } + attr_accessor :n + + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. + sig { returns(T.nilable(T::Boolean)) } + attr_reader :parallel_tool_calls + + sig { params(parallel_tool_calls: T::Boolean).void } + attr_writer :parallel_tool_calls + + # Static predicted output content, such as the content of a text file that is + # being regenerated. + sig do + returns(T.nilable(OpenAI::Chat::ChatCompletionPredictionContent)) + end + attr_reader :prediction + + sig do + params( + prediction: + T.nilable(OpenAI::Chat::ChatCompletionPredictionContent::OrHash) + ).void + end + attr_writer :prediction + + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. + sig { returns(T.nilable(Float)) } + attr_accessor :presence_penalty + + # Used by OpenAI to cache responses for similar requests to optimize your cache + # hit rates. Replaces the `user` field. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + sig { returns(T.nilable(String)) } + attr_reader :prompt_cache_key + + sig { params(prompt_cache_key: String).void } + attr_writer :prompt_cache_key + + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } + attr_accessor :reasoning_effort + + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + sig do + returns( + T.nilable( + T.any( + OpenAI::ResponseFormatText, + OpenAI::ResponseFormatJSONSchema, + OpenAI::ResponseFormatJSONObject + ) + ) + ) + end + attr_reader :response_format + + sig do + params( + response_format: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ) + ).void + end + attr_writer :response_format + + # A stable identifier used to help detect users of your application that may be + # violating OpenAI's usage policies. The IDs should be a string that uniquely + # identifies each user. We recommend hashing their username or email address, in + # order to avoid sending us any identifying information. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + sig { returns(T.nilable(String)) } + attr_reader :safety_identifier + + sig { params(safety_identifier: String).void } + attr_writer :safety_identifier + + # This feature is in Beta. 
If specified, our system will make a best effort to + # sample deterministically, such that repeated requests with the same `seed` and + # parameters should return the same result. Determinism is not guaranteed, and you + # should refer to the `system_fingerprint` response parameter to monitor changes + # in the backend. + sig { returns(T.nilable(Integer)) } + attr_accessor :seed + + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + sig do + returns( + T.nilable( + OpenAI::Chat::CompletionCreateParams::ServiceTier::OrSymbol + ) + ) + end + attr_accessor :service_tier + + # Not supported with latest reasoning models `o3` and `o4-mini`. + # + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. + sig do + returns( + T.nilable(OpenAI::Chat::CompletionCreateParams::Stop::Variants) + ) + end + attr_accessor :stop + + # Whether or not to store the output of this chat completion request for use in + # our [model distillation](https://platform.openai.com/docs/guides/distillation) + # or [evals](https://platform.openai.com/docs/guides/evals) products. + # + # Supports text and image inputs. Note: image inputs over 8MB will be dropped. + sig { returns(T.nilable(T::Boolean)) } + attr_accessor :store + + # Options for streaming response. Only set this when you set `stream: true`. + sig { returns(T.nilable(OpenAI::Chat::ChatCompletionStreamOptions)) } + attr_reader :stream_options + + sig do + params( + stream_options: + T.nilable(OpenAI::Chat::ChatCompletionStreamOptions::OrHash) + ).void + end + attr_writer :stream_options + + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. + sig { returns(T.nilable(Float)) } + attr_accessor :temperature + + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + # + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. 
+ sig do + returns( + T.nilable( + T.any( + OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, + OpenAI::Chat::ChatCompletionAllowedToolChoice, + OpenAI::Chat::ChatCompletionNamedToolChoice, + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom + ) + ) + ) + end + attr_reader :tool_choice + + sig do + params( + tool_choice: + T.any( + OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, + OpenAI::Chat::ChatCompletionAllowedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::OrHash + ) + ).void + end + attr_writer :tool_choice + + # A list of tools the model may call. You can provide either + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + # or [function tools](https://platform.openai.com/docs/guides/function-calling). + sig do + returns( + T.nilable( + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionFunctionTool, + OpenAI::Chat::ChatCompletionCustomTool + ) + ] + ) + ) + end + attr_reader :tools + + sig do + params( + tools: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionFunctionTool::OrHash, + OpenAI::Chat::ChatCompletionCustomTool::OrHash + ) + ] + ).void + end + attr_writer :tools + + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + # `logprobs` must be set to `true` if this parameter is used. + sig { returns(T.nilable(Integer)) } + attr_accessor :top_logprobs + + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. + sig { returns(T.nilable(Float)) } + attr_accessor :top_p + + # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + # `prompt_cache_key` instead to maintain caching optimizations. A stable + # identifier for your end-users. Used to boost cache hit rates by better bucketing + # similar requests and to help OpenAI detect and prevent abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + sig { returns(T.nilable(String)) } + attr_reader :user + + sig { params(user: String).void } + attr_writer :user + + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + sig do + returns( + T.nilable(OpenAI::Chat::CompletionCreateParams::Verbosity::OrSymbol) + ) + end + attr_accessor :verbosity + + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). 
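+        #
+        # A hedged configuration sketch using the option shapes defined further
+        # below (the location values are illustrative):
+        #
+        #   web_search_options: {
+        #     search_context_size: :low,
+        #     user_location: {
+        #       type: :approximate,
+        #       approximate: {city: "San Francisco", country: "US"}
+        #     }
+        #   }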
+ sig do + returns( + T.nilable(OpenAI::Chat::CompletionCreateParams::WebSearchOptions) + ) + end + attr_reader :web_search_options + + sig do + params( + web_search_options: + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::OrHash + ).void + end + attr_writer :web_search_options + + sig do + params( + messages: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionDeveloperMessageParam::OrHash, + OpenAI::Chat::ChatCompletionSystemMessageParam::OrHash, + OpenAI::Chat::ChatCompletionUserMessageParam::OrHash, + OpenAI::Chat::ChatCompletionAssistantMessageParam::OrHash, + OpenAI::Chat::ChatCompletionToolMessageParam::OrHash, + OpenAI::Chat::ChatCompletionFunctionMessageParam::OrHash + ) + ], + model: T.any(String, OpenAI::ChatModel::OrSymbol), + audio: T.nilable(OpenAI::Chat::ChatCompletionAudioParam::OrHash), + frequency_penalty: T.nilable(Float), + function_call: + T.any( + OpenAI::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode::OrSymbol, + OpenAI::Chat::ChatCompletionFunctionCallOption::OrHash + ), + functions: + T::Array[OpenAI::Chat::CompletionCreateParams::Function::OrHash], + logit_bias: T.nilable(T::Hash[Symbol, Integer]), + logprobs: T.nilable(T::Boolean), + max_completion_tokens: T.nilable(Integer), + max_tokens: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + modalities: + T.nilable( + T::Array[ + OpenAI::Chat::CompletionCreateParams::Modality::OrSymbol + ] + ), + n: T.nilable(Integer), + parallel_tool_calls: T::Boolean, + prediction: + T.nilable(OpenAI::Chat::ChatCompletionPredictionContent::OrHash), + presence_penalty: T.nilable(Float), + prompt_cache_key: String, + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + response_format: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ), + safety_identifier: String, + seed: T.nilable(Integer), + service_tier: + T.nilable( + OpenAI::Chat::CompletionCreateParams::ServiceTier::OrSymbol + ), + stop: + T.nilable(OpenAI::Chat::CompletionCreateParams::Stop::Variants), + store: T.nilable(T::Boolean), + stream_options: + T.nilable(OpenAI::Chat::ChatCompletionStreamOptions::OrHash), + temperature: T.nilable(Float), + tool_choice: + T.any( + OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, + OpenAI::Chat::ChatCompletionAllowedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::OrHash + ), + tools: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionFunctionTool::OrHash, + OpenAI::Chat::ChatCompletionCustomTool::OrHash + ) + ], + top_logprobs: T.nilable(Integer), + top_p: T.nilable(Float), + user: String, + verbosity: + T.nilable( + OpenAI::Chat::CompletionCreateParams::Verbosity::OrSymbol + ), + web_search_options: + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::OrHash, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # A list of messages comprising the conversation so far. Depending on the + # [model](https://platform.openai.com/docs/models) you use, different message + # types (modalities) are supported, like + # [text](https://platform.openai.com/docs/guides/text-generation), + # [images](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio). + messages:, + # Model ID used to generate the response, like `gpt-4o` or `o3`. 
OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. + model:, + # Parameters for audio output. Required when audio output is requested with + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). + audio: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. + frequency_penalty: nil, + # Deprecated in favor of `tool_choice`. + # + # Controls which (if any) function is called by the model. + # + # `none` means the model will not call a function and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling a + # function. + # + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. + # + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. + function_call: nil, + # Deprecated in favor of `tools`. + # + # A list of functions the model may generate JSON inputs for. + functions: nil, + # Modify the likelihood of specified tokens appearing in the completion. + # + # Accepts a JSON object that maps tokens (specified by their token ID in the + # tokenizer) to an associated bias value from -100 to 100. Mathematically, the + # bias is added to the logits generated by the model prior to sampling. The exact + # effect will vary per model, but values between -1 and 1 should decrease or + # increase likelihood of selection; values like -100 or 100 should result in a ban + # or exclusive selection of the relevant token. + logit_bias: nil, + # Whether to return log probabilities of the output tokens or not. If true, + # returns the log probabilities of each output token returned in the `content` of + # `message`. + logprobs: nil, + # An upper bound for the number of tokens that can be generated for a completion, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_completion_tokens: nil, + # The maximum number of [tokens](/tokenizer) that can be generated in the chat + # completion. This value can be used to control + # [costs](https://openai.com/api/pricing/) for text generated via API. + # + # This value is now deprecated in favor of `max_completion_tokens`, and is not + # compatible with + # [o-series models](https://platform.openai.com/docs/guides/reasoning). + max_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # Output types that you would like the model to generate. Most models are capable + # of generating text, which is the default: + # + # `["text"]` + # + # The `gpt-4o-audio-preview` model can also be used to + # [generate audio](https://platform.openai.com/docs/guides/audio). To request that + # this model generate both text and audio responses, you can use: + # + # `["text", "audio"]` + modalities: nil, + # How many chat completion choices to generate for each input message. 
Note that + # you will be charged based on the number of generated tokens across all of the + # choices. Keep `n` as `1` to minimize costs. + n: nil, + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. + parallel_tool_calls: nil, + # Static predicted output content, such as the content of a text file that is + # being regenerated. + prediction: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. + presence_penalty: nil, + # Used by OpenAI to cache responses for similar requests to optimize your cache + # hit rates. Replaces the `user` field. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_key: nil, + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + response_format: nil, + # A stable identifier used to help detect users of your application that may be + # violating OpenAI's usage policies. The IDs should be a string that uniquely + # identifies each user. We recommend hashing their username or email address, in + # order to avoid sending us any identifying information. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + safety_identifier: nil, + # This feature is in Beta. If specified, our system will make a best effort to + # sample deterministically, such that repeated requests with the same `seed` and + # parameters should return the same result. Determinism is not guaranteed, and you + # should refer to the `system_fingerprint` response parameter to monitor changes + # in the backend. + seed: nil, + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. 
+ service_tier: nil, + # Not supported with latest reasoning models `o3` and `o4-mini`. + # + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. + stop: nil, + # Whether or not to store the output of this chat completion request for use in + # our [model distillation](https://platform.openai.com/docs/guides/distillation) + # or [evals](https://platform.openai.com/docs/guides/evals) products. + # + # Supports text and image inputs. Note: image inputs over 8MB will be dropped. + store: nil, + # Options for streaming response. Only set this when you set `stream: true`. + stream_options: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. + temperature: nil, + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + # + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. + tool_choice: nil, + # A list of tools the model may call. You can provide either + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + # or [function tools](https://platform.openai.com/docs/guides/function-calling). + tools: nil, + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + # `logprobs` must be set to `true` if this parameter is used. + top_logprobs: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. + top_p: nil, + # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + # `prompt_cache_key` instead to maintain caching optimizations. A stable + # identifier for your end-users. Used to boost cache hit rates by better bucketing + # similar requests and to help OpenAI detect and prevent abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + user: nil, + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + verbosity: nil, + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). 
+ web_search_options: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + messages: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionDeveloperMessageParam, + OpenAI::Chat::ChatCompletionSystemMessageParam, + OpenAI::Chat::ChatCompletionUserMessageParam, + OpenAI::Chat::ChatCompletionAssistantMessageParam, + OpenAI::Chat::ChatCompletionToolMessageParam, + OpenAI::Chat::ChatCompletionFunctionMessageParam + ) + ], + model: T.any(String, OpenAI::ChatModel::OrSymbol), + audio: T.nilable(OpenAI::Chat::ChatCompletionAudioParam), + frequency_penalty: T.nilable(Float), + function_call: + T.any( + OpenAI::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode::OrSymbol, + OpenAI::Chat::ChatCompletionFunctionCallOption + ), + functions: + T::Array[OpenAI::Chat::CompletionCreateParams::Function], + logit_bias: T.nilable(T::Hash[Symbol, Integer]), + logprobs: T.nilable(T::Boolean), + max_completion_tokens: T.nilable(Integer), + max_tokens: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + modalities: + T.nilable( + T::Array[ + OpenAI::Chat::CompletionCreateParams::Modality::OrSymbol + ] + ), + n: T.nilable(Integer), + parallel_tool_calls: T::Boolean, + prediction: + T.nilable(OpenAI::Chat::ChatCompletionPredictionContent), + presence_penalty: T.nilable(Float), + prompt_cache_key: String, + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + response_format: + T.any( + OpenAI::ResponseFormatText, + OpenAI::ResponseFormatJSONSchema, + OpenAI::ResponseFormatJSONObject + ), + safety_identifier: String, + seed: T.nilable(Integer), + service_tier: + T.nilable( + OpenAI::Chat::CompletionCreateParams::ServiceTier::OrSymbol + ), + stop: + T.nilable(OpenAI::Chat::CompletionCreateParams::Stop::Variants), + store: T.nilable(T::Boolean), + stream_options: + T.nilable(OpenAI::Chat::ChatCompletionStreamOptions), + temperature: T.nilable(Float), + tool_choice: + T.any( + OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, + OpenAI::Chat::ChatCompletionAllowedToolChoice, + OpenAI::Chat::ChatCompletionNamedToolChoice, + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom + ), + tools: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionFunctionTool, + OpenAI::Chat::ChatCompletionCustomTool + ) + ], + top_logprobs: T.nilable(Integer), + top_p: T.nilable(Float), + user: String, + verbosity: + T.nilable( + OpenAI::Chat::CompletionCreateParams::Verbosity::OrSymbol + ), + web_search_options: + OpenAI::Chat::CompletionCreateParams::WebSearchOptions, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. + module Model + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias { T.any(String, OpenAI::ChatModel::TaggedSymbol) } + + sig do + override.returns( + T::Array[OpenAI::Chat::CompletionCreateParams::Model::Variants] + ) + end + def self.variants + end + end + + # Deprecated in favor of `tool_choice`. + # + # Controls which (if any) function is called by the model. + # + # `none` means the model will not call a function and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling a + # function. 
+        #
+        # Specifying a particular function via `{"name": "my_function"}` forces the model
+        # to call that function.
+        #
+        # `none` is the default when no functions are present. `auto` is the default if
+        # functions are present.
+        module FunctionCall
+          extend OpenAI::Internal::Type::Union
+
+          Variants =
+            T.type_alias do
+              T.any(
+                OpenAI::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode::TaggedSymbol,
+                OpenAI::Chat::ChatCompletionFunctionCallOption
+              )
+            end
+
+          # `none` means the model will not call a function and instead generates a message.
+          # `auto` means the model can pick between generating a message or calling a
+          # function.
+          module FunctionCallMode
+            extend OpenAI::Internal::Type::Enum
+
+            TaggedSymbol =
+              T.type_alias do
+                T.all(
+                  Symbol,
+                  OpenAI::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode
+                )
+              end
+            OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+            NONE =
+              T.let(
+                :none,
+                OpenAI::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode::TaggedSymbol
+              )
+            AUTO =
+              T.let(
+                :auto,
+                OpenAI::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode::TaggedSymbol
+              )
+
+            sig do
+              override.returns(
+                T::Array[
+                  OpenAI::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode::TaggedSymbol
+                ]
+              )
+            end
+            def self.values
+            end
+          end
+
+          sig do
+            override.returns(
+              T::Array[
+                OpenAI::Chat::CompletionCreateParams::FunctionCall::Variants
+              ]
+            )
+          end
+          def self.variants
+          end
+        end
+
+        class Function < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::Chat::CompletionCreateParams::Function,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          # The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
+          # underscores and dashes, with a maximum length of 64.
+          sig { returns(String) }
+          attr_accessor :name
+
+          # A description of what the function does, used by the model to choose when and
+          # how to call the function.
+          sig { returns(T.nilable(String)) }
+          attr_reader :description
+
+          sig { params(description: String).void }
+          attr_writer :description
+
+          # The parameters the function accepts, described as a JSON Schema object. See the
+          # [guide](https://platform.openai.com/docs/guides/function-calling) for examples,
+          # and the
+          # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+          # documentation about the format.
+          #
+          # Omitting `parameters` defines a function with an empty parameter list.
+          sig { returns(T.nilable(T::Hash[Symbol, T.anything])) }
+          attr_reader :parameters
+
+          sig { params(parameters: T::Hash[Symbol, T.anything]).void }
+          attr_writer :parameters
+
+          sig do
+            params(
+              name: String,
+              description: String,
+              parameters: T::Hash[Symbol, T.anything]
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
+            # underscores and dashes, with a maximum length of 64.
+            name:,
+            # A description of what the function does, used by the model to choose when and
+            # how to call the function.
+            description: nil,
+            # The parameters the function accepts, described as a JSON Schema object. See the
+            # [guide](https://platform.openai.com/docs/guides/function-calling) for examples,
+            # and the
+            # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+            # documentation about the format.
+            #
+            # Omitting `parameters` defines a function with an empty parameter list.
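+            # (A hedged sketch of a typical JSON Schema hash:
+            #   {type: "object", properties: {location: {type: "string"}},
+            #    required: ["location"]})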
+ parameters: nil + ) + end + + sig do + override.returns( + { + name: String, + description: String, + parameters: T::Hash[Symbol, T.anything] + } + ) + end + def to_hash + end + end + + module Modality + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Chat::CompletionCreateParams::Modality) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Chat::CompletionCreateParams::Modality::TaggedSymbol + ) + AUDIO = + T.let( + :audio, + OpenAI::Chat::CompletionCreateParams::Modality::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::CompletionCreateParams::Modality::TaggedSymbol + ] + ) + end + def self.values + end + end + + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + module ResponseFormat + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::ResponseFormatText, + OpenAI::ResponseFormatJSONSchema, + OpenAI::ResponseFormatJSONObject + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Chat::CompletionCreateParams::ResponseFormat::Variants + ] + ) + end + def self.variants + end + end + + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + module ServiceTier + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Chat::CompletionCreateParams::ServiceTier) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol + ) + DEFAULT = + T.let( + :default, + OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol + ) + FLEX = + T.let( + :flex, + OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol + ) + SCALE = + T.let( + :scale, + OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol + ) + PRIORITY = + T.let( + :priority, + OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol + ] + ) + end + def self.values + end + end + + # Not supported with latest reasoning models `o3` and `o4-mini`. 
+ # + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. + module Stop + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.nilable(T.any(String, T::Array[String])) } + + sig do + override.returns( + T::Array[OpenAI::Chat::CompletionCreateParams::Stop::Variants] + ) + end + def self.variants + end + + StringArray = + T.let( + OpenAI::Internal::Type::ArrayOf[String], + OpenAI::Internal::Type::Converter + ) + end + + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + module Verbosity + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Chat::CompletionCreateParams::Verbosity) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = + T.let( + :low, + OpenAI::Chat::CompletionCreateParams::Verbosity::TaggedSymbol + ) + MEDIUM = + T.let( + :medium, + OpenAI::Chat::CompletionCreateParams::Verbosity::TaggedSymbol + ) + HIGH = + T.let( + :high, + OpenAI::Chat::CompletionCreateParams::Verbosity::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::CompletionCreateParams::Verbosity::TaggedSymbol + ] + ) + end + def self.values + end + end + + class WebSearchOptions < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::CompletionCreateParams::WebSearchOptions, + OpenAI::Internal::AnyHash + ) + end + + # High level guidance for the amount of context window space to use for the + # search. One of `low`, `medium`, or `high`. `medium` is the default. + sig do + returns( + T.nilable( + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize::OrSymbol + ) + ) + end + attr_reader :search_context_size + + sig do + params( + search_context_size: + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize::OrSymbol + ).void + end + attr_writer :search_context_size + + # Approximate location parameters for the search. + sig do + returns( + T.nilable( + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation + ) + ) + end + attr_reader :user_location + + sig do + params( + user_location: + T.nilable( + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::OrHash + ) + ).void + end + attr_writer :user_location + + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + sig do + params( + search_context_size: + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize::OrSymbol, + user_location: + T.nilable( + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::OrHash + ) + ).returns(T.attached_class) + end + def self.new( + # High level guidance for the amount of context window space to use for the + # search. One of `low`, `medium`, or `high`. `medium` is the default. + search_context_size: nil, + # Approximate location parameters for the search. 
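+ #
+ # A hedged sketch of the expected shape (all values are placeholders), using
+ # the `UserLocation` and `Approximate` models defined below:
+ #
+ #   {
+ #     type: :approximate,
+ #     approximate: {city: "San Francisco", region: "California", country: "US"}
+ #   }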
+ user_location: nil + ) + end + + sig do + override.returns( + { + search_context_size: + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize::OrSymbol, + user_location: + T.nilable( + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation + ) + } + ) + end + def to_hash + end + + # High level guidance for the amount of context window space to use for the + # search. One of `low`, `medium`, or `high`. `medium` is the default. + module SearchContextSize + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = + T.let( + :low, + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize::TaggedSymbol + ) + MEDIUM = + T.let( + :medium, + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize::TaggedSymbol + ) + HIGH = + T.let( + :high, + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize::TaggedSymbol + ] + ) + end + def self.values + end + end + + class UserLocation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation, + OpenAI::Internal::AnyHash + ) + end + + # Approximate location parameters for the search. + sig do + returns( + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate + ) + end + attr_reader :approximate + + sig do + params( + approximate: + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate::OrHash + ).void + end + attr_writer :approximate + + # The type of location approximation. Always `approximate`. + sig { returns(Symbol) } + attr_accessor :type + + # Approximate location parameters for the search. + sig do + params( + approximate: + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Approximate location parameters for the search. + approximate:, + # The type of location approximation. Always `approximate`. + type: :approximate + ) + end + + sig do + override.returns( + { + approximate: + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate, + type: Symbol + } + ) + end + def to_hash + end + + class Approximate < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate, + OpenAI::Internal::AnyHash + ) + end + + # Free text input for the city of the user, e.g. `San Francisco`. + sig { returns(T.nilable(String)) } + attr_reader :city + + sig { params(city: String).void } + attr_writer :city + + # The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + # the user, e.g. `US`. + sig { returns(T.nilable(String)) } + attr_reader :country + + sig { params(country: String).void } + attr_writer :country + + # Free text input for the region of the user, e.g. `California`. + sig { returns(T.nilable(String)) } + attr_reader :region + + sig { params(region: String).void } + attr_writer :region + + # The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + # user, e.g. `America/Los_Angeles`. 
+ sig { returns(T.nilable(String)) } + attr_reader :timezone + + sig { params(timezone: String).void } + attr_writer :timezone + + # Approximate location parameters for the search. + sig do + params( + city: String, + country: String, + region: String, + timezone: String + ).returns(T.attached_class) + end + def self.new( + # Free text input for the city of the user, e.g. `San Francisco`. + city: nil, + # The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + # the user, e.g. `US`. + country: nil, + # Free text input for the region of the user, e.g. `California`. + region: nil, + # The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + # user, e.g. `America/Los_Angeles`. + timezone: nil + ) + end + + sig do + override.returns( + { + city: String, + country: String, + region: String, + timezone: String + } + ) + end + def to_hash + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/completion_delete_params.rbi b/rbi/openai/models/chat/completion_delete_params.rbi new file mode 100644 index 00000000..1ab23698 --- /dev/null +++ b/rbi/openai/models/chat/completion_delete_params.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + module Chat + class CompletionDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::CompletionDeleteParams, + OpenAI::Internal::AnyHash + ) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/chat/completion_list_params.rbi b/rbi/openai/models/chat/completion_list_params.rbi new file mode 100644 index 00000000..cb92d858 --- /dev/null +++ b/rbi/openai/models/chat/completion_list_params.rbi @@ -0,0 +1,131 @@ +# typed: strong + +module OpenAI + module Models + module Chat + class CompletionListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::Chat::CompletionListParams, OpenAI::Internal::AnyHash) + end + + # Identifier for the last chat completion from the previous pagination request. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # Number of Chat Completions to retrieve. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # A list of metadata keys to filter the Chat Completions by. Example: + # + # `metadata[key1]=value1&metadata[key2]=value2` + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The model used to generate the Chat Completions. + sig { returns(T.nilable(String)) } + attr_reader :model + + sig { params(model: String).void } + attr_writer :model + + # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or + # `desc` for descending order. Defaults to `asc`. 
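+ #
+ # For instance, a hedged usage sketch (assumes a configured `OpenAI::Client`
+ # named `client` and previously stored completions to list):
+ #
+ #   client.chat.completions.list(model: "gpt-4o", order: :desc, limit: 20)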
+ sig do + returns( + T.nilable(OpenAI::Chat::CompletionListParams::Order::OrSymbol) + ) + end + attr_reader :order + + sig do + params( + order: OpenAI::Chat::CompletionListParams::Order::OrSymbol + ).void + end + attr_writer :order + + sig do + params( + after: String, + limit: Integer, + metadata: T.nilable(T::Hash[Symbol, String]), + model: String, + order: OpenAI::Chat::CompletionListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Identifier for the last chat completion from the previous pagination request. + after: nil, + # Number of Chat Completions to retrieve. + limit: nil, + # A list of metadata keys to filter the Chat Completions by. Example: + # + # `metadata[key1]=value1&metadata[key2]=value2` + metadata: nil, + # The model used to generate the Chat Completions. + model: nil, + # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or + # `desc` for descending order. Defaults to `asc`. + order: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + limit: Integer, + metadata: T.nilable(T::Hash[Symbol, String]), + model: String, + order: OpenAI::Chat::CompletionListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or + # `desc` for descending order. Defaults to `asc`. + module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Chat::CompletionListParams::Order) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = + T.let(:asc, OpenAI::Chat::CompletionListParams::Order::TaggedSymbol) + DESC = + T.let( + :desc, + OpenAI::Chat::CompletionListParams::Order::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Chat::CompletionListParams::Order::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/completion_retrieve_params.rbi b/rbi/openai/models/chat/completion_retrieve_params.rbi new file mode 100644 index 00000000..b1ece45b --- /dev/null +++ b/rbi/openai/models/chat/completion_retrieve_params.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + module Chat + class CompletionRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::CompletionRetrieveParams, + OpenAI::Internal::AnyHash + ) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/chat/completion_update_params.rbi b/rbi/openai/models/chat/completion_update_params.rbi new file mode 100644 index 00000000..7897528d --- /dev/null +++ b/rbi/openai/models/chat/completion_update_params.rbi @@ -0,0 +1,58 @@ +# typed: strong + +module OpenAI + module Models + module Chat + class CompletionUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::CompletionUpdateParams, + OpenAI::Internal::AnyHash + ) + end + + # Set of 16 key-value pairs 
that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + sig do + params( + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + request_options: {} + ) + end + + sig do + override.returns( + { + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/chat/completions/message_list_params.rbi b/rbi/openai/models/chat/completions/message_list_params.rbi new file mode 100644 index 00000000..2f78c626 --- /dev/null +++ b/rbi/openai/models/chat/completions/message_list_params.rbi @@ -0,0 +1,126 @@ +# typed: strong + +module OpenAI + module Models + module Chat + module Completions + class MessageListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::Completions::MessageListParams, + OpenAI::Internal::AnyHash + ) + end + + # Identifier for the last message from the previous pagination request. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # Number of messages to retrieve. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + # for descending order. Defaults to `asc`. + sig do + returns( + T.nilable( + OpenAI::Chat::Completions::MessageListParams::Order::OrSymbol + ) + ) + end + attr_reader :order + + sig do + params( + order: + OpenAI::Chat::Completions::MessageListParams::Order::OrSymbol + ).void + end + attr_writer :order + + sig do + params( + after: String, + limit: Integer, + order: + OpenAI::Chat::Completions::MessageListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Identifier for the last message from the previous pagination request. + after: nil, + # Number of messages to retrieve. + limit: nil, + # Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + # for descending order. Defaults to `asc`. + order: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + limit: Integer, + order: + OpenAI::Chat::Completions::MessageListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + # for descending order. Defaults to `asc`. 
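+ #
+ # A hedged usage sketch (`client` is a configured `OpenAI::Client`; the
+ # completion ID shown is a placeholder):
+ #
+ #   client.chat.completions.messages.list("chatcmpl_abc123", order: :asc)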
+ module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Chat::Completions::MessageListParams::Order + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = + T.let( + :asc, + OpenAI::Chat::Completions::MessageListParams::Order::TaggedSymbol + ) + DESC = + T.let( + :desc, + OpenAI::Chat::Completions::MessageListParams::Order::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::Completions::MessageListParams::Order::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/chat_model.rbi b/rbi/openai/models/chat_model.rbi new file mode 100644 index 00000000..7ba1f29a --- /dev/null +++ b/rbi/openai/models/chat_model.rbi @@ -0,0 +1,135 @@ +# typed: strong + +module OpenAI + module Models + module ChatModel + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::ChatModel) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + GPT_5 = T.let(:"gpt-5", OpenAI::ChatModel::TaggedSymbol) + GPT_5_MINI = T.let(:"gpt-5-mini", OpenAI::ChatModel::TaggedSymbol) + GPT_5_NANO = T.let(:"gpt-5-nano", OpenAI::ChatModel::TaggedSymbol) + GPT_5_2025_08_07 = + T.let(:"gpt-5-2025-08-07", OpenAI::ChatModel::TaggedSymbol) + GPT_5_MINI_2025_08_07 = + T.let(:"gpt-5-mini-2025-08-07", OpenAI::ChatModel::TaggedSymbol) + GPT_5_NANO_2025_08_07 = + T.let(:"gpt-5-nano-2025-08-07", OpenAI::ChatModel::TaggedSymbol) + GPT_5_CHAT_LATEST = + T.let(:"gpt-5-chat-latest", OpenAI::ChatModel::TaggedSymbol) + GPT_4_1 = T.let(:"gpt-4.1", OpenAI::ChatModel::TaggedSymbol) + GPT_4_1_MINI = T.let(:"gpt-4.1-mini", OpenAI::ChatModel::TaggedSymbol) + GPT_4_1_NANO = T.let(:"gpt-4.1-nano", OpenAI::ChatModel::TaggedSymbol) + GPT_4_1_2025_04_14 = + T.let(:"gpt-4.1-2025-04-14", OpenAI::ChatModel::TaggedSymbol) + GPT_4_1_MINI_2025_04_14 = + T.let(:"gpt-4.1-mini-2025-04-14", OpenAI::ChatModel::TaggedSymbol) + GPT_4_1_NANO_2025_04_14 = + T.let(:"gpt-4.1-nano-2025-04-14", OpenAI::ChatModel::TaggedSymbol) + O4_MINI = T.let(:"o4-mini", OpenAI::ChatModel::TaggedSymbol) + O4_MINI_2025_04_16 = + T.let(:"o4-mini-2025-04-16", OpenAI::ChatModel::TaggedSymbol) + O3 = T.let(:o3, OpenAI::ChatModel::TaggedSymbol) + O3_2025_04_16 = T.let(:"o3-2025-04-16", OpenAI::ChatModel::TaggedSymbol) + O3_MINI = T.let(:"o3-mini", OpenAI::ChatModel::TaggedSymbol) + O3_MINI_2025_01_31 = + T.let(:"o3-mini-2025-01-31", OpenAI::ChatModel::TaggedSymbol) + O1 = T.let(:o1, OpenAI::ChatModel::TaggedSymbol) + O1_2024_12_17 = T.let(:"o1-2024-12-17", OpenAI::ChatModel::TaggedSymbol) + O1_PREVIEW = T.let(:"o1-preview", OpenAI::ChatModel::TaggedSymbol) + O1_PREVIEW_2024_09_12 = + T.let(:"o1-preview-2024-09-12", OpenAI::ChatModel::TaggedSymbol) + O1_MINI = T.let(:"o1-mini", OpenAI::ChatModel::TaggedSymbol) + O1_MINI_2024_09_12 = + T.let(:"o1-mini-2024-09-12", OpenAI::ChatModel::TaggedSymbol) + GPT_4O = T.let(:"gpt-4o", OpenAI::ChatModel::TaggedSymbol) + GPT_4O_2024_11_20 = + T.let(:"gpt-4o-2024-11-20", OpenAI::ChatModel::TaggedSymbol) + GPT_4O_2024_08_06 = + T.let(:"gpt-4o-2024-08-06", OpenAI::ChatModel::TaggedSymbol) + GPT_4O_2024_05_13 = + T.let(:"gpt-4o-2024-05-13", OpenAI::ChatModel::TaggedSymbol) + GPT_4O_AUDIO_PREVIEW = + T.let(:"gpt-4o-audio-preview", OpenAI::ChatModel::TaggedSymbol) + GPT_4O_AUDIO_PREVIEW_2024_10_01 = + T.let( + :"gpt-4o-audio-preview-2024-10-01", + OpenAI::ChatModel::TaggedSymbol + ) + GPT_4O_AUDIO_PREVIEW_2024_12_17 = + T.let( + :"gpt-4o-audio-preview-2024-12-17", + 
OpenAI::ChatModel::TaggedSymbol + ) + GPT_4O_AUDIO_PREVIEW_2025_06_03 = + T.let( + :"gpt-4o-audio-preview-2025-06-03", + OpenAI::ChatModel::TaggedSymbol + ) + GPT_4O_MINI_AUDIO_PREVIEW = + T.let(:"gpt-4o-mini-audio-preview", OpenAI::ChatModel::TaggedSymbol) + GPT_4O_MINI_AUDIO_PREVIEW_2024_12_17 = + T.let( + :"gpt-4o-mini-audio-preview-2024-12-17", + OpenAI::ChatModel::TaggedSymbol + ) + GPT_4O_SEARCH_PREVIEW = + T.let(:"gpt-4o-search-preview", OpenAI::ChatModel::TaggedSymbol) + GPT_4O_MINI_SEARCH_PREVIEW = + T.let(:"gpt-4o-mini-search-preview", OpenAI::ChatModel::TaggedSymbol) + GPT_4O_SEARCH_PREVIEW_2025_03_11 = + T.let( + :"gpt-4o-search-preview-2025-03-11", + OpenAI::ChatModel::TaggedSymbol + ) + GPT_4O_MINI_SEARCH_PREVIEW_2025_03_11 = + T.let( + :"gpt-4o-mini-search-preview-2025-03-11", + OpenAI::ChatModel::TaggedSymbol + ) + CHATGPT_4O_LATEST = + T.let(:"chatgpt-4o-latest", OpenAI::ChatModel::TaggedSymbol) + CODEX_MINI_LATEST = + T.let(:"codex-mini-latest", OpenAI::ChatModel::TaggedSymbol) + GPT_4O_MINI = T.let(:"gpt-4o-mini", OpenAI::ChatModel::TaggedSymbol) + GPT_4O_MINI_2024_07_18 = + T.let(:"gpt-4o-mini-2024-07-18", OpenAI::ChatModel::TaggedSymbol) + GPT_4_TURBO = T.let(:"gpt-4-turbo", OpenAI::ChatModel::TaggedSymbol) + GPT_4_TURBO_2024_04_09 = + T.let(:"gpt-4-turbo-2024-04-09", OpenAI::ChatModel::TaggedSymbol) + GPT_4_0125_PREVIEW = + T.let(:"gpt-4-0125-preview", OpenAI::ChatModel::TaggedSymbol) + GPT_4_TURBO_PREVIEW = + T.let(:"gpt-4-turbo-preview", OpenAI::ChatModel::TaggedSymbol) + GPT_4_1106_PREVIEW = + T.let(:"gpt-4-1106-preview", OpenAI::ChatModel::TaggedSymbol) + GPT_4_VISION_PREVIEW = + T.let(:"gpt-4-vision-preview", OpenAI::ChatModel::TaggedSymbol) + GPT_4 = T.let(:"gpt-4", OpenAI::ChatModel::TaggedSymbol) + GPT_4_0314 = T.let(:"gpt-4-0314", OpenAI::ChatModel::TaggedSymbol) + GPT_4_0613 = T.let(:"gpt-4-0613", OpenAI::ChatModel::TaggedSymbol) + GPT_4_32K = T.let(:"gpt-4-32k", OpenAI::ChatModel::TaggedSymbol) + GPT_4_32K_0314 = T.let(:"gpt-4-32k-0314", OpenAI::ChatModel::TaggedSymbol) + GPT_4_32K_0613 = T.let(:"gpt-4-32k-0613", OpenAI::ChatModel::TaggedSymbol) + GPT_3_5_TURBO = T.let(:"gpt-3.5-turbo", OpenAI::ChatModel::TaggedSymbol) + GPT_3_5_TURBO_16K = + T.let(:"gpt-3.5-turbo-16k", OpenAI::ChatModel::TaggedSymbol) + GPT_3_5_TURBO_0301 = + T.let(:"gpt-3.5-turbo-0301", OpenAI::ChatModel::TaggedSymbol) + GPT_3_5_TURBO_0613 = + T.let(:"gpt-3.5-turbo-0613", OpenAI::ChatModel::TaggedSymbol) + GPT_3_5_TURBO_1106 = + T.let(:"gpt-3.5-turbo-1106", OpenAI::ChatModel::TaggedSymbol) + GPT_3_5_TURBO_0125 = + T.let(:"gpt-3.5-turbo-0125", OpenAI::ChatModel::TaggedSymbol) + GPT_3_5_TURBO_16K_0613 = + T.let(:"gpt-3.5-turbo-16k-0613", OpenAI::ChatModel::TaggedSymbol) + + sig { override.returns(T::Array[OpenAI::ChatModel::TaggedSymbol]) } + def self.values + end + end + end +end diff --git a/rbi/openai/models/comparison_filter.rbi b/rbi/openai/models/comparison_filter.rbi new file mode 100644 index 00000000..9ae08eed --- /dev/null +++ b/rbi/openai/models/comparison_filter.rbi @@ -0,0 +1,116 @@ +# typed: strong + +module OpenAI + module Models + class ComparisonFilter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ComparisonFilter, OpenAI::Internal::AnyHash) + end + + # The key to compare against the value. + sig { returns(String) } + attr_accessor :key + + # Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. 
+ # + # - `eq`: equals + # - `ne`: not equal + # - `gt`: greater than + # - `gte`: greater than or equal + # - `lt`: less than + # - `lte`: less than or equal + sig { returns(OpenAI::ComparisonFilter::Type::OrSymbol) } + attr_accessor :type + + # The value to compare against the attribute key; supports string, number, or + # boolean types. + sig { returns(OpenAI::ComparisonFilter::Value::Variants) } + attr_accessor :value + + # A filter used to compare a specified attribute key to a given value using a + # defined comparison operation. + sig do + params( + key: String, + type: OpenAI::ComparisonFilter::Type::OrSymbol, + value: OpenAI::ComparisonFilter::Value::Variants + ).returns(T.attached_class) + end + def self.new( + # The key to compare against the value. + key:, + # Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + # + # - `eq`: equals + # - `ne`: not equal + # - `gt`: greater than + # - `gte`: greater than or equal + # - `lt`: less than + # - `lte`: less than or equal + type:, + # The value to compare against the attribute key; supports string, number, or + # boolean types. + value: + ) + end + + sig do + override.returns( + { + key: String, + type: OpenAI::ComparisonFilter::Type::OrSymbol, + value: OpenAI::ComparisonFilter::Value::Variants + } + ) + end + def to_hash + end + + # Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + # + # - `eq`: equals + # - `ne`: not equal + # - `gt`: greater than + # - `gte`: greater than or equal + # - `lt`: less than + # - `lte`: less than or equal + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ComparisonFilter::Type) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EQ = T.let(:eq, OpenAI::ComparisonFilter::Type::TaggedSymbol) + NE = T.let(:ne, OpenAI::ComparisonFilter::Type::TaggedSymbol) + GT = T.let(:gt, OpenAI::ComparisonFilter::Type::TaggedSymbol) + GTE = T.let(:gte, OpenAI::ComparisonFilter::Type::TaggedSymbol) + LT = T.let(:lt, OpenAI::ComparisonFilter::Type::TaggedSymbol) + LTE = T.let(:lte, OpenAI::ComparisonFilter::Type::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ComparisonFilter::Type::TaggedSymbol] + ) + end + def self.values + end + end + + # The value to compare against the attribute key; supports string, number, or + # boolean types. + module Value + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(String, Float, T::Boolean) } + + sig do + override.returns(T::Array[OpenAI::ComparisonFilter::Value::Variants]) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/completion.rbi b/rbi/openai/models/completion.rbi new file mode 100644 index 00000000..b82f5786 --- /dev/null +++ b/rbi/openai/models/completion.rbi @@ -0,0 +1,97 @@ +# typed: strong + +module OpenAI + module Models + class Completion < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias { T.any(OpenAI::Completion, OpenAI::Internal::AnyHash) } + + # A unique identifier for the completion. + sig { returns(String) } + attr_accessor :id + + # The list of completion choices the model generated for the input prompt. + sig { returns(T::Array[OpenAI::CompletionChoice]) } + attr_accessor :choices + + # The Unix timestamp (in seconds) of when the completion was created. + sig { returns(Integer) } + attr_accessor :created + + # The model used for completion. 
+ sig { returns(String) } + attr_accessor :model + + # The object type, which is always "text_completion" + sig { returns(Symbol) } + attr_accessor :object + + # This fingerprint represents the backend configuration that the model runs with. + # + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism. + sig { returns(T.nilable(String)) } + attr_reader :system_fingerprint + + sig { params(system_fingerprint: String).void } + attr_writer :system_fingerprint + + # Usage statistics for the completion request. + sig { returns(T.nilable(OpenAI::CompletionUsage)) } + attr_reader :usage + + sig { params(usage: OpenAI::CompletionUsage::OrHash).void } + attr_writer :usage + + # Represents a completion response from the API. Note: both the streamed and + # non-streamed response objects share the same shape (unlike the chat endpoint). + sig do + params( + id: String, + choices: T::Array[OpenAI::CompletionChoice::OrHash], + created: Integer, + model: String, + system_fingerprint: String, + usage: OpenAI::CompletionUsage::OrHash, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # A unique identifier for the completion. + id:, + # The list of completion choices the model generated for the input prompt. + choices:, + # The Unix timestamp (in seconds) of when the completion was created. + created:, + # The model used for completion. + model:, + # This fingerprint represents the backend configuration that the model runs with. + # + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism. + system_fingerprint: nil, + # Usage statistics for the completion request. + usage: nil, + # The object type, which is always "text_completion" + object: :text_completion + ) + end + + sig do + override.returns( + { + id: String, + choices: T::Array[OpenAI::CompletionChoice], + created: Integer, + model: String, + object: Symbol, + system_fingerprint: String, + usage: OpenAI::CompletionUsage + } + ) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/completion_choice.rbi b/rbi/openai/models/completion_choice.rbi new file mode 100644 index 00000000..25f173ec --- /dev/null +++ b/rbi/openai/models/completion_choice.rbi @@ -0,0 +1,158 @@ +# typed: strong + +module OpenAI + module Models + class CompletionChoice < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::CompletionChoice, OpenAI::Internal::AnyHash) + end + + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, or `content_filter` if + # content was omitted due to a flag from our content filters. 
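+ #
+ # A hedged handling sketch (`choice` stands for one element of
+ # `completion.choices`):
+ #
+ #   case choice.finish_reason
+ #   when :length then warn("output truncated; consider raising max_tokens")
+ #   when :content_filter then warn("output was filtered")
+ #   end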
+ sig { returns(OpenAI::CompletionChoice::FinishReason::TaggedSymbol) } + attr_accessor :finish_reason + + sig { returns(Integer) } + attr_accessor :index + + sig { returns(T.nilable(OpenAI::CompletionChoice::Logprobs)) } + attr_reader :logprobs + + sig do + params( + logprobs: T.nilable(OpenAI::CompletionChoice::Logprobs::OrHash) + ).void + end + attr_writer :logprobs + + sig { returns(String) } + attr_accessor :text + + sig do + params( + finish_reason: OpenAI::CompletionChoice::FinishReason::OrSymbol, + index: Integer, + logprobs: T.nilable(OpenAI::CompletionChoice::Logprobs::OrHash), + text: String + ).returns(T.attached_class) + end + def self.new( + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, or `content_filter` if + # content was omitted due to a flag from our content filters. + finish_reason:, + index:, + logprobs:, + text: + ) + end + + sig do + override.returns( + { + finish_reason: OpenAI::CompletionChoice::FinishReason::TaggedSymbol, + index: Integer, + logprobs: T.nilable(OpenAI::CompletionChoice::Logprobs), + text: String + } + ) + end + def to_hash + end + + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, or `content_filter` if + # content was omitted due to a flag from our content filters. + module FinishReason + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::CompletionChoice::FinishReason) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + STOP = + T.let(:stop, OpenAI::CompletionChoice::FinishReason::TaggedSymbol) + LENGTH = + T.let(:length, OpenAI::CompletionChoice::FinishReason::TaggedSymbol) + CONTENT_FILTER = + T.let( + :content_filter, + OpenAI::CompletionChoice::FinishReason::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::CompletionChoice::FinishReason::TaggedSymbol] + ) + end + def self.values + end + end + + class Logprobs < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::CompletionChoice::Logprobs, OpenAI::Internal::AnyHash) + end + + sig { returns(T.nilable(T::Array[Integer])) } + attr_reader :text_offset + + sig { params(text_offset: T::Array[Integer]).void } + attr_writer :text_offset + + sig { returns(T.nilable(T::Array[Float])) } + attr_reader :token_logprobs + + sig { params(token_logprobs: T::Array[Float]).void } + attr_writer :token_logprobs + + sig { returns(T.nilable(T::Array[String])) } + attr_reader :tokens + + sig { params(tokens: T::Array[String]).void } + attr_writer :tokens + + sig { returns(T.nilable(T::Array[T::Hash[Symbol, Float]])) } + attr_reader :top_logprobs + + sig { params(top_logprobs: T::Array[T::Hash[Symbol, Float]]).void } + attr_writer :top_logprobs + + sig do + params( + text_offset: T::Array[Integer], + token_logprobs: T::Array[Float], + tokens: T::Array[String], + top_logprobs: T::Array[T::Hash[Symbol, Float]] + ).returns(T.attached_class) + end + def self.new( + text_offset: nil, + token_logprobs: nil, + tokens: nil, + top_logprobs: nil + ) + end + + sig do + override.returns( + { + text_offset: T::Array[Integer], + token_logprobs: T::Array[Float], + tokens: T::Array[String], + top_logprobs: T::Array[T::Hash[Symbol, Float]] + } + ) + end + def to_hash + end + end + end + end 
+end
diff --git a/rbi/openai/models/completion_create_params.rbi b/rbi/openai/models/completion_create_params.rbi
new file mode 100644
index 00000000..6a8a8a87
--- /dev/null
+++ b/rbi/openai/models/completion_create_params.rbi
@@ -0,0 +1,445 @@
+# typed: strong
+
+module OpenAI
+ module Models
+ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
+ extend OpenAI::Internal::Type::RequestParameters::Converter
+ include OpenAI::Internal::Type::RequestParameters
+
+ OrHash =
+ T.type_alias do
+ T.any(OpenAI::CompletionCreateParams, OpenAI::Internal::AnyHash)
+ end
+
+ # ID of the model to use. You can use the
+ # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+ # see all of your available models, or see our
+ # [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ # them.
+ sig do
+ returns(T.any(String, OpenAI::CompletionCreateParams::Model::OrSymbol))
+ end
+ attr_accessor :model
+
+ # The prompt(s) to generate completions for, encoded as a string, array of
+ # strings, array of tokens, or array of token arrays.
+ #
+ # Note that <|endoftext|> is the document separator that the model sees during
+ # training, so if a prompt is not specified the model will generate as if from the
+ # beginning of a new document.
+ sig do
+ returns(T.nilable(OpenAI::CompletionCreateParams::Prompt::Variants))
+ end
+ attr_accessor :prompt
+
+ # Generates `best_of` completions server-side and returns the "best" (the one with
+ # the highest log probability per token). Results cannot be streamed.
+ #
+ # When used with `n`, `best_of` controls the number of candidate completions and
+ # `n` specifies how many to return – `best_of` must be greater than `n`.
+ #
+ # **Note:** Because this parameter generates many completions, it can quickly
+ # consume your token quota. Use carefully and ensure that you have reasonable
+ # settings for `max_tokens` and `stop`.
+ sig { returns(T.nilable(Integer)) }
+ attr_accessor :best_of
+
+ # Echo back the prompt in addition to the completion.
+ sig { returns(T.nilable(T::Boolean)) }
+ attr_accessor :echo
+
+ # Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+ # existing frequency in the text so far, decreasing the model's likelihood to
+ # repeat the same line verbatim.
+ #
+ # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+ sig { returns(T.nilable(Float)) }
+ attr_accessor :frequency_penalty
+
+ # Modify the likelihood of specified tokens appearing in the completion.
+ #
+ # Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+ # tokenizer) to an associated bias value from -100 to 100. You can use this
+ # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+ # Mathematically, the bias is added to the logits generated by the model prior to
+ # sampling. The exact effect will vary per model, but values between -1 and 1
+ # should decrease or increase likelihood of selection; values like -100 or 100
+ # should result in a ban or exclusive selection of the relevant token.
+ #
+ # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+ # from being generated.
+ sig { returns(T.nilable(T::Hash[Symbol, Integer])) }
+ attr_accessor :logit_bias
+
+ # Include the log probabilities on the `logprobs` most likely output tokens, as
+ # well as the chosen tokens. 
For example, if `logprobs` is 5, the API will return a + # list of the 5 most likely tokens. The API will always return the `logprob` of + # the sampled token, so there may be up to `logprobs+1` elements in the response. + # + # The maximum value for `logprobs` is 5. + sig { returns(T.nilable(Integer)) } + attr_accessor :logprobs + + # The maximum number of [tokens](/tokenizer) that can be generated in the + # completion. + # + # The token count of your prompt plus `max_tokens` cannot exceed the model's + # context length. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. + sig { returns(T.nilable(Integer)) } + attr_accessor :max_tokens + + # How many completions to generate for each prompt. + # + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. + sig { returns(T.nilable(Integer)) } + attr_accessor :n + + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. + # + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + sig { returns(T.nilable(Float)) } + attr_accessor :presence_penalty + + # If specified, our system will make a best effort to sample deterministically, + # such that repeated requests with the same `seed` and parameters should return + # the same result. + # + # Determinism is not guaranteed, and you should refer to the `system_fingerprint` + # response parameter to monitor changes in the backend. + sig { returns(T.nilable(Integer)) } + attr_accessor :seed + + # Not supported with latest reasoning models `o3` and `o4-mini`. + # + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. + sig { returns(T.nilable(OpenAI::CompletionCreateParams::Stop::Variants)) } + attr_accessor :stop + + # Options for streaming response. Only set this when you set `stream: true`. + sig { returns(T.nilable(OpenAI::Chat::ChatCompletionStreamOptions)) } + attr_reader :stream_options + + sig do + params( + stream_options: + T.nilable(OpenAI::Chat::ChatCompletionStreamOptions::OrHash) + ).void + end + attr_writer :stream_options + + # The suffix that comes after a completion of inserted text. + # + # This parameter is only supported for `gpt-3.5-turbo-instruct`. + sig { returns(T.nilable(String)) } + attr_accessor :suffix + + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + # + # We generally recommend altering this or `top_p` but not both. + sig { returns(T.nilable(Float)) } + attr_accessor :temperature + + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. + sig { returns(T.nilable(Float)) } + attr_accessor :top_p + + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. 
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ sig { returns(T.nilable(String)) }
+ attr_reader :user
+
+ sig { params(user: String).void }
+ attr_writer :user
+
+ sig do
+ params(
+ model: T.any(String, OpenAI::CompletionCreateParams::Model::OrSymbol),
+ prompt: T.nilable(OpenAI::CompletionCreateParams::Prompt::Variants),
+ best_of: T.nilable(Integer),
+ echo: T.nilable(T::Boolean),
+ frequency_penalty: T.nilable(Float),
+ logit_bias: T.nilable(T::Hash[Symbol, Integer]),
+ logprobs: T.nilable(Integer),
+ max_tokens: T.nilable(Integer),
+ n: T.nilable(Integer),
+ presence_penalty: T.nilable(Float),
+ seed: T.nilable(Integer),
+ stop: T.nilable(OpenAI::CompletionCreateParams::Stop::Variants),
+ stream_options:
+ T.nilable(OpenAI::Chat::ChatCompletionStreamOptions::OrHash),
+ suffix: T.nilable(String),
+ temperature: T.nilable(Float),
+ top_p: T.nilable(Float),
+ user: String,
+ request_options: OpenAI::RequestOptions::OrHash
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # ID of the model to use. You can use the
+ # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
+ # see all of your available models, or see our
+ # [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ # them.
+ model:,
+ # The prompt(s) to generate completions for, encoded as a string, array of
+ # strings, array of tokens, or array of token arrays.
+ #
+ # Note that <|endoftext|> is the document separator that the model sees during
+ # training, so if a prompt is not specified the model will generate as if from the
+ # beginning of a new document.
+ prompt:,
+ # Generates `best_of` completions server-side and returns the "best" (the one with
+ # the highest log probability per token). Results cannot be streamed.
+ #
+ # When used with `n`, `best_of` controls the number of candidate completions and
+ # `n` specifies how many to return – `best_of` must be greater than `n`.
+ #
+ # **Note:** Because this parameter generates many completions, it can quickly
+ # consume your token quota. Use carefully and ensure that you have reasonable
+ # settings for `max_tokens` and `stop`.
+ best_of: nil,
+ # Echo back the prompt in addition to the completion.
+ echo: nil,
+ # Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+ # existing frequency in the text so far, decreasing the model's likelihood to
+ # repeat the same line verbatim.
+ #
+ # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+ frequency_penalty: nil,
+ # Modify the likelihood of specified tokens appearing in the completion.
+ #
+ # Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+ # tokenizer) to an associated bias value from -100 to 100. You can use this
+ # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+ # Mathematically, the bias is added to the logits generated by the model prior to
+ # sampling. The exact effect will vary per model, but values between -1 and 1
+ # should decrease or increase likelihood of selection; values like -100 or 100
+ # should result in a ban or exclusive selection of the relevant token.
+ #
+ # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+ # from being generated.
+ logit_bias: nil,
+ # Include the log probabilities on the `logprobs` most likely output tokens, as
+ # well as the chosen tokens. 
For example, if `logprobs` is 5, the API will return a + # list of the 5 most likely tokens. The API will always return the `logprob` of + # the sampled token, so there may be up to `logprobs+1` elements in the response. + # + # The maximum value for `logprobs` is 5. + logprobs: nil, + # The maximum number of [tokens](/tokenizer) that can be generated in the + # completion. + # + # The token count of your prompt plus `max_tokens` cannot exceed the model's + # context length. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. + max_tokens: nil, + # How many completions to generate for each prompt. + # + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. + n: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. + # + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + presence_penalty: nil, + # If specified, our system will make a best effort to sample deterministically, + # such that repeated requests with the same `seed` and parameters should return + # the same result. + # + # Determinism is not guaranteed, and you should refer to the `system_fingerprint` + # response parameter to monitor changes in the backend. + seed: nil, + # Not supported with latest reasoning models `o3` and `o4-mini`. + # + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. + stop: nil, + # Options for streaming response. Only set this when you set `stream: true`. + stream_options: nil, + # The suffix that comes after a completion of inserted text. + # + # This parameter is only supported for `gpt-3.5-turbo-instruct`. + suffix: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + # + # We generally recommend altering this or `top_p` but not both. + temperature: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. + top_p: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
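+ # For example, `user: "user-1234"` (an opaque identifier you assign; the
+ # value shown is a placeholder).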
+ user: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + model: + T.any(String, OpenAI::CompletionCreateParams::Model::OrSymbol), + prompt: T.nilable(OpenAI::CompletionCreateParams::Prompt::Variants), + best_of: T.nilable(Integer), + echo: T.nilable(T::Boolean), + frequency_penalty: T.nilable(Float), + logit_bias: T.nilable(T::Hash[Symbol, Integer]), + logprobs: T.nilable(Integer), + max_tokens: T.nilable(Integer), + n: T.nilable(Integer), + presence_penalty: T.nilable(Float), + seed: T.nilable(Integer), + stop: T.nilable(OpenAI::CompletionCreateParams::Stop::Variants), + stream_options: + T.nilable(OpenAI::Chat::ChatCompletionStreamOptions), + suffix: T.nilable(String), + temperature: T.nilable(Float), + top_p: T.nilable(Float), + user: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + module Model + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any(String, OpenAI::CompletionCreateParams::Model::TaggedSymbol) + end + + sig do + override.returns( + T::Array[OpenAI::CompletionCreateParams::Model::Variants] + ) + end + def self.variants + end + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::CompletionCreateParams::Model) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + GPT_3_5_TURBO_INSTRUCT = + T.let( + :"gpt-3.5-turbo-instruct", + OpenAI::CompletionCreateParams::Model::TaggedSymbol + ) + DAVINCI_002 = + T.let( + :"davinci-002", + OpenAI::CompletionCreateParams::Model::TaggedSymbol + ) + BABBAGE_002 = + T.let( + :"babbage-002", + OpenAI::CompletionCreateParams::Model::TaggedSymbol + ) + end + + # The prompt(s) to generate completions for, encoded as a string, array of + # strings, array of tokens, or array of token arrays. + # + # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. + module Prompt + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[String], + T::Array[Integer], + T::Array[T::Array[Integer]] + ) + end + + sig do + override.returns( + T::Array[OpenAI::CompletionCreateParams::Prompt::Variants] + ) + end + def self.variants + end + + StringArray = + T.let( + OpenAI::Internal::Type::ArrayOf[String], + OpenAI::Internal::Type::Converter + ) + + IntegerArray = + T.let( + OpenAI::Internal::Type::ArrayOf[Integer], + OpenAI::Internal::Type::Converter + ) + + ArrayOfToken2DArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + OpenAI::Internal::Type::ArrayOf[Integer] + ], + OpenAI::Internal::Type::Converter + ) + end + + # Not supported with latest reasoning models `o3` and `o4-mini`. + # + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. 
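+ #
+ # Both accepted shapes, as a sketch (these match the `Variants` alias below):
+ #
+ #   stop: "\n"           # a single sequence
+ #   stop: ["\n", "END"]  # up to four sequences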
+ module Stop + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.nilable(T.any(String, T::Array[String])) } + + sig do + override.returns( + T::Array[OpenAI::CompletionCreateParams::Stop::Variants] + ) + end + def self.variants + end + + StringArray = + T.let( + OpenAI::Internal::Type::ArrayOf[String], + OpenAI::Internal::Type::Converter + ) + end + end + end +end diff --git a/rbi/openai/models/completion_usage.rbi b/rbi/openai/models/completion_usage.rbi new file mode 100644 index 00000000..f64ef423 --- /dev/null +++ b/rbi/openai/models/completion_usage.rbi @@ -0,0 +1,215 @@ +# typed: strong + +module OpenAI + module Models + class CompletionUsage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::CompletionUsage, OpenAI::Internal::AnyHash) + end + + # Number of tokens in the generated completion. + sig { returns(Integer) } + attr_accessor :completion_tokens + + # Number of tokens in the prompt. + sig { returns(Integer) } + attr_accessor :prompt_tokens + + # Total number of tokens used in the request (prompt + completion). + sig { returns(Integer) } + attr_accessor :total_tokens + + # Breakdown of tokens used in a completion. + sig do + returns(T.nilable(OpenAI::CompletionUsage::CompletionTokensDetails)) + end + attr_reader :completion_tokens_details + + sig do + params( + completion_tokens_details: + OpenAI::CompletionUsage::CompletionTokensDetails::OrHash + ).void + end + attr_writer :completion_tokens_details + + # Breakdown of tokens used in the prompt. + sig { returns(T.nilable(OpenAI::CompletionUsage::PromptTokensDetails)) } + attr_reader :prompt_tokens_details + + sig do + params( + prompt_tokens_details: + OpenAI::CompletionUsage::PromptTokensDetails::OrHash + ).void + end + attr_writer :prompt_tokens_details + + # Usage statistics for the completion request. + sig do + params( + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer, + completion_tokens_details: + OpenAI::CompletionUsage::CompletionTokensDetails::OrHash, + prompt_tokens_details: + OpenAI::CompletionUsage::PromptTokensDetails::OrHash + ).returns(T.attached_class) + end + def self.new( + # Number of tokens in the generated completion. + completion_tokens:, + # Number of tokens in the prompt. + prompt_tokens:, + # Total number of tokens used in the request (prompt + completion). + total_tokens:, + # Breakdown of tokens used in a completion. + completion_tokens_details: nil, + # Breakdown of tokens used in the prompt. + prompt_tokens_details: nil + ) + end + + sig do + override.returns( + { + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer, + completion_tokens_details: + OpenAI::CompletionUsage::CompletionTokensDetails, + prompt_tokens_details: OpenAI::CompletionUsage::PromptTokensDetails + } + ) + end + def to_hash + end + + class CompletionTokensDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::CompletionUsage::CompletionTokensDetails, + OpenAI::Internal::AnyHash + ) + end + + # When using Predicted Outputs, the number of tokens in the prediction that + # appeared in the completion. + sig { returns(T.nilable(Integer)) } + attr_reader :accepted_prediction_tokens + + sig { params(accepted_prediction_tokens: Integer).void } + attr_writer :accepted_prediction_tokens + + # Audio input tokens generated by the model. 
+ sig { returns(T.nilable(Integer)) } + attr_reader :audio_tokens + + sig { params(audio_tokens: Integer).void } + attr_writer :audio_tokens + + # Tokens generated by the model for reasoning. + sig { returns(T.nilable(Integer)) } + attr_reader :reasoning_tokens + + sig { params(reasoning_tokens: Integer).void } + attr_writer :reasoning_tokens + + # When using Predicted Outputs, the number of tokens in the prediction that did + # not appear in the completion. However, like reasoning tokens, these tokens are + # still counted in the total completion tokens for purposes of billing, output, + # and context window limits. + sig { returns(T.nilable(Integer)) } + attr_reader :rejected_prediction_tokens + + sig { params(rejected_prediction_tokens: Integer).void } + attr_writer :rejected_prediction_tokens + + # Breakdown of tokens used in a completion. + sig do + params( + accepted_prediction_tokens: Integer, + audio_tokens: Integer, + reasoning_tokens: Integer, + rejected_prediction_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # When using Predicted Outputs, the number of tokens in the prediction that + # appeared in the completion. + accepted_prediction_tokens: nil, + # Audio input tokens generated by the model. + audio_tokens: nil, + # Tokens generated by the model for reasoning. + reasoning_tokens: nil, + # When using Predicted Outputs, the number of tokens in the prediction that did + # not appear in the completion. However, like reasoning tokens, these tokens are + # still counted in the total completion tokens for purposes of billing, output, + # and context window limits. + rejected_prediction_tokens: nil + ) + end + + sig do + override.returns( + { + accepted_prediction_tokens: Integer, + audio_tokens: Integer, + reasoning_tokens: Integer, + rejected_prediction_tokens: Integer + } + ) + end + def to_hash + end + end + + class PromptTokensDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::CompletionUsage::PromptTokensDetails, + OpenAI::Internal::AnyHash + ) + end + + # Audio input tokens present in the prompt. + sig { returns(T.nilable(Integer)) } + attr_reader :audio_tokens + + sig { params(audio_tokens: Integer).void } + attr_writer :audio_tokens + + # Cached tokens present in the prompt. + sig { returns(T.nilable(Integer)) } + attr_reader :cached_tokens + + sig { params(cached_tokens: Integer).void } + attr_writer :cached_tokens + + # Breakdown of tokens used in the prompt. + sig do + params(audio_tokens: Integer, cached_tokens: Integer).returns( + T.attached_class + ) + end + def self.new( + # Audio input tokens present in the prompt. + audio_tokens: nil, + # Cached tokens present in the prompt. + cached_tokens: nil + ) + end + + sig do + override.returns({ audio_tokens: Integer, cached_tokens: Integer }) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/compound_filter.rbi b/rbi/openai/models/compound_filter.rbi new file mode 100644 index 00000000..99097583 --- /dev/null +++ b/rbi/openai/models/compound_filter.rbi @@ -0,0 +1,81 @@ +# typed: strong + +module OpenAI + module Models + class CompoundFilter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::CompoundFilter, OpenAI::Internal::AnyHash) + end + + # Array of filters to combine. Items can be `ComparisonFilter` or + # `CompoundFilter`. + sig { returns(T::Array[T.any(OpenAI::ComparisonFilter, T.anything)]) } + attr_accessor :filters + + # Type of operation: `and` or `or`. 
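+ #
+ # A hedged construction sketch (keys and values are hypothetical), built from
+ # the `ComparisonFilter` model defined earlier in this file set:
+ #
+ #   OpenAI::CompoundFilter.new(
+ #     type: :and,
+ #     filters: [
+ #       OpenAI::ComparisonFilter.new(key: "genre", type: :eq, value: "fiction"),
+ #       OpenAI::ComparisonFilter.new(key: "author", type: :ne, value: "anonymous")
+ #     ]
+ #   )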
+ sig { returns(OpenAI::CompoundFilter::Type::OrSymbol) } + attr_accessor :type + + # Combine multiple filters using `and` or `or`. + sig do + params( + filters: + T::Array[T.any(OpenAI::ComparisonFilter::OrHash, T.anything)], + type: OpenAI::CompoundFilter::Type::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # Array of filters to combine. Items can be `ComparisonFilter` or + # `CompoundFilter`. + filters:, + # Type of operation: `and` or `or`. + type: + ) + end + + sig do + override.returns( + { + filters: T::Array[T.any(OpenAI::ComparisonFilter, T.anything)], + type: OpenAI::CompoundFilter::Type::OrSymbol + } + ) + end + def to_hash + end + + # A filter used to compare a specified attribute key to a given value using a + # defined comparison operation. + module Filter + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(OpenAI::ComparisonFilter, T.anything) } + + sig do + override.returns(T::Array[OpenAI::CompoundFilter::Filter::Variants]) + end + def self.variants + end + end + + # Type of operation: `and` or `or`. + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::CompoundFilter::Type) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AND = T.let(:and, OpenAI::CompoundFilter::Type::TaggedSymbol) + OR = T.let(:or, OpenAI::CompoundFilter::Type::TaggedSymbol) + + sig do + override.returns(T::Array[OpenAI::CompoundFilter::Type::TaggedSymbol]) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/container_create_params.rbi b/rbi/openai/models/container_create_params.rbi new file mode 100644 index 00000000..ad4342cb --- /dev/null +++ b/rbi/openai/models/container_create_params.rbi @@ -0,0 +1,145 @@ +# typed: strong + +module OpenAI + module Models + class ContainerCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::ContainerCreateParams, OpenAI::Internal::AnyHash) + end + + # Name of the container to create. + sig { returns(String) } + attr_accessor :name + + # Container expiration time in seconds relative to the 'anchor' time. + sig { returns(T.nilable(OpenAI::ContainerCreateParams::ExpiresAfter)) } + attr_reader :expires_after + + sig do + params( + expires_after: OpenAI::ContainerCreateParams::ExpiresAfter::OrHash + ).void + end + attr_writer :expires_after + + # IDs of files to copy to the container. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :file_ids + + sig { params(file_ids: T::Array[String]).void } + attr_writer :file_ids + + sig do + params( + name: String, + expires_after: OpenAI::ContainerCreateParams::ExpiresAfter::OrHash, + file_ids: T::Array[String], + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Name of the container to create. + name:, + # Container expiration time in seconds relative to the 'anchor' time. + expires_after: nil, + # IDs of files to copy to the container. 
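+      # A hedged construction sketch for the `CompoundFilter` model above, e.g.
+      # for attribute filtering in file search; `ComparisonFilter`'s `key:`,
+      # `type:`, and `value:` fields are assumed from its model elsewhere in
+      # this diff:
+      #
+      #   filter = OpenAI::CompoundFilter.new(
+      #     type: :and,
+      #     filters: [
+      #       OpenAI::ComparisonFilter.new(key: "region", type: :eq, value: "us"),
+      #       OpenAI::ComparisonFilter.new(key: "year", type: :gte, value: 2024)
+      #     ]
+      #   )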
+ file_ids: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + name: String, + expires_after: OpenAI::ContainerCreateParams::ExpiresAfter, + file_ids: T::Array[String], + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::ContainerCreateParams::ExpiresAfter, + OpenAI::Internal::AnyHash + ) + end + + # Time anchor for the expiration time. Currently only 'last_active_at' is + # supported. + sig do + returns(OpenAI::ContainerCreateParams::ExpiresAfter::Anchor::OrSymbol) + end + attr_accessor :anchor + + sig { returns(Integer) } + attr_accessor :minutes + + # Container expiration time in seconds relative to the 'anchor' time. + sig do + params( + anchor: + OpenAI::ContainerCreateParams::ExpiresAfter::Anchor::OrSymbol, + minutes: Integer + ).returns(T.attached_class) + end + def self.new( + # Time anchor for the expiration time. Currently only 'last_active_at' is + # supported. + anchor:, + minutes: + ) + end + + sig do + override.returns( + { + anchor: + OpenAI::ContainerCreateParams::ExpiresAfter::Anchor::OrSymbol, + minutes: Integer + } + ) + end + def to_hash + end + + # Time anchor for the expiration time. Currently only 'last_active_at' is + # supported. + module Anchor + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ContainerCreateParams::ExpiresAfter::Anchor) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LAST_ACTIVE_AT = + T.let( + :last_active_at, + OpenAI::ContainerCreateParams::ExpiresAfter::Anchor::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::ContainerCreateParams::ExpiresAfter::Anchor::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/container_create_response.rbi b/rbi/openai/models/container_create_response.rbi new file mode 100644 index 00000000..4552eb5f --- /dev/null +++ b/rbi/openai/models/container_create_response.rbi @@ -0,0 +1,192 @@ +# typed: strong + +module OpenAI + module Models + class ContainerCreateResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::ContainerCreateResponse, + OpenAI::Internal::AnyHash + ) + end + + # Unique identifier for the container. + sig { returns(String) } + attr_accessor :id + + # Unix timestamp (in seconds) when the container was created. + sig { returns(Integer) } + attr_accessor :created_at + + # Name of the container. + sig { returns(String) } + attr_accessor :name + + # The type of this object. + sig { returns(String) } + attr_accessor :object + + # Status of the container (e.g., active, deleted). + sig { returns(String) } + attr_accessor :status + + # The container will expire after this time period. The anchor is the reference + # point for the expiration. The minutes is the number of minutes after the anchor + # before the container expires. + sig do + returns( + T.nilable(OpenAI::Models::ContainerCreateResponse::ExpiresAfter) + ) + end + attr_reader :expires_after + + sig do + params( + expires_after: + OpenAI::Models::ContainerCreateResponse::ExpiresAfter::OrHash + ).void + end + attr_writer :expires_after + + sig do + params( + id: String, + created_at: Integer, + name: String, + object: String, + status: String, + expires_after: + OpenAI::Models::ContainerCreateResponse::ExpiresAfter::OrHash + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the container. 
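+      # A minimal creation sketch (illustrative IDs): the params above map onto
+      # a `containers.create` call, and the expiry window is expressed through
+      # the `minutes:` field of `expires_after`. Assumes a configured `client`:
+      #
+      #   container = client.containers.create(
+      #     name: "scratchpad",
+      #     file_ids: ["file-abc123"],
+      #     expires_after: {anchor: :last_active_at, minutes: 20}
+      #   )
+      #   puts container.id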
+ id:, + # Unix timestamp (in seconds) when the container was created. + created_at:, + # Name of the container. + name:, + # The type of this object. + object:, + # Status of the container (e.g., active, deleted). + status:, + # The container will expire after this time period. The anchor is the reference + # point for the expiration. The minutes is the number of minutes after the anchor + # before the container expires. + expires_after: nil + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + name: String, + object: String, + status: String, + expires_after: OpenAI::Models::ContainerCreateResponse::ExpiresAfter + } + ) + end + def to_hash + end + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::ContainerCreateResponse::ExpiresAfter, + OpenAI::Internal::AnyHash + ) + end + + # The reference point for the expiration. + sig do + returns( + T.nilable( + OpenAI::Models::ContainerCreateResponse::ExpiresAfter::Anchor::TaggedSymbol + ) + ) + end + attr_reader :anchor + + sig do + params( + anchor: + OpenAI::Models::ContainerCreateResponse::ExpiresAfter::Anchor::OrSymbol + ).void + end + attr_writer :anchor + + # The number of minutes after the anchor before the container expires. + sig { returns(T.nilable(Integer)) } + attr_reader :minutes + + sig { params(minutes: Integer).void } + attr_writer :minutes + + # The container will expire after this time period. The anchor is the reference + # point for the expiration. The minutes is the number of minutes after the anchor + # before the container expires. + sig do + params( + anchor: + OpenAI::Models::ContainerCreateResponse::ExpiresAfter::Anchor::OrSymbol, + minutes: Integer + ).returns(T.attached_class) + end + def self.new( + # The reference point for the expiration. + anchor: nil, + # The number of minutes after the anchor before the container expires. + minutes: nil + ) + end + + sig do + override.returns( + { + anchor: + OpenAI::Models::ContainerCreateResponse::ExpiresAfter::Anchor::TaggedSymbol, + minutes: Integer + } + ) + end + def to_hash + end + + # The reference point for the expiration. 
+ module Anchor + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Models::ContainerCreateResponse::ExpiresAfter::Anchor + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LAST_ACTIVE_AT = + T.let( + :last_active_at, + OpenAI::Models::ContainerCreateResponse::ExpiresAfter::Anchor::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Models::ContainerCreateResponse::ExpiresAfter::Anchor::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/container_delete_params.rbi b/rbi/openai/models/container_delete_params.rbi new file mode 100644 index 00000000..85d0c862 --- /dev/null +++ b/rbi/openai/models/container_delete_params.rbi @@ -0,0 +1,27 @@ +# typed: strong + +module OpenAI + module Models + class ContainerDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::ContainerDeleteParams, OpenAI::Internal::AnyHash) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/container_list_params.rbi b/rbi/openai/models/container_list_params.rbi new file mode 100644 index 00000000..3f3ebd63 --- /dev/null +++ b/rbi/openai/models/container_list_params.rbi @@ -0,0 +1,99 @@ +# typed: strong + +module OpenAI + module Models + class ContainerListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::ContainerListParams, OpenAI::Internal::AnyHash) + end + + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + sig { returns(T.nilable(OpenAI::ContainerListParams::Order::OrSymbol)) } + attr_reader :order + + sig { params(order: OpenAI::ContainerListParams::Order::OrSymbol).void } + attr_writer :order + + sig do + params( + after: String, + limit: Integer, + order: OpenAI::ContainerListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. 
+ limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + limit: Integer, + order: OpenAI::ContainerListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ContainerListParams::Order) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = T.let(:asc, OpenAI::ContainerListParams::Order::TaggedSymbol) + DESC = T.let(:desc, OpenAI::ContainerListParams::Order::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ContainerListParams::Order::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/container_list_response.rbi b/rbi/openai/models/container_list_response.rbi new file mode 100644 index 00000000..4431fa79 --- /dev/null +++ b/rbi/openai/models/container_list_response.rbi @@ -0,0 +1,190 @@ +# typed: strong + +module OpenAI + module Models + class ContainerListResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::ContainerListResponse, + OpenAI::Internal::AnyHash + ) + end + + # Unique identifier for the container. + sig { returns(String) } + attr_accessor :id + + # Unix timestamp (in seconds) when the container was created. + sig { returns(Integer) } + attr_accessor :created_at + + # Name of the container. + sig { returns(String) } + attr_accessor :name + + # The type of this object. + sig { returns(String) } + attr_accessor :object + + # Status of the container (e.g., active, deleted). + sig { returns(String) } + attr_accessor :status + + # The container will expire after this time period. The anchor is the reference + # point for the expiration. The minutes is the number of minutes after the anchor + # before the container expires. + sig do + returns(T.nilable(OpenAI::Models::ContainerListResponse::ExpiresAfter)) + end + attr_reader :expires_after + + sig do + params( + expires_after: + OpenAI::Models::ContainerListResponse::ExpiresAfter::OrHash + ).void + end + attr_writer :expires_after + + sig do + params( + id: String, + created_at: Integer, + name: String, + object: String, + status: String, + expires_after: + OpenAI::Models::ContainerListResponse::ExpiresAfter::OrHash + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the container. + id:, + # Unix timestamp (in seconds) when the container was created. + created_at:, + # Name of the container. + name:, + # The type of this object. + object:, + # Status of the container (e.g., active, deleted). + status:, + # The container will expire after this time period. The anchor is the reference + # point for the expiration. The minutes is the number of minutes after the anchor + # before the container expires. 
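+      # A hedged pagination sketch for the list params above: `after` carries
+      # the cursor, `limit` caps the page size (1-100, default 20), and `order`
+      # sorts by `created_at`. Assumes a configured `client`:
+      #
+      #   page = client.containers.list(limit: 50, order: :desc)
+      #   page.data.each { |container| puts container.name }
+      #   # fetch the next page by passing the last ID back as the cursor
+      #   next_page = client.containers.list(after: page.data.last.id, limit: 50)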
+ expires_after: nil + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + name: String, + object: String, + status: String, + expires_after: OpenAI::Models::ContainerListResponse::ExpiresAfter + } + ) + end + def to_hash + end + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::ContainerListResponse::ExpiresAfter, + OpenAI::Internal::AnyHash + ) + end + + # The reference point for the expiration. + sig do + returns( + T.nilable( + OpenAI::Models::ContainerListResponse::ExpiresAfter::Anchor::TaggedSymbol + ) + ) + end + attr_reader :anchor + + sig do + params( + anchor: + OpenAI::Models::ContainerListResponse::ExpiresAfter::Anchor::OrSymbol + ).void + end + attr_writer :anchor + + # The number of minutes after the anchor before the container expires. + sig { returns(T.nilable(Integer)) } + attr_reader :minutes + + sig { params(minutes: Integer).void } + attr_writer :minutes + + # The container will expire after this time period. The anchor is the reference + # point for the expiration. The minutes is the number of minutes after the anchor + # before the container expires. + sig do + params( + anchor: + OpenAI::Models::ContainerListResponse::ExpiresAfter::Anchor::OrSymbol, + minutes: Integer + ).returns(T.attached_class) + end + def self.new( + # The reference point for the expiration. + anchor: nil, + # The number of minutes after the anchor before the container expires. + minutes: nil + ) + end + + sig do + override.returns( + { + anchor: + OpenAI::Models::ContainerListResponse::ExpiresAfter::Anchor::TaggedSymbol, + minutes: Integer + } + ) + end + def to_hash + end + + # The reference point for the expiration. + module Anchor + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Models::ContainerListResponse::ExpiresAfter::Anchor + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LAST_ACTIVE_AT = + T.let( + :last_active_at, + OpenAI::Models::ContainerListResponse::ExpiresAfter::Anchor::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Models::ContainerListResponse::ExpiresAfter::Anchor::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/container_retrieve_params.rbi b/rbi/openai/models/container_retrieve_params.rbi new file mode 100644 index 00000000..6c987580 --- /dev/null +++ b/rbi/openai/models/container_retrieve_params.rbi @@ -0,0 +1,27 @@ +# typed: strong + +module OpenAI + module Models + class ContainerRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::ContainerRetrieveParams, OpenAI::Internal::AnyHash) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/container_retrieve_response.rbi b/rbi/openai/models/container_retrieve_response.rbi new file mode 100644 index 00000000..a81e2d6a --- /dev/null +++ b/rbi/openai/models/container_retrieve_response.rbi @@ -0,0 +1,193 @@ +# typed: strong + +module OpenAI + module Models + class ContainerRetrieveResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + 
OpenAI::Models::ContainerRetrieveResponse, + OpenAI::Internal::AnyHash + ) + end + + # Unique identifier for the container. + sig { returns(String) } + attr_accessor :id + + # Unix timestamp (in seconds) when the container was created. + sig { returns(Integer) } + attr_accessor :created_at + + # Name of the container. + sig { returns(String) } + attr_accessor :name + + # The type of this object. + sig { returns(String) } + attr_accessor :object + + # Status of the container (e.g., active, deleted). + sig { returns(String) } + attr_accessor :status + + # The container will expire after this time period. The anchor is the reference + # point for the expiration. The minutes is the number of minutes after the anchor + # before the container expires. + sig do + returns( + T.nilable(OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter) + ) + end + attr_reader :expires_after + + sig do + params( + expires_after: + OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::OrHash + ).void + end + attr_writer :expires_after + + sig do + params( + id: String, + created_at: Integer, + name: String, + object: String, + status: String, + expires_after: + OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::OrHash + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the container. + id:, + # Unix timestamp (in seconds) when the container was created. + created_at:, + # Name of the container. + name:, + # The type of this object. + object:, + # Status of the container (e.g., active, deleted). + status:, + # The container will expire after this time period. The anchor is the reference + # point for the expiration. The minutes is the number of minutes after the anchor + # before the container expires. + expires_after: nil + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + name: String, + object: String, + status: String, + expires_after: + OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter + } + ) + end + def to_hash + end + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter, + OpenAI::Internal::AnyHash + ) + end + + # The reference point for the expiration. + sig do + returns( + T.nilable( + OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::Anchor::TaggedSymbol + ) + ) + end + attr_reader :anchor + + sig do + params( + anchor: + OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::Anchor::OrSymbol + ).void + end + attr_writer :anchor + + # The number of minutes after the anchor before the container expires. + sig { returns(T.nilable(Integer)) } + attr_reader :minutes + + sig { params(minutes: Integer).void } + attr_writer :minutes + + # The container will expire after this time period. The anchor is the reference + # point for the expiration. The minutes is the number of minutes after the anchor + # before the container expires. + sig do + params( + anchor: + OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::Anchor::OrSymbol, + minutes: Integer + ).returns(T.attached_class) + end + def self.new( + # The reference point for the expiration. + anchor: nil, + # The number of minutes after the anchor before the container expires. + minutes: nil + ) + end + + sig do + override.returns( + { + anchor: + OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::Anchor::TaggedSymbol, + minutes: Integer + } + ) + end + def to_hash + end + + # The reference point for the expiration. 
+ module Anchor + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::Anchor + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LAST_ACTIVE_AT = + T.let( + :last_active_at, + OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::Anchor::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::Anchor::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/containers/file_create_params.rbi b/rbi/openai/models/containers/file_create_params.rbi new file mode 100644 index 00000000..4d752f50 --- /dev/null +++ b/rbi/openai/models/containers/file_create_params.rbi @@ -0,0 +1,62 @@ +# typed: strong + +module OpenAI + module Models + module Containers + class FileCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Containers::FileCreateParams, + OpenAI::Internal::AnyHash + ) + end + + # The File object (not file name) to be uploaded. + sig { returns(T.nilable(OpenAI::Internal::FileInput)) } + attr_reader :file + + sig { params(file: OpenAI::Internal::FileInput).void } + attr_writer :file + + # Name of the file to create. + sig { returns(T.nilable(String)) } + attr_reader :file_id + + sig { params(file_id: String).void } + attr_writer :file_id + + sig do + params( + file: OpenAI::Internal::FileInput, + file_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The File object (not file name) to be uploaded. + file: nil, + # Name of the file to create. + file_id: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + file: OpenAI::Internal::FileInput, + file_id: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/containers/file_create_response.rbi b/rbi/openai/models/containers/file_create_response.rbi new file mode 100644 index 00000000..c39f81c6 --- /dev/null +++ b/rbi/openai/models/containers/file_create_response.rbi @@ -0,0 +1,90 @@ +# typed: strong + +module OpenAI + module Models + module Containers + class FileCreateResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Containers::FileCreateResponse, + OpenAI::Internal::AnyHash + ) + end + + # Unique identifier for the file. + sig { returns(String) } + attr_accessor :id + + # Size of the file in bytes. + sig { returns(Integer) } + attr_accessor :bytes + + # The container this file belongs to. + sig { returns(String) } + attr_accessor :container_id + + # Unix timestamp (in seconds) when the file was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The type of this object (`container.file`). + sig { returns(Symbol) } + attr_accessor :object + + # Path of the file in the container. + sig { returns(String) } + attr_accessor :path + + # Source of the file (e.g., `user`, `assistant`). + sig { returns(String) } + attr_accessor :source + + sig do + params( + id: String, + bytes: Integer, + container_id: String, + created_at: Integer, + path: String, + source: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the file. + id:, + # Size of the file in bytes. 
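+        # An upload sketch for the file-create params above (illustrative):
+        # `file` takes file-like input (e.g. a Pathname; require "pathname"),
+        # while `file_id` instead references an already-uploaded file. The
+        # container ID is assumed to be the positional path parameter:
+        #
+        #   container_file = client.containers.files.create(
+        #     "cntr_abc123",
+        #     file: Pathname("report.csv")
+        #   )
+        #   puts container_file.path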
+ bytes:, + # The container this file belongs to. + container_id:, + # Unix timestamp (in seconds) when the file was created. + created_at:, + # Path of the file in the container. + path:, + # Source of the file (e.g., `user`, `assistant`). + source:, + # The type of this object (`container.file`). + object: :"container.file" + ) + end + + sig do + override.returns( + { + id: String, + bytes: Integer, + container_id: String, + created_at: Integer, + object: Symbol, + path: String, + source: String + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/containers/file_delete_params.rbi b/rbi/openai/models/containers/file_delete_params.rbi new file mode 100644 index 00000000..42830150 --- /dev/null +++ b/rbi/openai/models/containers/file_delete_params.rbi @@ -0,0 +1,40 @@ +# typed: strong + +module OpenAI + module Models + module Containers + class FileDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Containers::FileDeleteParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :container_id + + sig do + params( + container_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(container_id:, request_options: {}) + end + + sig do + override.returns( + { container_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/containers/file_list_params.rbi b/rbi/openai/models/containers/file_list_params.rbi new file mode 100644 index 00000000..f62afa88 --- /dev/null +++ b/rbi/openai/models/containers/file_list_params.rbi @@ -0,0 +1,116 @@ +# typed: strong + +module OpenAI + module Models + module Containers + class FileListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::Containers::FileListParams, OpenAI::Internal::AnyHash) + end + + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + sig do + returns( + T.nilable(OpenAI::Containers::FileListParams::Order::OrSymbol) + ) + end + attr_reader :order + + sig do + params( + order: OpenAI::Containers::FileListParams::Order::OrSymbol + ).void + end + attr_writer :order + + sig do + params( + after: String, + limit: Integer, + order: OpenAI::Containers::FileListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. 
For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + limit: Integer, + order: OpenAI::Containers::FileListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Containers::FileListParams::Order) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = + T.let(:asc, OpenAI::Containers::FileListParams::Order::TaggedSymbol) + DESC = + T.let( + :desc, + OpenAI::Containers::FileListParams::Order::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Containers::FileListParams::Order::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/containers/file_list_response.rbi b/rbi/openai/models/containers/file_list_response.rbi new file mode 100644 index 00000000..f33ad693 --- /dev/null +++ b/rbi/openai/models/containers/file_list_response.rbi @@ -0,0 +1,90 @@ +# typed: strong + +module OpenAI + module Models + module Containers + class FileListResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Containers::FileListResponse, + OpenAI::Internal::AnyHash + ) + end + + # Unique identifier for the file. + sig { returns(String) } + attr_accessor :id + + # Size of the file in bytes. + sig { returns(Integer) } + attr_accessor :bytes + + # The container this file belongs to. + sig { returns(String) } + attr_accessor :container_id + + # Unix timestamp (in seconds) when the file was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The type of this object (`container.file`). + sig { returns(Symbol) } + attr_accessor :object + + # Path of the file in the container. + sig { returns(String) } + attr_accessor :path + + # Source of the file (e.g., `user`, `assistant`). + sig { returns(String) } + attr_accessor :source + + sig do + params( + id: String, + bytes: Integer, + container_id: String, + created_at: Integer, + path: String, + source: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the file. + id:, + # Size of the file in bytes. + bytes:, + # The container this file belongs to. + container_id:, + # Unix timestamp (in seconds) when the file was created. + created_at:, + # Path of the file in the container. + path:, + # Source of the file (e.g., `user`, `assistant`). + source:, + # The type of this object (`container.file`). 
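+        # A listing sketch for the file-list params above; `auto_paging_each`
+        # is the SDK's usual cursor-following helper and is assumed to apply to
+        # this resource as well:
+        #
+        #   client.containers.files.list("cntr_abc123", order: :asc).auto_paging_each do |f|
+        #     puts "#{f.path} (#{f.bytes} bytes)"
+        #   end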
+ object: :"container.file" + ) + end + + sig do + override.returns( + { + id: String, + bytes: Integer, + container_id: String, + created_at: Integer, + object: Symbol, + path: String, + source: String + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/containers/file_retrieve_params.rbi b/rbi/openai/models/containers/file_retrieve_params.rbi new file mode 100644 index 00000000..0e9bfd6e --- /dev/null +++ b/rbi/openai/models/containers/file_retrieve_params.rbi @@ -0,0 +1,40 @@ +# typed: strong + +module OpenAI + module Models + module Containers + class FileRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Containers::FileRetrieveParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :container_id + + sig do + params( + container_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(container_id:, request_options: {}) + end + + sig do + override.returns( + { container_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/containers/file_retrieve_response.rbi b/rbi/openai/models/containers/file_retrieve_response.rbi new file mode 100644 index 00000000..eea83ee4 --- /dev/null +++ b/rbi/openai/models/containers/file_retrieve_response.rbi @@ -0,0 +1,90 @@ +# typed: strong + +module OpenAI + module Models + module Containers + class FileRetrieveResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Containers::FileRetrieveResponse, + OpenAI::Internal::AnyHash + ) + end + + # Unique identifier for the file. + sig { returns(String) } + attr_accessor :id + + # Size of the file in bytes. + sig { returns(Integer) } + attr_accessor :bytes + + # The container this file belongs to. + sig { returns(String) } + attr_accessor :container_id + + # Unix timestamp (in seconds) when the file was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The type of this object (`container.file`). + sig { returns(Symbol) } + attr_accessor :object + + # Path of the file in the container. + sig { returns(String) } + attr_accessor :path + + # Source of the file (e.g., `user`, `assistant`). + sig { returns(String) } + attr_accessor :source + + sig do + params( + id: String, + bytes: Integer, + container_id: String, + created_at: Integer, + path: String, + source: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the file. + id:, + # Size of the file in bytes. + bytes:, + # The container this file belongs to. + container_id:, + # Unix timestamp (in seconds) when the file was created. + created_at:, + # Path of the file in the container. + path:, + # Source of the file (e.g., `user`, `assistant`). + source:, + # The type of this object (`container.file`). 
+ object: :"container.file" + ) + end + + sig do + override.returns( + { + id: String, + bytes: Integer, + container_id: String, + created_at: Integer, + object: Symbol, + path: String, + source: String + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/containers/files/content_retrieve_params.rbi b/rbi/openai/models/containers/files/content_retrieve_params.rbi new file mode 100644 index 00000000..76a5f6a1 --- /dev/null +++ b/rbi/openai/models/containers/files/content_retrieve_params.rbi @@ -0,0 +1,42 @@ +# typed: strong + +module OpenAI + module Models + module Containers + module Files + class ContentRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Containers::Files::ContentRetrieveParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :container_id + + sig do + params( + container_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(container_id:, request_options: {}) + end + + sig do + override.returns( + { container_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/conversations/computer_screenshot_content.rbi b/rbi/openai/models/conversations/computer_screenshot_content.rbi new file mode 100644 index 00000000..50a29357 --- /dev/null +++ b/rbi/openai/models/conversations/computer_screenshot_content.rbi @@ -0,0 +1,60 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class ComputerScreenshotContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ComputerScreenshotContent, + OpenAI::Internal::AnyHash + ) + end + + # The identifier of an uploaded file that contains the screenshot. + sig { returns(T.nilable(String)) } + attr_accessor :file_id + + # The URL of the screenshot image. + sig { returns(T.nilable(String)) } + attr_accessor :image_url + + # Specifies the event type. For a computer screenshot, this property is always set + # to `computer_screenshot`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + file_id: T.nilable(String), + image_url: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The identifier of an uploaded file that contains the screenshot. + file_id:, + # The URL of the screenshot image. + image_url:, + # Specifies the event type. For a computer screenshot, this property is always set + # to `computer_screenshot`. + type: :computer_screenshot + ) + end + + sig do + override.returns( + { + file_id: T.nilable(String), + image_url: T.nilable(String), + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/container_file_citation_body.rbi b/rbi/openai/models/conversations/container_file_citation_body.rbi new file mode 100644 index 00000000..d828926e --- /dev/null +++ b/rbi/openai/models/conversations/container_file_citation_body.rbi @@ -0,0 +1,82 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class ContainerFileCitationBody < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ContainerFileCitationBody, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the container file. 
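+        # A retrieval sketch for the content params above (method path assumed
+        # from the resource layout): the file ID is the positional path
+        # parameter and `container_id` is required alongside it:
+        #
+        #   content = client.containers.files.content.retrieve(
+        #     "cfile_abc123",
+        #     container_id: "cntr_abc123"
+        #   )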
+ sig { returns(String) } + attr_accessor :container_id + + # The index of the last character of the container file citation in the message. + sig { returns(Integer) } + attr_accessor :end_index + + # The ID of the file. + sig { returns(String) } + attr_accessor :file_id + + # The filename of the container file cited. + sig { returns(String) } + attr_accessor :filename + + # The index of the first character of the container file citation in the message. + sig { returns(Integer) } + attr_accessor :start_index + + # The type of the container file citation. Always `container_file_citation`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + container_id: String, + end_index: Integer, + file_id: String, + filename: String, + start_index: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the container file. + container_id:, + # The index of the last character of the container file citation in the message. + end_index:, + # The ID of the file. + file_id:, + # The filename of the container file cited. + filename:, + # The index of the first character of the container file citation in the message. + start_index:, + # The type of the container file citation. Always `container_file_citation`. + type: :container_file_citation + ) + end + + sig do + override.returns( + { + container_id: String, + end_index: Integer, + file_id: String, + filename: String, + start_index: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/conversation.rbi b/rbi/openai/models/conversations/conversation.rbi new file mode 100644 index 00000000..f60e90df --- /dev/null +++ b/rbi/openai/models/conversations/conversation.rbi @@ -0,0 +1,76 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class Conversation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::Conversation, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the conversation. + sig { returns(String) } + attr_accessor :id + + # The time at which the conversation was created, measured in seconds since the + # Unix epoch. + sig { returns(Integer) } + attr_accessor :created_at + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters. + sig { returns(T.anything) } + attr_accessor :metadata + + # The object type, which is always `conversation`. + sig { returns(Symbol) } + attr_accessor :object + + sig do + params( + id: String, + created_at: Integer, + metadata: T.anything, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the conversation. + id:, + # The time at which the conversation was created, measured in seconds since the + # Unix epoch. + created_at:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters. + metadata:, + # The object type, which is always `conversation`. 
+ object: :conversation + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + metadata: T.anything, + object: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/conversation_create_params.rbi b/rbi/openai/models/conversations/conversation_create_params.rbi new file mode 100644 index 00000000..72463e9b --- /dev/null +++ b/rbi/openai/models/conversations/conversation_create_params.rbi @@ -0,0 +1,144 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class ConversationCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationCreateParams, + OpenAI::Internal::AnyHash + ) + end + + # Initial items to include in the conversation context. You may add up to 20 items + # at a time. + sig do + returns( + T.nilable( + T::Array[ + T.any( + OpenAI::Responses::EasyInputMessage, + OpenAI::Responses::ResponseInputItem::Message, + OpenAI::Responses::ResponseOutputMessage, + OpenAI::Responses::ResponseFileSearchToolCall, + OpenAI::Responses::ResponseComputerToolCall, + OpenAI::Responses::ResponseInputItem::ComputerCallOutput, + OpenAI::Responses::ResponseFunctionWebSearch, + OpenAI::Responses::ResponseFunctionToolCall, + OpenAI::Responses::ResponseInputItem::FunctionCallOutput, + OpenAI::Responses::ResponseReasoningItem, + OpenAI::Responses::ResponseInputItem::ImageGenerationCall, + OpenAI::Responses::ResponseCodeInterpreterToolCall, + OpenAI::Responses::ResponseInputItem::LocalShellCall, + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput, + OpenAI::Responses::ResponseInputItem::McpListTools, + OpenAI::Responses::ResponseInputItem::McpApprovalRequest, + OpenAI::Responses::ResponseInputItem::McpApprovalResponse, + OpenAI::Responses::ResponseInputItem::McpCall, + OpenAI::Responses::ResponseCustomToolCallOutput, + OpenAI::Responses::ResponseCustomToolCall, + OpenAI::Responses::ResponseInputItem::ItemReference + ) + ] + ) + ) + end + attr_accessor :items + + # Set of 16 key-value pairs that can be attached to an object. Useful for storing + # additional information about the object in a structured format. 
+ sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + sig do + params( + items: + T.nilable( + T::Array[ + T.any( + OpenAI::Responses::EasyInputMessage::OrHash, + OpenAI::Responses::ResponseInputItem::Message::OrHash, + OpenAI::Responses::ResponseOutputMessage::OrHash, + OpenAI::Responses::ResponseFileSearchToolCall::OrHash, + OpenAI::Responses::ResponseComputerToolCall::OrHash, + OpenAI::Responses::ResponseInputItem::ComputerCallOutput::OrHash, + OpenAI::Responses::ResponseFunctionWebSearch::OrHash, + OpenAI::Responses::ResponseFunctionToolCall::OrHash, + OpenAI::Responses::ResponseInputItem::FunctionCallOutput::OrHash, + OpenAI::Responses::ResponseReasoningItem::OrHash, + OpenAI::Responses::ResponseInputItem::ImageGenerationCall::OrHash, + OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, + OpenAI::Responses::ResponseInputItem::LocalShellCall::OrHash, + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::OrHash, + OpenAI::Responses::ResponseInputItem::McpListTools::OrHash, + OpenAI::Responses::ResponseInputItem::McpApprovalRequest::OrHash, + OpenAI::Responses::ResponseInputItem::McpApprovalResponse::OrHash, + OpenAI::Responses::ResponseInputItem::McpCall::OrHash, + OpenAI::Responses::ResponseCustomToolCallOutput::OrHash, + OpenAI::Responses::ResponseCustomToolCall::OrHash, + OpenAI::Responses::ResponseInputItem::ItemReference::OrHash + ) + ] + ), + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Initial items to include in the conversation context. You may add up to 20 items + # at a time. + items: nil, + # Set of 16 key-value pairs that can be attached to an object. Useful for storing + # additional information about the object in a structured format. 
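+        # A creation sketch for the conversation params above (illustrative):
+        # `items` seeds up to 20 entries of the input-item union, and `metadata`
+        # attaches string key-value pairs. Assumes a configured `client`:
+        #
+        #   conversation = client.conversations.create(
+        #     items: [{type: :message, role: :user, content: "Summarize our roadmap."}],
+        #     metadata: {project: "docs-bot"}
+        #   )
+        #   puts conversation.id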
+ metadata: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + items: + T.nilable( + T::Array[ + T.any( + OpenAI::Responses::EasyInputMessage, + OpenAI::Responses::ResponseInputItem::Message, + OpenAI::Responses::ResponseOutputMessage, + OpenAI::Responses::ResponseFileSearchToolCall, + OpenAI::Responses::ResponseComputerToolCall, + OpenAI::Responses::ResponseInputItem::ComputerCallOutput, + OpenAI::Responses::ResponseFunctionWebSearch, + OpenAI::Responses::ResponseFunctionToolCall, + OpenAI::Responses::ResponseInputItem::FunctionCallOutput, + OpenAI::Responses::ResponseReasoningItem, + OpenAI::Responses::ResponseInputItem::ImageGenerationCall, + OpenAI::Responses::ResponseCodeInterpreterToolCall, + OpenAI::Responses::ResponseInputItem::LocalShellCall, + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput, + OpenAI::Responses::ResponseInputItem::McpListTools, + OpenAI::Responses::ResponseInputItem::McpApprovalRequest, + OpenAI::Responses::ResponseInputItem::McpApprovalResponse, + OpenAI::Responses::ResponseInputItem::McpCall, + OpenAI::Responses::ResponseCustomToolCallOutput, + OpenAI::Responses::ResponseCustomToolCall, + OpenAI::Responses::ResponseInputItem::ItemReference + ) + ] + ), + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/conversation_delete_params.rbi b/rbi/openai/models/conversations/conversation_delete_params.rbi new file mode 100644 index 00000000..672f47bf --- /dev/null +++ b/rbi/openai/models/conversations/conversation_delete_params.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class ConversationDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationDeleteParams, + OpenAI::Internal::AnyHash + ) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/conversation_deleted.rbi b/rbi/openai/models/conversations/conversation_deleted.rbi new file mode 100644 index 00000000..eabe5dd0 --- /dev/null +++ b/rbi/openai/models/conversations/conversation_deleted.rbi @@ -0,0 +1,40 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class ConversationDeleted < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationDeleted, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :id + + sig { returns(T::Boolean) } + attr_accessor :deleted + + sig { returns(Symbol) } + attr_accessor :object + + sig do + params(id: String, deleted: T::Boolean, object: Symbol).returns( + T.attached_class + ) + end + def self.new(id:, deleted:, object: :"conversation.deleted") + end + + sig do + override.returns({ id: String, deleted: T::Boolean, object: Symbol }) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/conversation_deleted_resource.rbi b/rbi/openai/models/conversations/conversation_deleted_resource.rbi new file mode 100644 index 00000000..a415a346 --- /dev/null +++ 
b/rbi/openai/models/conversations/conversation_deleted_resource.rbi @@ -0,0 +1,40 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class ConversationDeletedResource < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationDeletedResource, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :id + + sig { returns(T::Boolean) } + attr_accessor :deleted + + sig { returns(Symbol) } + attr_accessor :object + + sig do + params(id: String, deleted: T::Boolean, object: Symbol).returns( + T.attached_class + ) + end + def self.new(id:, deleted:, object: :"conversation.deleted") + end + + sig do + override.returns({ id: String, deleted: T::Boolean, object: Symbol }) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/conversation_item.rbi b/rbi/openai/models/conversations/conversation_item.rbi new file mode 100644 index 00000000..d4c14870 --- /dev/null +++ b/rbi/openai/models/conversations/conversation_item.rbi @@ -0,0 +1,835 @@ +# typed: strong + +module OpenAI + module Models + ConversationItem = Conversations::ConversationItem + + module Conversations + # A single item within a conversation. The set of possible types are the same as + # the `output` type of a + # [Response object](https://platform.openai.com/docs/api-reference/responses/object#responses/object-output). + module ConversationItem + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Conversations::Message, + OpenAI::Responses::ResponseFunctionToolCallItem, + OpenAI::Responses::ResponseFunctionToolCallOutputItem, + OpenAI::Responses::ResponseFileSearchToolCall, + OpenAI::Responses::ResponseFunctionWebSearch, + OpenAI::Conversations::ConversationItem::ImageGenerationCall, + OpenAI::Responses::ResponseComputerToolCall, + OpenAI::Responses::ResponseComputerToolCallOutputItem, + OpenAI::Responses::ResponseReasoningItem, + OpenAI::Responses::ResponseCodeInterpreterToolCall, + OpenAI::Conversations::ConversationItem::LocalShellCall, + OpenAI::Conversations::ConversationItem::LocalShellCallOutput, + OpenAI::Conversations::ConversationItem::McpListTools, + OpenAI::Conversations::ConversationItem::McpApprovalRequest, + OpenAI::Conversations::ConversationItem::McpApprovalResponse, + OpenAI::Conversations::ConversationItem::McpCall, + OpenAI::Responses::ResponseCustomToolCall, + OpenAI::Responses::ResponseCustomToolCallOutput + ) + end + + class ImageGenerationCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationItem::ImageGenerationCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the image generation call. + sig { returns(String) } + attr_accessor :id + + # The generated image encoded in base64. + sig { returns(T.nilable(String)) } + attr_accessor :result + + # The status of the image generation call. + sig do + returns( + OpenAI::Conversations::ConversationItem::ImageGenerationCall::Status::TaggedSymbol + ) + end + attr_accessor :status + + # The type of the image generation call. Always `image_generation_call`. + sig { returns(Symbol) } + attr_accessor :type + + # An image generation request made by the model. + sig do + params( + id: String, + result: T.nilable(String), + status: + OpenAI::Conversations::ConversationItem::ImageGenerationCall::Status::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the image generation call. 
+ id:, + # The generated image encoded in base64. + result:, + # The status of the image generation call. + status:, + # The type of the image generation call. Always `image_generation_call`. + type: :image_generation_call + ) + end + + sig do + override.returns( + { + id: String, + result: T.nilable(String), + status: + OpenAI::Conversations::ConversationItem::ImageGenerationCall::Status::TaggedSymbol, + type: Symbol + } + ) + end + def to_hash + end + + # The status of the image generation call. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Conversations::ConversationItem::ImageGenerationCall::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Conversations::ConversationItem::ImageGenerationCall::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Conversations::ConversationItem::ImageGenerationCall::Status::TaggedSymbol + ) + GENERATING = + T.let( + :generating, + OpenAI::Conversations::ConversationItem::ImageGenerationCall::Status::TaggedSymbol + ) + FAILED = + T.let( + :failed, + OpenAI::Conversations::ConversationItem::ImageGenerationCall::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Conversations::ConversationItem::ImageGenerationCall::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class LocalShellCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationItem::LocalShellCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the local shell call. + sig { returns(String) } + attr_accessor :id + + # Execute a shell command on the server. + sig do + returns( + OpenAI::Conversations::ConversationItem::LocalShellCall::Action + ) + end + attr_reader :action + + sig do + params( + action: + OpenAI::Conversations::ConversationItem::LocalShellCall::Action::OrHash + ).void + end + attr_writer :action + + # The unique ID of the local shell tool call generated by the model. + sig { returns(String) } + attr_accessor :call_id + + # The status of the local shell call. + sig do + returns( + OpenAI::Conversations::ConversationItem::LocalShellCall::Status::TaggedSymbol + ) + end + attr_accessor :status + + # The type of the local shell call. Always `local_shell_call`. + sig { returns(Symbol) } + attr_accessor :type + + # A tool call to run a command on the local shell. + sig do + params( + id: String, + action: + OpenAI::Conversations::ConversationItem::LocalShellCall::Action::OrHash, + call_id: String, + status: + OpenAI::Conversations::ConversationItem::LocalShellCall::Status::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the local shell call. + id:, + # Execute a shell command on the server. + action:, + # The unique ID of the local shell tool call generated by the model. + call_id:, + # The status of the local shell call. + status:, + # The type of the local shell call. Always `local_shell_call`. 
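+          # A dispatch sketch over the `ConversationItem` union above: because
+          # the variants are distinct model classes, a `case`/`when` on the
+          # item's class is enough to branch. `conversations.items.list` is
+          # assumed from the resource layout:
+          #
+          #   client.conversations.items.list("conv_abc123").data.each do |item|
+          #     case item
+          #     when OpenAI::Conversations::Message
+          #       puts "message: #{item.id}"
+          #     when OpenAI::Conversations::ConversationItem::LocalShellCall
+          #       puts "shell call: #{item.action.command.join(" ")}"
+          #     when OpenAI::Conversations::ConversationItem::ImageGenerationCall
+          #       puts "image generation: #{item.status}"
+          #     end
+          #   end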
+ type: :local_shell_call + ) + end + + sig do + override.returns( + { + id: String, + action: + OpenAI::Conversations::ConversationItem::LocalShellCall::Action, + call_id: String, + status: + OpenAI::Conversations::ConversationItem::LocalShellCall::Status::TaggedSymbol, + type: Symbol + } + ) + end + def to_hash + end + + class Action < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationItem::LocalShellCall::Action, + OpenAI::Internal::AnyHash + ) + end + + # The command to run. + sig { returns(T::Array[String]) } + attr_accessor :command + + # Environment variables to set for the command. + sig { returns(T::Hash[Symbol, String]) } + attr_accessor :env + + # The type of the local shell action. Always `exec`. + sig { returns(Symbol) } + attr_accessor :type + + # Optional timeout in milliseconds for the command. + sig { returns(T.nilable(Integer)) } + attr_accessor :timeout_ms + + # Optional user to run the command as. + sig { returns(T.nilable(String)) } + attr_accessor :user + + # Optional working directory to run the command in. + sig { returns(T.nilable(String)) } + attr_accessor :working_directory + + # Execute a shell command on the server. + sig do + params( + command: T::Array[String], + env: T::Hash[Symbol, String], + timeout_ms: T.nilable(Integer), + user: T.nilable(String), + working_directory: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The command to run. + command:, + # Environment variables to set for the command. + env:, + # Optional timeout in milliseconds for the command. + timeout_ms: nil, + # Optional user to run the command as. + user: nil, + # Optional working directory to run the command in. + working_directory: nil, + # The type of the local shell action. Always `exec`. + type: :exec + ) + end + + sig do + override.returns( + { + command: T::Array[String], + env: T::Hash[Symbol, String], + type: Symbol, + timeout_ms: T.nilable(Integer), + user: T.nilable(String), + working_directory: T.nilable(String) + } + ) + end + def to_hash + end + end + + # The status of the local shell call. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Conversations::ConversationItem::LocalShellCall::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Conversations::ConversationItem::LocalShellCall::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Conversations::ConversationItem::LocalShellCall::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Conversations::ConversationItem::LocalShellCall::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Conversations::ConversationItem::LocalShellCall::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class LocalShellCallOutput < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationItem::LocalShellCallOutput, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the local shell tool call generated by the model. + sig { returns(String) } + attr_accessor :id + + # A JSON string of the output of the local shell tool call. + sig { returns(String) } + attr_accessor :output + + # The type of the local shell tool call output. Always `local_shell_call_output`. + sig { returns(Symbol) } + attr_accessor :type + + # The status of the item. 
One of `in_progress`, `completed`, or `incomplete`. + sig do + returns( + T.nilable( + OpenAI::Conversations::ConversationItem::LocalShellCallOutput::Status::TaggedSymbol + ) + ) + end + attr_accessor :status + + # The output of a local shell tool call. + sig do + params( + id: String, + output: String, + status: + T.nilable( + OpenAI::Conversations::ConversationItem::LocalShellCallOutput::Status::OrSymbol + ), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the local shell tool call generated by the model. + id:, + # A JSON string of the output of the local shell tool call. + output:, + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + status: nil, + # The type of the local shell tool call output. Always `local_shell_call_output`. + type: :local_shell_call_output + ) + end + + sig do + override.returns( + { + id: String, + output: String, + type: Symbol, + status: + T.nilable( + OpenAI::Conversations::ConversationItem::LocalShellCallOutput::Status::TaggedSymbol + ) + } + ) + end + def to_hash + end + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Conversations::ConversationItem::LocalShellCallOutput::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Conversations::ConversationItem::LocalShellCallOutput::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Conversations::ConversationItem::LocalShellCallOutput::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Conversations::ConversationItem::LocalShellCallOutput::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Conversations::ConversationItem::LocalShellCallOutput::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class McpListTools < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationItem::McpListTools, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the list. + sig { returns(String) } + attr_accessor :id + + # The label of the MCP server. + sig { returns(String) } + attr_accessor :server_label + + # The tools available on the server. + sig do + returns( + T::Array[ + OpenAI::Conversations::ConversationItem::McpListTools::Tool + ] + ) + end + attr_accessor :tools + + # The type of the item. Always `mcp_list_tools`. + sig { returns(Symbol) } + attr_accessor :type + + # Error message if the server could not list tools. + sig { returns(T.nilable(String)) } + attr_accessor :error + + # A list of tools available on an MCP server. + sig do + params( + id: String, + server_label: String, + tools: + T::Array[ + OpenAI::Conversations::ConversationItem::McpListTools::Tool::OrHash + ], + error: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the list. + id:, + # The label of the MCP server. + server_label:, + # The tools available on the server. + tools:, + # Error message if the server could not list tools. + error: nil, + # The type of the item. Always `mcp_list_tools`. 
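+            # On the read side, a brief sketch (assuming `item` is a received
+            # McpListTools instance):
+            #
+            #   item.tools.each { |tool| puts "#{tool.name}: #{tool.description}" }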
+ type: :mcp_list_tools + ) + end + + sig do + override.returns( + { + id: String, + server_label: String, + tools: + T::Array[ + OpenAI::Conversations::ConversationItem::McpListTools::Tool + ], + type: Symbol, + error: T.nilable(String) + } + ) + end + def to_hash + end + + class Tool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationItem::McpListTools::Tool, + OpenAI::Internal::AnyHash + ) + end + + # The JSON schema describing the tool's input. + sig { returns(T.anything) } + attr_accessor :input_schema + + # The name of the tool. + sig { returns(String) } + attr_accessor :name + + # Additional annotations about the tool. + sig { returns(T.nilable(T.anything)) } + attr_accessor :annotations + + # The description of the tool. + sig { returns(T.nilable(String)) } + attr_accessor :description + + # A tool available on an MCP server. + sig do + params( + input_schema: T.anything, + name: String, + annotations: T.nilable(T.anything), + description: T.nilable(String) + ).returns(T.attached_class) + end + def self.new( + # The JSON schema describing the tool's input. + input_schema:, + # The name of the tool. + name:, + # Additional annotations about the tool. + annotations: nil, + # The description of the tool. + description: nil + ) + end + + sig do + override.returns( + { + input_schema: T.anything, + name: String, + annotations: T.nilable(T.anything), + description: T.nilable(String) + } + ) + end + def to_hash + end + end + end + + class McpApprovalRequest < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationItem::McpApprovalRequest, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the approval request. + sig { returns(String) } + attr_accessor :id + + # A JSON string of arguments for the tool. + sig { returns(String) } + attr_accessor :arguments + + # The name of the tool to run. + sig { returns(String) } + attr_accessor :name + + # The label of the MCP server making the request. + sig { returns(String) } + attr_accessor :server_label + + # The type of the item. Always `mcp_approval_request`. + sig { returns(Symbol) } + attr_accessor :type + + # A request for human approval of a tool invocation. + sig do + params( + id: String, + arguments: String, + name: String, + server_label: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the approval request. + id:, + # A JSON string of arguments for the tool. + arguments:, + # The name of the tool to run. + name:, + # The label of the MCP server making the request. + server_label:, + # The type of the item. Always `mcp_approval_request`. + type: :mcp_approval_request + ) + end + + sig do + override.returns( + { + id: String, + arguments: String, + name: String, + server_label: String, + type: Symbol + } + ) + end + def to_hash + end + end + + class McpApprovalResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationItem::McpApprovalResponse, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the approval response + sig { returns(String) } + attr_accessor :id + + # The ID of the approval request being answered. + sig { returns(String) } + attr_accessor :approval_request_id + + # Whether the request was approved. + sig { returns(T::Boolean) } + attr_accessor :approve + + # The type of the item. Always `mcp_approval_response`. 
+ sig { returns(Symbol) } + attr_accessor :type + + # Optional reason for the decision. + sig { returns(T.nilable(String)) } + attr_accessor :reason + + # A response to an MCP approval request. + sig do + params( + id: String, + approval_request_id: String, + approve: T::Boolean, + reason: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the approval response + id:, + # The ID of the approval request being answered. + approval_request_id:, + # Whether the request was approved. + approve:, + # Optional reason for the decision. + reason: nil, + # The type of the item. Always `mcp_approval_response`. + type: :mcp_approval_response + ) + end + + sig do + override.returns( + { + id: String, + approval_request_id: String, + approve: T::Boolean, + type: Symbol, + reason: T.nilable(String) + } + ) + end + def to_hash + end + end + + class McpCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationItem::McpCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the tool call. + sig { returns(String) } + attr_accessor :id + + # A JSON string of the arguments passed to the tool. + sig { returns(String) } + attr_accessor :arguments + + # The name of the tool that was run. + sig { returns(String) } + attr_accessor :name + + # The label of the MCP server running the tool. + sig { returns(String) } + attr_accessor :server_label + + # The type of the item. Always `mcp_call`. + sig { returns(Symbol) } + attr_accessor :type + + # The error from the tool call, if any. + sig { returns(T.nilable(String)) } + attr_accessor :error + + # The output from the tool call. + sig { returns(T.nilable(String)) } + attr_accessor :output + + # An invocation of a tool on an MCP server. + sig do + params( + id: String, + arguments: String, + name: String, + server_label: String, + error: T.nilable(String), + output: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the tool call. + id:, + # A JSON string of the arguments passed to the tool. + arguments:, + # The name of the tool that was run. + name:, + # The label of the MCP server running the tool. + server_label:, + # The error from the tool call, if any. + error: nil, + # The output from the tool call. + output: nil, + # The type of the item. Always `mcp_call`. + type: :mcp_call + ) + end + + sig do + override.returns( + { + id: String, + arguments: String, + name: String, + server_label: String, + type: Symbol, + error: T.nilable(String), + output: T.nilable(String) + } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[OpenAI::Conversations::ConversationItem::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/conversations/conversation_item_list.rbi b/rbi/openai/models/conversations/conversation_item_list.rbi new file mode 100644 index 00000000..f8931151 --- /dev/null +++ b/rbi/openai/models/conversations/conversation_item_list.rbi @@ -0,0 +1,101 @@ +# typed: strong + +module OpenAI + module Models + ConversationItemList = Conversations::ConversationItemList + + module Conversations + class ConversationItemList < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationItemList, + OpenAI::Internal::AnyHash + ) + end + + # A list of conversation items. 
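+        # Because `data` holds a union, consumers typically branch on the
+        # concrete class; a minimal sketch (assuming `list` is a returned
+        # ConversationItemList):
+        #
+        #   list.data.each do |item|
+        #     case item
+        #     when OpenAI::Conversations::Message
+        #       puts item.content.length
+        #     when OpenAI::Conversations::ConversationItem::McpCall
+        #       puts item.output
+        #     end
+        #   end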
+ sig do + returns(T::Array[OpenAI::Conversations::ConversationItem::Variants]) + end + attr_accessor :data + + # The ID of the first item in the list. + sig { returns(String) } + attr_accessor :first_id + + # Whether there are more items available. + sig { returns(T::Boolean) } + attr_accessor :has_more + + # The ID of the last item in the list. + sig { returns(String) } + attr_accessor :last_id + + # The type of object returned, must be `list`. + sig { returns(Symbol) } + attr_accessor :object + + # A list of Conversation items. + sig do + params( + data: + T::Array[ + T.any( + OpenAI::Conversations::Message::OrHash, + OpenAI::Responses::ResponseFunctionToolCallItem::OrHash, + OpenAI::Responses::ResponseFunctionToolCallOutputItem::OrHash, + OpenAI::Responses::ResponseFileSearchToolCall::OrHash, + OpenAI::Responses::ResponseFunctionWebSearch::OrHash, + OpenAI::Conversations::ConversationItem::ImageGenerationCall::OrHash, + OpenAI::Responses::ResponseComputerToolCall::OrHash, + OpenAI::Responses::ResponseComputerToolCallOutputItem::OrHash, + OpenAI::Responses::ResponseReasoningItem::OrHash, + OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, + OpenAI::Conversations::ConversationItem::LocalShellCall::OrHash, + OpenAI::Conversations::ConversationItem::LocalShellCallOutput::OrHash, + OpenAI::Conversations::ConversationItem::McpListTools::OrHash, + OpenAI::Conversations::ConversationItem::McpApprovalRequest::OrHash, + OpenAI::Conversations::ConversationItem::McpApprovalResponse::OrHash, + OpenAI::Conversations::ConversationItem::McpCall::OrHash, + OpenAI::Responses::ResponseCustomToolCall::OrHash, + OpenAI::Responses::ResponseCustomToolCallOutput::OrHash + ) + ], + first_id: String, + has_more: T::Boolean, + last_id: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # A list of conversation items. + data:, + # The ID of the first item in the list. + first_id:, + # Whether there are more items available. + has_more:, + # The ID of the last item in the list. + last_id:, + # The type of object returned, must be `list`. 
+ object: :list + ) + end + + sig do + override.returns( + { + data: T::Array[OpenAI::Conversations::ConversationItem::Variants], + first_id: String, + has_more: T::Boolean, + last_id: String, + object: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/conversation_retrieve_params.rbi b/rbi/openai/models/conversations/conversation_retrieve_params.rbi new file mode 100644 index 00000000..3d403724 --- /dev/null +++ b/rbi/openai/models/conversations/conversation_retrieve_params.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class ConversationRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationRetrieveParams, + OpenAI::Internal::AnyHash + ) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/conversation_update_params.rbi b/rbi/openai/models/conversations/conversation_update_params.rbi new file mode 100644 index 00000000..5edfee7f --- /dev/null +++ b/rbi/openai/models/conversations/conversation_update_params.rbi @@ -0,0 +1,56 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class ConversationUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ConversationUpdateParams, + OpenAI::Internal::AnyHash + ) + end + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters. + sig { returns(T::Hash[Symbol, String]) } + attr_accessor :metadata + + sig do + params( + metadata: T::Hash[Symbol, String], + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters. 
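+          # A hypothetical update call (assumes a configured `OpenAI::Client`
+          # named `client` and an example conversation ID):
+          #
+          #   client.conversations.update("conv_123", metadata: {topic: "demo"})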
+ metadata:, + request_options: {} + ) + end + + sig do + override.returns( + { + metadata: T::Hash[Symbol, String], + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/file_citation_body.rbi b/rbi/openai/models/conversations/file_citation_body.rbi new file mode 100644 index 00000000..ec14bf91 --- /dev/null +++ b/rbi/openai/models/conversations/file_citation_body.rbi @@ -0,0 +1,61 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class FileCitationBody < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::FileCitationBody, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the file. + sig { returns(String) } + attr_accessor :file_id + + # The filename of the file cited. + sig { returns(String) } + attr_accessor :filename + + # The index of the file in the list of files. + sig { returns(Integer) } + attr_accessor :index + + # The type of the file citation. Always `file_citation`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + file_id: String, + filename: String, + index: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the file. + file_id:, + # The filename of the file cited. + filename:, + # The index of the file in the list of files. + index:, + # The type of the file citation. Always `file_citation`. + type: :file_citation + ) + end + + sig do + override.returns( + { file_id: String, filename: String, index: Integer, type: Symbol } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/input_file_content.rbi b/rbi/openai/models/conversations/input_file_content.rbi new file mode 100644 index 00000000..5516a933 --- /dev/null +++ b/rbi/openai/models/conversations/input_file_content.rbi @@ -0,0 +1,72 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class InputFileContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::InputFileContent, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the file to be sent to the model. + sig { returns(T.nilable(String)) } + attr_accessor :file_id + + # The type of the input item. Always `input_file`. + sig { returns(Symbol) } + attr_accessor :type + + # The URL of the file to be sent to the model. + sig { returns(T.nilable(String)) } + attr_reader :file_url + + sig { params(file_url: String).void } + attr_writer :file_url + + # The name of the file to be sent to the model. + sig { returns(T.nilable(String)) } + attr_reader :filename + + sig { params(filename: String).void } + attr_writer :filename + + sig do + params( + file_id: T.nilable(String), + file_url: String, + filename: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the file to be sent to the model. + file_id:, + # The URL of the file to be sent to the model. + file_url: nil, + # The name of the file to be sent to the model. + filename: nil, + # The type of the input item. Always `input_file`. 
+ type: :input_file + ) + end + + sig do + override.returns( + { + file_id: T.nilable(String), + type: Symbol, + file_url: String, + filename: String + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/input_image_content.rbi b/rbi/openai/models/conversations/input_image_content.rbi new file mode 100644 index 00000000..b9b48a84 --- /dev/null +++ b/rbi/openai/models/conversations/input_image_content.rbi @@ -0,0 +1,113 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class InputImageContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::InputImageContent, + OpenAI::Internal::AnyHash + ) + end + + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + sig do + returns( + OpenAI::Conversations::InputImageContent::Detail::TaggedSymbol + ) + end + attr_accessor :detail + + # The ID of the file to be sent to the model. + sig { returns(T.nilable(String)) } + attr_accessor :file_id + + # The URL of the image to be sent to the model. A fully qualified URL or base64 + # encoded image in a data URL. + sig { returns(T.nilable(String)) } + attr_accessor :image_url + + # The type of the input item. Always `input_image`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + detail: OpenAI::Conversations::InputImageContent::Detail::OrSymbol, + file_id: T.nilable(String), + image_url: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + detail:, + # The ID of the file to be sent to the model. + file_id:, + # The URL of the image to be sent to the model. A fully qualified URL or base64 + # encoded image in a data URL. + image_url:, + # The type of the input item. Always `input_image`. + type: :input_image + ) + end + + sig do + override.returns( + { + detail: + OpenAI::Conversations::InputImageContent::Detail::TaggedSymbol, + file_id: T.nilable(String), + image_url: T.nilable(String), + type: Symbol + } + ) + end + def to_hash + end + + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + module Detail + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Conversations::InputImageContent::Detail) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = + T.let( + :low, + OpenAI::Conversations::InputImageContent::Detail::TaggedSymbol + ) + HIGH = + T.let( + :high, + OpenAI::Conversations::InputImageContent::Detail::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::Conversations::InputImageContent::Detail::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Conversations::InputImageContent::Detail::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/conversations/input_text_content.rbi b/rbi/openai/models/conversations/input_text_content.rbi new file mode 100644 index 00000000..1f8e0760 --- /dev/null +++ b/rbi/openai/models/conversations/input_text_content.rbi @@ -0,0 +1,38 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class InputTextContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::InputTextContent, + OpenAI::Internal::AnyHash + ) + end + + # The text input to the model. 
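+        # Since `type` defaults to `:input_text`, a construction sketch needs
+        # only the text itself:
+        #
+        #   OpenAI::Conversations::InputTextContent.new(text: "Hello!")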
+ sig { returns(String) } + attr_accessor :text + + # The type of the input item. Always `input_text`. + sig { returns(Symbol) } + attr_accessor :type + + sig { params(text: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The text input to the model. + text:, + # The type of the input item. Always `input_text`. + type: :input_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/item_create_params.rbi b/rbi/openai/models/conversations/item_create_params.rbi new file mode 100644 index 00000000..2c68cbe9 --- /dev/null +++ b/rbi/openai/models/conversations/item_create_params.rbi @@ -0,0 +1,150 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class ItemCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ItemCreateParams, + OpenAI::Internal::AnyHash + ) + end + + # The items to add to the conversation. You may add up to 20 items at a time. + sig do + returns( + T::Array[ + T.any( + OpenAI::Responses::EasyInputMessage, + OpenAI::Responses::ResponseInputItem::Message, + OpenAI::Responses::ResponseOutputMessage, + OpenAI::Responses::ResponseFileSearchToolCall, + OpenAI::Responses::ResponseComputerToolCall, + OpenAI::Responses::ResponseInputItem::ComputerCallOutput, + OpenAI::Responses::ResponseFunctionWebSearch, + OpenAI::Responses::ResponseFunctionToolCall, + OpenAI::Responses::ResponseInputItem::FunctionCallOutput, + OpenAI::Responses::ResponseReasoningItem, + OpenAI::Responses::ResponseInputItem::ImageGenerationCall, + OpenAI::Responses::ResponseCodeInterpreterToolCall, + OpenAI::Responses::ResponseInputItem::LocalShellCall, + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput, + OpenAI::Responses::ResponseInputItem::McpListTools, + OpenAI::Responses::ResponseInputItem::McpApprovalRequest, + OpenAI::Responses::ResponseInputItem::McpApprovalResponse, + OpenAI::Responses::ResponseInputItem::McpCall, + OpenAI::Responses::ResponseCustomToolCallOutput, + OpenAI::Responses::ResponseCustomToolCall, + OpenAI::Responses::ResponseInputItem::ItemReference + ) + ] + ) + end + attr_accessor :items + + # Additional fields to include in the response. See the `include` parameter for + # [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + # for more information. 
+ sig do + returns( + T.nilable(T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]) + ) + end + attr_reader :include + + sig do + params( + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol] + ).void + end + attr_writer :include + + sig do + params( + items: + T::Array[ + T.any( + OpenAI::Responses::EasyInputMessage::OrHash, + OpenAI::Responses::ResponseInputItem::Message::OrHash, + OpenAI::Responses::ResponseOutputMessage::OrHash, + OpenAI::Responses::ResponseFileSearchToolCall::OrHash, + OpenAI::Responses::ResponseComputerToolCall::OrHash, + OpenAI::Responses::ResponseInputItem::ComputerCallOutput::OrHash, + OpenAI::Responses::ResponseFunctionWebSearch::OrHash, + OpenAI::Responses::ResponseFunctionToolCall::OrHash, + OpenAI::Responses::ResponseInputItem::FunctionCallOutput::OrHash, + OpenAI::Responses::ResponseReasoningItem::OrHash, + OpenAI::Responses::ResponseInputItem::ImageGenerationCall::OrHash, + OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, + OpenAI::Responses::ResponseInputItem::LocalShellCall::OrHash, + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::OrHash, + OpenAI::Responses::ResponseInputItem::McpListTools::OrHash, + OpenAI::Responses::ResponseInputItem::McpApprovalRequest::OrHash, + OpenAI::Responses::ResponseInputItem::McpApprovalResponse::OrHash, + OpenAI::Responses::ResponseInputItem::McpCall::OrHash, + OpenAI::Responses::ResponseCustomToolCallOutput::OrHash, + OpenAI::Responses::ResponseCustomToolCall::OrHash, + OpenAI::Responses::ResponseInputItem::ItemReference::OrHash + ) + ], + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The items to add to the conversation. You may add up to 20 items at a time. + items:, + # Additional fields to include in the response. See the `include` parameter for + # [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + # for more information. 
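+          # A hypothetical call shape (assumes a configured `OpenAI::Client`
+          # named `client`; plain hashes coerce into the item variants):
+          #
+          #   client.conversations.items.create(
+          #     "conv_abc123",
+          #     items: [{type: :message, role: :user, content: "Hi there"}]
+          #   )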
+ include: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + items: + T::Array[ + T.any( + OpenAI::Responses::EasyInputMessage, + OpenAI::Responses::ResponseInputItem::Message, + OpenAI::Responses::ResponseOutputMessage, + OpenAI::Responses::ResponseFileSearchToolCall, + OpenAI::Responses::ResponseComputerToolCall, + OpenAI::Responses::ResponseInputItem::ComputerCallOutput, + OpenAI::Responses::ResponseFunctionWebSearch, + OpenAI::Responses::ResponseFunctionToolCall, + OpenAI::Responses::ResponseInputItem::FunctionCallOutput, + OpenAI::Responses::ResponseReasoningItem, + OpenAI::Responses::ResponseInputItem::ImageGenerationCall, + OpenAI::Responses::ResponseCodeInterpreterToolCall, + OpenAI::Responses::ResponseInputItem::LocalShellCall, + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput, + OpenAI::Responses::ResponseInputItem::McpListTools, + OpenAI::Responses::ResponseInputItem::McpApprovalRequest, + OpenAI::Responses::ResponseInputItem::McpApprovalResponse, + OpenAI::Responses::ResponseInputItem::McpCall, + OpenAI::Responses::ResponseCustomToolCallOutput, + OpenAI::Responses::ResponseCustomToolCall, + OpenAI::Responses::ResponseInputItem::ItemReference + ) + ], + include: + T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/item_delete_params.rbi b/rbi/openai/models/conversations/item_delete_params.rbi new file mode 100644 index 00000000..d0464e8e --- /dev/null +++ b/rbi/openai/models/conversations/item_delete_params.rbi @@ -0,0 +1,40 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class ItemDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ItemDeleteParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :conversation_id + + sig do + params( + conversation_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(conversation_id:, request_options: {}) + end + + sig do + override.returns( + { conversation_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/item_list_params.rbi b/rbi/openai/models/conversations/item_list_params.rbi new file mode 100644 index 00000000..2239e139 --- /dev/null +++ b/rbi/openai/models/conversations/item_list_params.rbi @@ -0,0 +1,174 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class ItemListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ItemListParams, + OpenAI::Internal::AnyHash + ) + end + + # An item ID to list items after, used in pagination. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. 
+ # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `message.output_text.logprobs`: Include logprobs with assistant messages. + # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + # tokens in reasoning item outputs. This enables reasoning items to be used in + # multi-turn conversations when using the Responses API statelessly (like when + # the `store` parameter is set to `false`, or when an organization is enrolled + # in the zero data retention program). + sig do + returns( + T.nilable(T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]) + ) + end + attr_reader :include + + sig do + params( + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol] + ).void + end + attr_writer :include + + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # The order to return the input items in. Default is `desc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. + sig do + returns( + T.nilable(OpenAI::Conversations::ItemListParams::Order::OrSymbol) + ) + end + attr_reader :order + + sig do + params( + order: OpenAI::Conversations::ItemListParams::Order::OrSymbol + ).void + end + attr_writer :order + + sig do + params( + after: String, + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + limit: Integer, + order: OpenAI::Conversations::ItemListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # An item ID to list items after, used in pagination. + after: nil, + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `message.output_text.logprobs`: Include logprobs with assistant messages. + # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + # tokens in reasoning item outputs. This enables reasoning items to be used in + # multi-turn conversations when using the Responses API statelessly (like when + # the `store` parameter is set to `false`, or when an organization is enrolled + # in the zero data retention program). + include: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # The order to return the input items in. Default is `desc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. 
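+          # For example, a first ascending page of ten items (hypothetical ID,
+          # assumes a configured `client`):
+          #
+          #   client.conversations.items.list("conv_123", limit: 10, order: :asc)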
+ order: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + include: + T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + limit: Integer, + order: OpenAI::Conversations::ItemListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # The order to return the input items in. Default is `desc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. + module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Conversations::ItemListParams::Order) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = + T.let( + :asc, + OpenAI::Conversations::ItemListParams::Order::TaggedSymbol + ) + DESC = + T.let( + :desc, + OpenAI::Conversations::ItemListParams::Order::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Conversations::ItemListParams::Order::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/conversations/item_retrieve_params.rbi b/rbi/openai/models/conversations/item_retrieve_params.rbi new file mode 100644 index 00000000..e175c837 --- /dev/null +++ b/rbi/openai/models/conversations/item_retrieve_params.rbi @@ -0,0 +1,70 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class ItemRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::ItemRetrieveParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :conversation_id + + # Additional fields to include in the response. See the `include` parameter for + # [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + # for more information. + sig do + returns( + T.nilable(T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]) + ) + end + attr_reader :include + + sig do + params( + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol] + ).void + end + attr_writer :include + + sig do + params( + conversation_id: String, + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + conversation_id:, + # Additional fields to include in the response. See the `include` parameter for + # [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + # for more information. 
+ include: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + conversation_id: String, + include: + T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/lob_prob.rbi b/rbi/openai/models/conversations/lob_prob.rbi new file mode 100644 index 00000000..737d318b --- /dev/null +++ b/rbi/openai/models/conversations/lob_prob.rbi @@ -0,0 +1,50 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class LobProb < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Conversations::LobProb, OpenAI::Internal::AnyHash) + end + + sig { returns(String) } + attr_accessor :token + + sig { returns(T::Array[Integer]) } + attr_accessor :bytes + + sig { returns(Float) } + attr_accessor :logprob + + sig { returns(T::Array[OpenAI::Conversations::TopLogProb]) } + attr_accessor :top_logprobs + + sig do + params( + token: String, + bytes: T::Array[Integer], + logprob: Float, + top_logprobs: T::Array[OpenAI::Conversations::TopLogProb::OrHash] + ).returns(T.attached_class) + end + def self.new(token:, bytes:, logprob:, top_logprobs:) + end + + sig do + override.returns( + { + token: String, + bytes: T::Array[Integer], + logprob: Float, + top_logprobs: T::Array[OpenAI::Conversations::TopLogProb] + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/message.rbi b/rbi/openai/models/conversations/message.rbi new file mode 100644 index 00000000..6d6a4e07 --- /dev/null +++ b/rbi/openai/models/conversations/message.rbi @@ -0,0 +1,196 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class Message < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Conversations::Message, OpenAI::Internal::AnyHash) + end + + # The unique ID of the message. + sig { returns(String) } + attr_accessor :id + + # The content of the message + sig do + returns(T::Array[OpenAI::Conversations::Message::Content::Variants]) + end + attr_accessor :content + + # The role of the message. One of `unknown`, `user`, `assistant`, `system`, + # `critic`, `discriminator`, `developer`, or `tool`. + sig { returns(OpenAI::Conversations::Message::Role::TaggedSymbol) } + attr_accessor :role + + # The status of item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + sig { returns(OpenAI::Conversations::Message::Status::TaggedSymbol) } + attr_accessor :status + + # The type of the message. Always set to `message`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + id: String, + content: + T::Array[ + T.any( + OpenAI::Conversations::InputTextContent::OrHash, + OpenAI::Conversations::OutputTextContent::OrHash, + OpenAI::Conversations::TextContent::OrHash, + OpenAI::Conversations::SummaryTextContent::OrHash, + OpenAI::Conversations::RefusalContent::OrHash, + OpenAI::Conversations::InputImageContent::OrHash, + OpenAI::Conversations::ComputerScreenshotContent::OrHash, + OpenAI::Conversations::InputFileContent::OrHash + ) + ], + role: OpenAI::Conversations::Message::Role::OrSymbol, + status: OpenAI::Conversations::Message::Status::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the message. + id:, + # The content of the message + content:, + # The role of the message. 
One of `unknown`, `user`, `assistant`, `system`, + # `critic`, `discriminator`, `developer`, or `tool`. + role:, + # The status of item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + status:, + # The type of the message. Always set to `message`. + type: :message + ) + end + + sig do + override.returns( + { + id: String, + content: + T::Array[OpenAI::Conversations::Message::Content::Variants], + role: OpenAI::Conversations::Message::Role::TaggedSymbol, + status: OpenAI::Conversations::Message::Status::TaggedSymbol, + type: Symbol + } + ) + end + def to_hash + end + + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Conversations::InputTextContent, + OpenAI::Conversations::OutputTextContent, + OpenAI::Conversations::TextContent, + OpenAI::Conversations::SummaryTextContent, + OpenAI::Conversations::RefusalContent, + OpenAI::Conversations::InputImageContent, + OpenAI::Conversations::ComputerScreenshotContent, + OpenAI::Conversations::InputFileContent + ) + end + + sig do + override.returns( + T::Array[OpenAI::Conversations::Message::Content::Variants] + ) + end + def self.variants + end + end + + # The role of the message. One of `unknown`, `user`, `assistant`, `system`, + # `critic`, `discriminator`, `developer`, or `tool`. + module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::Conversations::Message::Role) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + UNKNOWN = + T.let(:unknown, OpenAI::Conversations::Message::Role::TaggedSymbol) + USER = + T.let(:user, OpenAI::Conversations::Message::Role::TaggedSymbol) + ASSISTANT = + T.let( + :assistant, + OpenAI::Conversations::Message::Role::TaggedSymbol + ) + SYSTEM = + T.let(:system, OpenAI::Conversations::Message::Role::TaggedSymbol) + CRITIC = + T.let(:critic, OpenAI::Conversations::Message::Role::TaggedSymbol) + DISCRIMINATOR = + T.let( + :discriminator, + OpenAI::Conversations::Message::Role::TaggedSymbol + ) + DEVELOPER = + T.let( + :developer, + OpenAI::Conversations::Message::Role::TaggedSymbol + ) + TOOL = + T.let(:tool, OpenAI::Conversations::Message::Role::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::Conversations::Message::Role::TaggedSymbol] + ) + end + def self.values + end + end + + # The status of item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. 
+ module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Conversations::Message::Status) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Conversations::Message::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Conversations::Message::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Conversations::Message::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Conversations::Message::Status::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/conversations/output_text_content.rbi b/rbi/openai/models/conversations/output_text_content.rbi new file mode 100644 index 00000000..f9c643c6 --- /dev/null +++ b/rbi/openai/models/conversations/output_text_content.rbi @@ -0,0 +1,110 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class OutputTextContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::OutputTextContent, + OpenAI::Internal::AnyHash + ) + end + + # The annotations of the text output. + sig do + returns( + T::Array[ + OpenAI::Conversations::OutputTextContent::Annotation::Variants + ] + ) + end + attr_accessor :annotations + + # The text output from the model. + sig { returns(String) } + attr_accessor :text + + # The type of the output text. Always `output_text`. + sig { returns(Symbol) } + attr_accessor :type + + sig { returns(T.nilable(T::Array[OpenAI::Conversations::LobProb])) } + attr_reader :logprobs + + sig do + params( + logprobs: T::Array[OpenAI::Conversations::LobProb::OrHash] + ).void + end + attr_writer :logprobs + + sig do + params( + annotations: + T::Array[ + T.any( + OpenAI::Conversations::FileCitationBody::OrHash, + OpenAI::Conversations::URLCitationBody::OrHash, + OpenAI::Conversations::ContainerFileCitationBody::OrHash + ) + ], + text: String, + logprobs: T::Array[OpenAI::Conversations::LobProb::OrHash], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The annotations of the text output. + annotations:, + # The text output from the model. + text:, + logprobs: nil, + # The type of the output text. Always `output_text`. 
+ type: :output_text + ) + end + + sig do + override.returns( + { + annotations: + T::Array[ + OpenAI::Conversations::OutputTextContent::Annotation::Variants + ], + text: String, + type: Symbol, + logprobs: T::Array[OpenAI::Conversations::LobProb] + } + ) + end + def to_hash + end + + module Annotation + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Conversations::FileCitationBody, + OpenAI::Conversations::URLCitationBody, + OpenAI::Conversations::ContainerFileCitationBody + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Conversations::OutputTextContent::Annotation::Variants + ] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/conversations/refusal_content.rbi b/rbi/openai/models/conversations/refusal_content.rbi new file mode 100644 index 00000000..77516323 --- /dev/null +++ b/rbi/openai/models/conversations/refusal_content.rbi @@ -0,0 +1,38 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class RefusalContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::RefusalContent, + OpenAI::Internal::AnyHash + ) + end + + # The refusal explanation from the model. + sig { returns(String) } + attr_accessor :refusal + + # The type of the refusal. Always `refusal`. + sig { returns(Symbol) } + attr_accessor :type + + sig { params(refusal: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The refusal explanation from the model. + refusal:, + # The type of the refusal. Always `refusal`. + type: :refusal + ) + end + + sig { override.returns({ refusal: String, type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/summary_text_content.rbi b/rbi/openai/models/conversations/summary_text_content.rbi new file mode 100644 index 00000000..7292fa8d --- /dev/null +++ b/rbi/openai/models/conversations/summary_text_content.rbi @@ -0,0 +1,31 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class SummaryTextContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::SummaryTextContent, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :text + + sig { returns(Symbol) } + attr_accessor :type + + sig { params(text: String, type: Symbol).returns(T.attached_class) } + def self.new(text:, type: :summary_text) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/text_content.rbi b/rbi/openai/models/conversations/text_content.rbi new file mode 100644 index 00000000..abb7a442 --- /dev/null +++ b/rbi/openai/models/conversations/text_content.rbi @@ -0,0 +1,28 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class TextContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Conversations::TextContent, OpenAI::Internal::AnyHash) + end + + sig { returns(String) } + attr_accessor :text + + sig { returns(Symbol) } + attr_accessor :type + + sig { params(text: String, type: Symbol).returns(T.attached_class) } + def self.new(text:, type: :text) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/top_log_prob.rbi b/rbi/openai/models/conversations/top_log_prob.rbi new file mode 100644 index 00000000..c11b651a --- 
/dev/null +++ b/rbi/openai/models/conversations/top_log_prob.rbi @@ -0,0 +1,41 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class TopLogProb < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Conversations::TopLogProb, OpenAI::Internal::AnyHash) + end + + sig { returns(String) } + attr_accessor :token + + sig { returns(T::Array[Integer]) } + attr_accessor :bytes + + sig { returns(Float) } + attr_accessor :logprob + + sig do + params( + token: String, + bytes: T::Array[Integer], + logprob: Float + ).returns(T.attached_class) + end + def self.new(token:, bytes:, logprob:) + end + + sig do + override.returns( + { token: String, bytes: T::Array[Integer], logprob: Float } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/conversations/url_citation_body.rbi b/rbi/openai/models/conversations/url_citation_body.rbi new file mode 100644 index 00000000..4c34ad3d --- /dev/null +++ b/rbi/openai/models/conversations/url_citation_body.rbi @@ -0,0 +1,74 @@ +# typed: strong + +module OpenAI + module Models + module Conversations + class URLCitationBody < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Conversations::URLCitationBody, + OpenAI::Internal::AnyHash + ) + end + + # The index of the last character of the URL citation in the message. + sig { returns(Integer) } + attr_accessor :end_index + + # The index of the first character of the URL citation in the message. + sig { returns(Integer) } + attr_accessor :start_index + + # The title of the web resource. + sig { returns(String) } + attr_accessor :title + + # The type of the URL citation. Always `url_citation`. + sig { returns(Symbol) } + attr_accessor :type + + # The URL of the web resource. + sig { returns(String) } + attr_accessor :url + + sig do + params( + end_index: Integer, + start_index: Integer, + title: String, + url: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The index of the last character of the URL citation in the message. + end_index:, + # The index of the first character of the URL citation in the message. + start_index:, + # The title of the web resource. + title:, + # The URL of the web resource. + url:, + # The type of the URL citation. Always `url_citation`. + type: :url_citation + ) + end + + sig do + override.returns( + { + end_index: Integer, + start_index: Integer, + title: String, + type: Symbol, + url: String + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/create_embedding_response.rbi b/rbi/openai/models/create_embedding_response.rbi new file mode 100644 index 00000000..3228933e --- /dev/null +++ b/rbi/openai/models/create_embedding_response.rbi @@ -0,0 +1,102 @@ +# typed: strong + +module OpenAI + module Models + class CreateEmbeddingResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::CreateEmbeddingResponse, OpenAI::Internal::AnyHash) + end + + # The list of embeddings generated by the model. + sig { returns(T::Array[OpenAI::Embedding]) } + attr_accessor :data + + # The name of the model used to generate the embedding. + sig { returns(String) } + attr_accessor :model + + # The object type, which is always "list". + sig { returns(Symbol) } + attr_accessor :object + + # The usage information for the request. 
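+      # Token accounting sketch (assuming `response` is a returned
+      # CreateEmbeddingResponse):
+      #
+      #   puts "#{response.usage.prompt_tokens} prompt / " \
+      #        "#{response.usage.total_tokens} total tokens"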
+ sig { returns(OpenAI::CreateEmbeddingResponse::Usage) } + attr_reader :usage + + sig { params(usage: OpenAI::CreateEmbeddingResponse::Usage::OrHash).void } + attr_writer :usage + + sig do + params( + data: T::Array[OpenAI::Embedding::OrHash], + model: String, + usage: OpenAI::CreateEmbeddingResponse::Usage::OrHash, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The list of embeddings generated by the model. + data:, + # The name of the model used to generate the embedding. + model:, + # The usage information for the request. + usage:, + # The object type, which is always "list". + object: :list + ) + end + + sig do + override.returns( + { + data: T::Array[OpenAI::Embedding], + model: String, + object: Symbol, + usage: OpenAI::CreateEmbeddingResponse::Usage + } + ) + end + def to_hash + end + + class Usage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::CreateEmbeddingResponse::Usage, + OpenAI::Internal::AnyHash + ) + end + + # The number of tokens used by the prompt. + sig { returns(Integer) } + attr_accessor :prompt_tokens + + # The total number of tokens used by the request. + sig { returns(Integer) } + attr_accessor :total_tokens + + # The usage information for the request. + sig do + params(prompt_tokens: Integer, total_tokens: Integer).returns( + T.attached_class + ) + end + def self.new( + # The number of tokens used by the prompt. + prompt_tokens:, + # The total number of tokens used by the request. + total_tokens: + ) + end + + sig do + override.returns({ prompt_tokens: Integer, total_tokens: Integer }) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/custom_tool_input_format.rbi b/rbi/openai/models/custom_tool_input_format.rbi new file mode 100644 index 00000000..d2f57068 --- /dev/null +++ b/rbi/openai/models/custom_tool_input_format.rbi @@ -0,0 +1,136 @@ +# typed: strong + +module OpenAI + module Models + # The input format for the custom tool. Default is unconstrained text. + module CustomToolInputFormat + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::CustomToolInputFormat::Text, + OpenAI::CustomToolInputFormat::Grammar + ) + end + + class Text < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::CustomToolInputFormat::Text, + OpenAI::Internal::AnyHash + ) + end + + # Unconstrained text format. Always `text`. + sig { returns(Symbol) } + attr_accessor :type + + # Unconstrained free-form text. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # Unconstrained text format. Always `text`. + type: :text + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + class Grammar < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::CustomToolInputFormat::Grammar, + OpenAI::Internal::AnyHash + ) + end + + # The grammar definition. + sig { returns(String) } + attr_accessor :definition + + # The syntax of the grammar definition. One of `lark` or `regex`. + sig do + returns(OpenAI::CustomToolInputFormat::Grammar::Syntax::OrSymbol) + end + attr_accessor :syntax + + # Grammar format. Always `grammar`. + sig { returns(Symbol) } + attr_accessor :type + + # A grammar defined by the user. + sig do + params( + definition: String, + syntax: OpenAI::CustomToolInputFormat::Grammar::Syntax::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The grammar definition. + definition:, + # The syntax of the grammar definition. 
One of `lark` or `regex`. + syntax:, + # Grammar format. Always `grammar`. + type: :grammar + ) + end + + sig do + override.returns( + { + definition: String, + syntax: OpenAI::CustomToolInputFormat::Grammar::Syntax::OrSymbol, + type: Symbol + } + ) + end + def to_hash + end + + # The syntax of the grammar definition. One of `lark` or `regex`. + module Syntax + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::CustomToolInputFormat::Grammar::Syntax) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LARK = + T.let( + :lark, + OpenAI::CustomToolInputFormat::Grammar::Syntax::TaggedSymbol + ) + REGEX = + T.let( + :regex, + OpenAI::CustomToolInputFormat::Grammar::Syntax::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::CustomToolInputFormat::Grammar::Syntax::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + sig do + override.returns(T::Array[OpenAI::CustomToolInputFormat::Variants]) + end + def self.variants + end + end + end +end diff --git a/rbi/openai/models/embedding.rbi b/rbi/openai/models/embedding.rbi new file mode 100644 index 00000000..173bc91d --- /dev/null +++ b/rbi/openai/models/embedding.rbi @@ -0,0 +1,52 @@ +# typed: strong + +module OpenAI + module Models + class Embedding < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias { T.any(OpenAI::Embedding, OpenAI::Internal::AnyHash) } + + # The embedding vector, which is a list of floats. The length of the vector depends on + # the model as listed in the + # [embedding guide](https://platform.openai.com/docs/guides/embeddings). + sig { returns(T::Array[Float]) } + attr_accessor :embedding + + # The index of the embedding in the list of embeddings. + sig { returns(Integer) } + attr_accessor :index + + # The object type, which is always "embedding". + sig { returns(Symbol) } + attr_accessor :object + + # Represents an embedding vector returned by the embedding endpoint. + sig do + params( + embedding: T::Array[Float], + index: Integer, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The embedding vector, which is a list of floats. The length of the vector depends on + # the model as listed in the + # [embedding guide](https://platform.openai.com/docs/guides/embeddings). + embedding:, + # The index of the embedding in the list of embeddings. + index:, + # The object type, which is always "embedding". + object: :embedding + ) + end + + sig do + override.returns( + { embedding: T::Array[Float], index: Integer, object: Symbol } + ) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/embedding_create_params.rbi b/rbi/openai/models/embedding_create_params.rbi new file mode 100644 index 00000000..9d60fac5 --- /dev/null +++ b/rbi/openai/models/embedding_create_params.rbi @@ -0,0 +1,231 @@ +# typed: strong + +module OpenAI + module Models + class EmbeddingCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::EmbeddingCreateParams, OpenAI::Internal::AnyHash) + end + + # Input text to embed, encoded as a string or array of tokens. To embed multiple + # inputs in a single request, pass an array of strings or array of token arrays. + # The input must not exceed the max input tokens for the model (8192 tokens for + # all embedding models), cannot be an empty string, and any array must be 2048 + # dimensions or less.
+ # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. In addition to the per-input token limit, all embedding + # models enforce a maximum of 300,000 tokens summed across all inputs in a single + # request. + sig { returns(OpenAI::EmbeddingCreateParams::Input::Variants) } + attr_accessor :input + + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + sig { returns(T.any(String, OpenAI::EmbeddingModel::OrSymbol)) } + attr_accessor :model + + # The number of dimensions the resulting output embeddings should have. Only + # supported in `text-embedding-3` and later models. + sig { returns(T.nilable(Integer)) } + attr_reader :dimensions + + sig { params(dimensions: Integer).void } + attr_writer :dimensions + + # The format to return the embeddings in. Can be either `float` or + # [`base64`](https://pypi.org/project/pybase64/). + sig do + returns( + T.nilable(OpenAI::EmbeddingCreateParams::EncodingFormat::OrSymbol) + ) + end + attr_reader :encoding_format + + sig do + params( + encoding_format: + OpenAI::EmbeddingCreateParams::EncodingFormat::OrSymbol + ).void + end + attr_writer :encoding_format + + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + sig { returns(T.nilable(String)) } + attr_reader :user + + sig { params(user: String).void } + attr_writer :user + + sig do + params( + input: OpenAI::EmbeddingCreateParams::Input::Variants, + model: T.any(String, OpenAI::EmbeddingModel::OrSymbol), + dimensions: Integer, + encoding_format: + OpenAI::EmbeddingCreateParams::EncodingFormat::OrSymbol, + user: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Input text to embed, encoded as a string or array of tokens. To embed multiple + # inputs in a single request, pass an array of strings or array of token arrays. + # The input must not exceed the max input tokens for the model (8192 tokens for + # all embedding models), cannot be an empty string, and any array must be 2048 + # dimensions or less. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. In addition to the per-input token limit, all embedding + # models enforce a maximum of 300,000 tokens summed across all inputs in a single + # request. + input:, + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + model:, + # The number of dimensions the resulting output embeddings should have. Only + # supported in `text-embedding-3` and later models. + dimensions: nil, + # The format to return the embeddings in. Can be either `float` or + # [`base64`](https://pypi.org/project/pybase64/). + encoding_format: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
+ user: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + input: OpenAI::EmbeddingCreateParams::Input::Variants, + model: T.any(String, OpenAI::EmbeddingModel::OrSymbol), + dimensions: Integer, + encoding_format: + OpenAI::EmbeddingCreateParams::EncodingFormat::OrSymbol, + user: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Input text to embed, encoded as a string or array of tokens. To embed multiple + # inputs in a single request, pass an array of strings or array of token arrays. + # The input must not exceed the max input tokens for the model (8192 tokens for + # all embedding models), cannot be an empty string, and any array must be 2048 + # dimensions or less. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. In addition to the per-input token limit, all embedding + # models enforce a maximum of 300,000 tokens summed across all inputs in a single + # request. + module Input + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[String], + T::Array[Integer], + T::Array[T::Array[Integer]] + ) + end + + sig do + override.returns( + T::Array[OpenAI::EmbeddingCreateParams::Input::Variants] + ) + end + def self.variants + end + + StringArray = + T.let( + OpenAI::Internal::Type::ArrayOf[String], + OpenAI::Internal::Type::Converter + ) + + IntegerArray = + T.let( + OpenAI::Internal::Type::ArrayOf[Integer], + OpenAI::Internal::Type::Converter + ) + + ArrayOfToken2DArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + OpenAI::Internal::Type::ArrayOf[Integer] + ], + OpenAI::Internal::Type::Converter + ) + end + + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + module Model + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias { T.any(String, OpenAI::EmbeddingModel::TaggedSymbol) } + + sig do + override.returns( + T::Array[OpenAI::EmbeddingCreateParams::Model::Variants] + ) + end + def self.variants + end + end + + # The format to return the embeddings in. Can be either `float` or + # [`base64`](https://pypi.org/project/pybase64/). 
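+      # A hedged usage sketch (editorial, not part of the generated schema; it
+      # assumes a configured `client`, and the `Base64`/`unpack` decoding step is
+      # an assumption about the raw `base64` payload rather than documented SDK
+      # behavior):
+      #
+      #   require "base64"
+      #
+      #   response = client.embeddings.create(
+      #     model: :"text-embedding-3-small",
+      #     input: "hello world",
+      #     encoding_format: :base64
+      #   )
+      #   raw = response.data.first.embedding
+      #   # Decode little-endian float32 values if the payload arrives as a string.
+      #   floats = raw.is_a?(String) ? Base64.decode64(raw).unpack("e*") : raw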
+ module EncodingFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::EmbeddingCreateParams::EncodingFormat) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + FLOAT = + T.let( + :float, + OpenAI::EmbeddingCreateParams::EncodingFormat::TaggedSymbol + ) + BASE64 = + T.let( + :base64, + OpenAI::EmbeddingCreateParams::EncodingFormat::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::EmbeddingCreateParams::EncodingFormat::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/embedding_model.rbi b/rbi/openai/models/embedding_model.rbi new file mode 100644 index 00000000..42bc7b6f --- /dev/null +++ b/rbi/openai/models/embedding_model.rbi @@ -0,0 +1,23 @@ +# typed: strong + +module OpenAI + module Models + module EmbeddingModel + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::EmbeddingModel) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT_EMBEDDING_ADA_002 = + T.let(:"text-embedding-ada-002", OpenAI::EmbeddingModel::TaggedSymbol) + TEXT_EMBEDDING_3_SMALL = + T.let(:"text-embedding-3-small", OpenAI::EmbeddingModel::TaggedSymbol) + TEXT_EMBEDDING_3_LARGE = + T.let(:"text-embedding-3-large", OpenAI::EmbeddingModel::TaggedSymbol) + + sig { override.returns(T::Array[OpenAI::EmbeddingModel::TaggedSymbol]) } + def self.values + end + end + end +end diff --git a/rbi/openai/models/error_object.rbi b/rbi/openai/models/error_object.rbi new file mode 100644 index 00000000..8cc3cf23 --- /dev/null +++ b/rbi/openai/models/error_object.rbi @@ -0,0 +1,46 @@ +# typed: strong + +module OpenAI + module Models + class ErrorObject < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias { T.any(OpenAI::ErrorObject, OpenAI::Internal::AnyHash) } + + sig { returns(T.nilable(String)) } + attr_accessor :code + + sig { returns(String) } + attr_accessor :message + + sig { returns(T.nilable(String)) } + attr_accessor :param + + sig { returns(String) } + attr_accessor :type + + sig do + params( + code: T.nilable(String), + message: String, + param: T.nilable(String), + type: String + ).returns(T.attached_class) + end + def self.new(code:, message:, param:, type:) + end + + sig do + override.returns( + { + code: T.nilable(String), + message: String, + param: T.nilable(String), + type: String + } + ) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/eval_create_params.rbi b/rbi/openai/models/eval_create_params.rbi new file mode 100644 index 00000000..379e276b --- /dev/null +++ b/rbi/openai/models/eval_create_params.rbi @@ -0,0 +1,875 @@ +# typed: strong + +module OpenAI + module Models + class EvalCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::EvalCreateParams, OpenAI::Internal::AnyHash) + end + + # The configuration for the data source used for the evaluation runs. Dictates the + # schema of the data used in the evaluation. + sig do + returns( + T.any( + OpenAI::EvalCreateParams::DataSourceConfig::Custom, + OpenAI::EvalCreateParams::DataSourceConfig::Logs, + OpenAI::EvalCreateParams::DataSourceConfig::StoredCompletions + ) + ) + end + attr_accessor :data_source_config + + # A list of graders for all eval runs in this group. Graders can reference + # variables in the data source using double curly braces notation, like + # `{{item.variable_name}}`. 
To reference the model's output, use the `sample` + # namespace (ie, `{{sample.output_text}}`). + sig do + returns( + T::Array[ + T.any( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel, + OpenAI::Graders::StringCheckGrader, + OpenAI::EvalCreateParams::TestingCriterion::TextSimilarity, + OpenAI::EvalCreateParams::TestingCriterion::Python, + OpenAI::EvalCreateParams::TestingCriterion::ScoreModel + ) + ] + ) + end + attr_accessor :testing_criteria + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The name of the evaluation. + sig { returns(T.nilable(String)) } + attr_reader :name + + sig { params(name: String).void } + attr_writer :name + + sig do + params( + data_source_config: + T.any( + OpenAI::EvalCreateParams::DataSourceConfig::Custom::OrHash, + OpenAI::EvalCreateParams::DataSourceConfig::Logs::OrHash, + OpenAI::EvalCreateParams::DataSourceConfig::StoredCompletions::OrHash + ), + testing_criteria: + T::Array[ + T.any( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::OrHash, + OpenAI::Graders::StringCheckGrader::OrHash, + OpenAI::EvalCreateParams::TestingCriterion::TextSimilarity::OrHash, + OpenAI::EvalCreateParams::TestingCriterion::Python::OrHash, + OpenAI::EvalCreateParams::TestingCriterion::ScoreModel::OrHash + ) + ], + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The configuration for the data source used for the evaluation runs. Dictates the + # schema of the data used in the evaluation. + data_source_config:, + # A list of graders for all eval runs in this group. Graders can reference + # variables in the data source using double curly braces notation, like + # `{{item.variable_name}}`. To reference the model's output, use the `sample` + # namespace (ie, `{{sample.output_text}}`). + testing_criteria:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The name of the evaluation. + name: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + data_source_config: + T.any( + OpenAI::EvalCreateParams::DataSourceConfig::Custom, + OpenAI::EvalCreateParams::DataSourceConfig::Logs, + OpenAI::EvalCreateParams::DataSourceConfig::StoredCompletions + ), + testing_criteria: + T::Array[ + T.any( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel, + OpenAI::Graders::StringCheckGrader, + OpenAI::EvalCreateParams::TestingCriterion::TextSimilarity, + OpenAI::EvalCreateParams::TestingCriterion::Python, + OpenAI::EvalCreateParams::TestingCriterion::ScoreModel + ) + ], + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # The configuration for the data source used for the evaluation runs. Dictates the + # schema of the data used in the evaluation. 
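+      # An illustrative sketch (editorial, not part of the generated schema; it
+      # assumes a configured `client` and its `evals` resource, and all field
+      # values are hypothetical). Either variant can be passed as a plain hash:
+      #
+      #   client.evals.create(
+      #     name: "ticket-sentiment",
+      #     data_source_config: {
+      #       type: :custom,
+      #       item_schema: {
+      #         type: "object",
+      #         properties: {ticket: {type: "string"}, label: {type: "string"}}
+      #       },
+      #       include_sample_schema: true
+      #     },
+      #     testing_criteria: [
+      #       {
+      #         type: :string_check,
+      #         name: "exact match",
+      #         input: "{{sample.output_text}}",
+      #         reference: "{{item.label}}",
+      #         operation: :eq
+      #       }
+      #     ]
+      #   )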
+ module DataSourceConfig + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::EvalCreateParams::DataSourceConfig::Custom, + OpenAI::EvalCreateParams::DataSourceConfig::Logs, + OpenAI::EvalCreateParams::DataSourceConfig::StoredCompletions + ) + end + + class Custom < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::EvalCreateParams::DataSourceConfig::Custom, + OpenAI::Internal::AnyHash + ) + end + + # The json schema for each row in the data source. + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :item_schema + + # The type of data source. Always `custom`. + sig { returns(Symbol) } + attr_accessor :type + + # Whether the eval should expect you to populate the sample namespace (ie, by + # generating responses off of your data source) + sig { returns(T.nilable(T::Boolean)) } + attr_reader :include_sample_schema + + sig { params(include_sample_schema: T::Boolean).void } + attr_writer :include_sample_schema + + # A CustomDataSourceConfig object that defines the schema for the data source used + # for the evaluation runs. This schema is used to define the shape of the data + # that will be: + # + # - Used to define your testing criteria and + # - What data is required when creating a run + sig do + params( + item_schema: T::Hash[Symbol, T.anything], + include_sample_schema: T::Boolean, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The json schema for each row in the data source. + item_schema:, + # Whether the eval should expect you to populate the sample namespace (ie, by + # generating responses off of your data source) + include_sample_schema: nil, + # The type of data source. Always `custom`. + type: :custom + ) + end + + sig do + override.returns( + { + item_schema: T::Hash[Symbol, T.anything], + type: Symbol, + include_sample_schema: T::Boolean + } + ) + end + def to_hash + end + end + + class Logs < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::EvalCreateParams::DataSourceConfig::Logs, + OpenAI::Internal::AnyHash + ) + end + + # The type of data source. Always `logs`. + sig { returns(Symbol) } + attr_accessor :type + + # Metadata filters for the logs data source. + sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } + attr_reader :metadata + + sig { params(metadata: T::Hash[Symbol, T.anything]).void } + attr_writer :metadata + + # A data source config which specifies the metadata property of your logs query. + # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + sig do + params(metadata: T::Hash[Symbol, T.anything], type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # Metadata filters for the logs data source. + metadata: nil, + # The type of data source. Always `logs`. + type: :logs + ) + end + + sig do + override.returns( + { type: Symbol, metadata: T::Hash[Symbol, T.anything] } + ) + end + def to_hash + end + end + + class StoredCompletions < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::EvalCreateParams::DataSourceConfig::StoredCompletions, + OpenAI::Internal::AnyHash + ) + end + + # The type of data source. Always `stored_completions`. + sig { returns(Symbol) } + attr_accessor :type + + # Metadata filters for the stored completions data source. 
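+          # For illustration only (editorial; the filter values are hypothetical),
+          # a metadata filter is a plain hash matched against your stored
+          # completions:
+          #
+          #   {type: :stored_completions, metadata: {usecase: "chatbot"}}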
+ sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } + attr_reader :metadata + + sig { params(metadata: T::Hash[Symbol, T.anything]).void } + attr_writer :metadata + + # Deprecated in favor of LogsDataSourceConfig. + sig do + params(metadata: T::Hash[Symbol, T.anything], type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # Metadata filters for the stored completions data source. + metadata: nil, + # The type of data source. Always `stored_completions`. + type: :stored_completions + ) + end + + sig do + override.returns( + { type: Symbol, metadata: T::Hash[Symbol, T.anything] } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[OpenAI::EvalCreateParams::DataSourceConfig::Variants] + ) + end + def self.variants + end + end + + # A LabelModelGrader object which uses a model to assign labels to each item in + # the evaluation. + module TestingCriterion + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel, + OpenAI::Graders::StringCheckGrader, + OpenAI::EvalCreateParams::TestingCriterion::TextSimilarity, + OpenAI::EvalCreateParams::TestingCriterion::Python, + OpenAI::EvalCreateParams::TestingCriterion::ScoreModel + ) + end + + class LabelModel < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel, + OpenAI::Internal::AnyHash + ) + end + + # A list of chat messages forming the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. + sig do + returns( + T::Array[ + T.any( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::SimpleInputMessage, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem + ) + ] + ) + end + attr_accessor :input + + # The labels to classify to each item in the evaluation. + sig { returns(T::Array[String]) } + attr_accessor :labels + + # The model to use for the evaluation. Must support structured outputs. + sig { returns(String) } + attr_accessor :model + + # The name of the grader. + sig { returns(String) } + attr_accessor :name + + # The labels that indicate a passing result. Must be a subset of labels. + sig { returns(T::Array[String]) } + attr_accessor :passing_labels + + # The object type, which is always `label_model`. + sig { returns(Symbol) } + attr_accessor :type + + # A LabelModelGrader object which uses a model to assign labels to each item in + # the evaluation. + sig do + params( + input: + T::Array[ + T.any( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::SimpleInputMessage::OrHash, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::OrHash + ) + ], + labels: T::Array[String], + model: String, + name: String, + passing_labels: T::Array[String], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # A list of chat messages forming the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. + input:, + # The labels to classify to each item in the evaluation. + labels:, + # The model to use for the evaluation. Must support structured outputs. + model:, + # The name of the grader. + name:, + # The labels that indicate a passing result. Must be a subset of labels. + passing_labels:, + # The object type, which is always `label_model`. 
+ type: :label_model + ) + end + + sig do + override.returns( + { + input: + T::Array[ + T.any( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::SimpleInputMessage, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem + ) + ], + labels: T::Array[String], + model: String, + name: String, + passing_labels: T::Array[String], + type: Symbol + } + ) + end + def to_hash + end + + # A chat message that makes up the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. + module Input + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::SimpleInputMessage, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem + ) + end + + class SimpleInputMessage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::SimpleInputMessage, + OpenAI::Internal::AnyHash + ) + end + + # The content of the message. + sig { returns(String) } + attr_accessor :content + + # The role of the message (e.g. "system", "assistant", "user"). + sig { returns(String) } + attr_accessor :role + + sig do + params(content: String, role: String).returns(T.attached_class) + end + def self.new( + # The content of the message. + content:, + # The role of the message (e.g. "system", "assistant", "user"). + role: + ) + end + + sig { override.returns({ content: String, role: String }) } + def to_hash + end + end + + class EvalItem < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem, + OpenAI::Internal::AnyHash + ) + end + + # Inputs to the model - can contain template strings. + sig do + returns( + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage, + T::Array[T.anything] + ) + ) + end + attr_accessor :content + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + sig do + returns( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Role::OrSymbol + ) + end + attr_accessor :role + + # The type of the message input. Always `message`. + sig do + returns( + T.nilable( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Type::OrSymbol + ) + ) + end + attr_reader :type + + sig do + params( + type: + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Type::OrSymbol + ).void + end + attr_writer :type + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. 
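+              # A minimal sketch (editorial, not part of the generated schema; the
+              # values are hypothetical): expressed as a plain hash, one entry of
+              # the `input` array might look like:
+              #
+              #   {
+              #     role: :developer,
+              #     content: "Classify the sentiment of: {{item.ticket}}",
+              #     type: :message
+              #   }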
+ sig do + params( + content: + T.any( + String, + OpenAI::Responses::ResponseInputText::OrHash, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText::OrHash, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage::OrHash, + T::Array[T.anything] + ), + role: + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Role::OrSymbol, + type: + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Type::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # Inputs to the model - can contain template strings. + content:, + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + role:, + # The type of the message input. Always `message`. + type: nil + ) + end + + sig do + override.returns( + { + content: + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage, + T::Array[T.anything] + ), + role: + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Role::OrSymbol, + type: + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Type::OrSymbol + } + ) + end + def to_hash + end + + # Inputs to the model - can contain template strings. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage, + T::Array[T.anything] + ) + end + + class OutputText < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText, + OpenAI::Internal::AnyHash + ) + end + + # The text output from the model. + sig { returns(String) } + attr_accessor :text + + # The type of the output text. Always `output_text`. + sig { returns(Symbol) } + attr_accessor :type + + # A text output from the model. + sig do + params(text: String, type: Symbol).returns(T.attached_class) + end + def self.new( + # The text output from the model. + text:, + # The type of the output text. Always `output_text`. + type: :output_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + + class InputImage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage, + OpenAI::Internal::AnyHash + ) + end + + # The URL of the image input. + sig { returns(String) } + attr_accessor :image_url + + # The type of the image input. Always `input_image`. + sig { returns(Symbol) } + attr_accessor :type + + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + sig { returns(T.nilable(String)) } + attr_reader :detail + + sig { params(detail: String).void } + attr_writer :detail + + # An image input to the model. + sig do + params( + image_url: String, + detail: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The URL of the image input. + image_url:, + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. 
Defaults to `auto`. + detail: nil, + # The type of the image input. Always `input_image`. + type: :input_image + ) + end + + sig do + override.returns( + { image_url: String, type: Symbol, detail: String } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::Variants + ] + ) + end + def self.variants + end + + AnArrayOfInputTextAndInputImageArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + OpenAI::Internal::Type::Unknown + ], + OpenAI::Internal::Type::Converter + ) + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Role + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Role::TaggedSymbol + ) + SYSTEM = + T.let( + :system, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Role::TaggedSymbol + ) + DEVELOPER = + T.let( + :developer, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The type of the message input. Always `message`. + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MESSAGE = + T.let( + :message, + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::Variants + ] + ) + end + def self.variants + end + end + end + + class TextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader + OrHash = + T.type_alias do + T.any( + OpenAI::EvalCreateParams::TestingCriterion::TextSimilarity, + OpenAI::Internal::AnyHash + ) + end + + # The threshold for the score. + sig { returns(Float) } + attr_accessor :pass_threshold + + # A TextSimilarityGrader object which grades text based on similarity metrics. + sig { params(pass_threshold: Float).returns(T.attached_class) } + def self.new( + # The threshold for the score. + pass_threshold: + ) + end + + sig { override.returns({ pass_threshold: Float }) } + def to_hash + end + end + + class Python < OpenAI::Models::Graders::PythonGrader + OrHash = + T.type_alias do + T.any( + OpenAI::EvalCreateParams::TestingCriterion::Python, + OpenAI::Internal::AnyHash + ) + end + + # The threshold for the score. + sig { returns(T.nilable(Float)) } + attr_reader :pass_threshold + + sig { params(pass_threshold: Float).void } + attr_writer :pass_threshold + + # A PythonGrader object that runs a python script on the input. 
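+          # A hedged sketch (editorial, not part of the generated schema; the
+          # grader body is made up): as a testing-criterion hash, this subclass
+          # layers `pass_threshold` on top of the base python grader fields:
+          #
+          #   {
+          #     type: :python,
+          #     name: "brevity",
+          #     source: "def grade(sample, item):\n" \
+          #       "    return 1.0 if len(sample['output_text']) <= 280 else 0.0",
+          #     pass_threshold: 0.5
+          #   }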
+ sig { params(pass_threshold: Float).returns(T.attached_class) } + def self.new( + # The threshold for the score. + pass_threshold: nil + ) + end + + sig { override.returns({ pass_threshold: Float }) } + def to_hash + end + end + + class ScoreModel < OpenAI::Models::Graders::ScoreModelGrader + OrHash = + T.type_alias do + T.any( + OpenAI::EvalCreateParams::TestingCriterion::ScoreModel, + OpenAI::Internal::AnyHash + ) + end + + # The threshold for the score. + sig { returns(T.nilable(Float)) } + attr_reader :pass_threshold + + sig { params(pass_threshold: Float).void } + attr_writer :pass_threshold + + # A ScoreModelGrader object that uses a model to assign a score to the input. + sig { params(pass_threshold: Float).returns(T.attached_class) } + def self.new( + # The threshold for the score. + pass_threshold: nil + ) + end + + sig { override.returns({ pass_threshold: Float }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[OpenAI::EvalCreateParams::TestingCriterion::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/eval_create_response.rbi b/rbi/openai/models/eval_create_response.rbi new file mode 100644 index 00000000..6513b6f7 --- /dev/null +++ b/rbi/openai/models/eval_create_response.rbi @@ -0,0 +1,330 @@ +# typed: strong + +module OpenAI + module Models + class EvalCreateResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Models::EvalCreateResponse, OpenAI::Internal::AnyHash) + end + + # Unique identifier for the evaluation. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) for when the eval was created. + sig { returns(Integer) } + attr_accessor :created_at + + # Configuration of data sources used in runs of the evaluation. + sig do + returns(OpenAI::Models::EvalCreateResponse::DataSourceConfig::Variants) + end + attr_accessor :data_source_config + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The name of the evaluation. + sig { returns(String) } + attr_accessor :name + + # The object type. + sig { returns(Symbol) } + attr_accessor :object + + # A list of testing criteria. + sig do + returns( + T::Array[ + OpenAI::Models::EvalCreateResponse::TestingCriterion::Variants + ] + ) + end + attr_accessor :testing_criteria + + # An Eval object with a data source config and testing criteria. An Eval + # represents a task to be done for your LLM integration. 
Like: + # + # - Improve the quality of my chatbot + # - See how well my chatbot handles customer support + # - Check if o4-mini is better at my usecase than gpt-4o + sig do + params( + id: String, + created_at: Integer, + data_source_config: + T.any( + OpenAI::EvalCustomDataSourceConfig::OrHash, + OpenAI::Models::EvalCreateResponse::DataSourceConfig::Logs::OrHash, + OpenAI::EvalStoredCompletionsDataSourceConfig::OrHash + ), + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + testing_criteria: + T::Array[ + T.any( + OpenAI::Models::Graders::LabelModelGrader::OrHash, + OpenAI::Models::Graders::StringCheckGrader::OrHash, + OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderTextSimilarity::OrHash, + OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderPython::OrHash, + OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderScoreModel::OrHash + ) + ], + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the evaluation. + id:, + # The Unix timestamp (in seconds) for when the eval was created. + created_at:, + # Configuration of data sources used in runs of the evaluation. + data_source_config:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + # The name of the evaluation. + name:, + # A list of testing criteria. + testing_criteria:, + # The object type. + object: :eval + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data_source_config: + OpenAI::Models::EvalCreateResponse::DataSourceConfig::Variants, + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + object: Symbol, + testing_criteria: + T::Array[ + OpenAI::Models::EvalCreateResponse::TestingCriterion::Variants + ] + } + ) + end + def to_hash + end + + # Configuration of data sources used in runs of the evaluation. + module DataSourceConfig + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::EvalCustomDataSourceConfig, + OpenAI::Models::EvalCreateResponse::DataSourceConfig::Logs, + OpenAI::EvalStoredCompletionsDataSourceConfig + ) + end + + class Logs < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::EvalCreateResponse::DataSourceConfig::Logs, + OpenAI::Internal::AnyHash + ) + end + + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :schema + + # The type of data source. Always `logs`. + sig { returns(Symbol) } + attr_accessor :type + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # A LogsDataSourceConfig which specifies the metadata property of your logs query. + # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. 
The + # schema returned by this data source config is used to define what variables are + # available in your evals. `item` and `sample` are both defined when using this + # data source config. + sig do + params( + schema: T::Hash[Symbol, T.anything], + metadata: T.nilable(T::Hash[Symbol, String]), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + schema:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The type of data source. Always `logs`. + type: :logs + ) + end + + sig do + override.returns( + { + schema: T::Hash[Symbol, T.anything], + type: Symbol, + metadata: T.nilable(T::Hash[Symbol, String]) + } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::EvalCreateResponse::DataSourceConfig::Variants + ] + ) + end + def self.variants + end + end + + # A LabelModelGrader object which uses a model to assign labels to each item in + # the evaluation. + module TestingCriterion + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Models::Graders::LabelModelGrader, + OpenAI::Models::Graders::StringCheckGrader, + OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderTextSimilarity, + OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderPython, + OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderScoreModel + ) + end + + class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader + OrHash = + T.type_alias do + T.any( + OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderTextSimilarity, + OpenAI::Internal::AnyHash + ) + end + + # The threshold for the score. + sig { returns(Float) } + attr_accessor :pass_threshold + + # A TextSimilarityGrader object which grades text based on similarity metrics. + sig { params(pass_threshold: Float).returns(T.attached_class) } + def self.new( + # The threshold for the score. + pass_threshold: + ) + end + + sig { override.returns({ pass_threshold: Float }) } + def to_hash + end + end + + class EvalGraderPython < OpenAI::Models::Graders::PythonGrader + OrHash = + T.type_alias do + T.any( + OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderPython, + OpenAI::Internal::AnyHash + ) + end + + # The threshold for the score. + sig { returns(T.nilable(Float)) } + attr_reader :pass_threshold + + sig { params(pass_threshold: Float).void } + attr_writer :pass_threshold + + # A PythonGrader object that runs a python script on the input. + sig { params(pass_threshold: Float).returns(T.attached_class) } + def self.new( + # The threshold for the score. + pass_threshold: nil + ) + end + + sig { override.returns({ pass_threshold: Float }) } + def to_hash + end + end + + class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader + OrHash = + T.type_alias do + T.any( + OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderScoreModel, + OpenAI::Internal::AnyHash + ) + end + + # The threshold for the score.
+ sig { returns(T.nilable(Float)) } + attr_reader :pass_threshold + + sig { params(pass_threshold: Float).void } + attr_writer :pass_threshold + + # A ScoreModelGrader object that uses a model to assign a score to the input. + sig { params(pass_threshold: Float).returns(T.attached_class) } + def self.new( + # The threshold for the score. + pass_threshold: nil + ) + end + + sig { override.returns({ pass_threshold: Float }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::EvalCreateResponse::TestingCriterion::Variants + ] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/eval_custom_data_source_config.rbi b/rbi/openai/models/eval_custom_data_source_config.rbi new file mode 100644 index 00000000..e00ae9a0 --- /dev/null +++ b/rbi/openai/models/eval_custom_data_source_config.rbi @@ -0,0 +1,47 @@ +# typed: strong + +module OpenAI + module Models + class EvalCustomDataSourceConfig < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::EvalCustomDataSourceConfig, OpenAI::Internal::AnyHash) + end + + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :schema + + # The type of data source. Always `custom`. + sig { returns(Symbol) } + attr_accessor :type + + # A CustomDataSourceConfig which specifies the schema of your `item` and + # optionally `sample` namespaces. The response schema defines the shape of the + # data that will be: + # + # - Used to define your testing criteria and + # - What data is required when creating a run + sig do + params(schema: T::Hash[Symbol, T.anything], type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + schema:, + # The type of data source. Always `custom`. 
+ type: :custom + ) + end + + sig do + override.returns({ schema: T::Hash[Symbol, T.anything], type: Symbol }) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/eval_delete_params.rbi b/rbi/openai/models/eval_delete_params.rbi new file mode 100644 index 00000000..40b7ec64 --- /dev/null +++ b/rbi/openai/models/eval_delete_params.rbi @@ -0,0 +1,27 @@ +# typed: strong + +module OpenAI + module Models + class EvalDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::EvalDeleteParams, OpenAI::Internal::AnyHash) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/eval_delete_response.rbi b/rbi/openai/models/eval_delete_response.rbi new file mode 100644 index 00000000..e324447f --- /dev/null +++ b/rbi/openai/models/eval_delete_response.rbi @@ -0,0 +1,37 @@ +# typed: strong + +module OpenAI + module Models + class EvalDeleteResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Models::EvalDeleteResponse, OpenAI::Internal::AnyHash) + end + + sig { returns(T::Boolean) } + attr_accessor :deleted + + sig { returns(String) } + attr_accessor :eval_id + + sig { returns(String) } + attr_accessor :object + + sig do + params(deleted: T::Boolean, eval_id: String, object: String).returns( + T.attached_class + ) + end + def self.new(deleted:, eval_id:, object:) + end + + sig do + override.returns( + { deleted: T::Boolean, eval_id: String, object: String } + ) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/eval_list_params.rbi b/rbi/openai/models/eval_list_params.rbi new file mode 100644 index 00000000..4b125b67 --- /dev/null +++ b/rbi/openai/models/eval_list_params.rbi @@ -0,0 +1,127 @@ +# typed: strong + +module OpenAI + module Models + class EvalListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::EvalListParams, OpenAI::Internal::AnyHash) + end + + # Identifier for the last eval from the previous pagination request. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # Number of evals to retrieve. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for + # descending order. + sig { returns(T.nilable(OpenAI::EvalListParams::Order::OrSymbol)) } + attr_reader :order + + sig { params(order: OpenAI::EvalListParams::Order::OrSymbol).void } + attr_writer :order + + # Evals can be ordered by creation time or last updated time. Use `created_at` for + # creation time or `updated_at` for last updated time. 
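+      # An illustrative sketch (editorial, not part of the generated schema; it
+      # assumes a configured `client` and the SDK's `auto_paging_each`
+      # cursor-pagination helper):
+      #
+      #   page = client.evals.list(limit: 20, order: :desc, order_by: :created_at)
+      #   page.auto_paging_each { |ev| puts ev.name }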
+ sig { returns(T.nilable(OpenAI::EvalListParams::OrderBy::OrSymbol)) } + attr_reader :order_by + + sig { params(order_by: OpenAI::EvalListParams::OrderBy::OrSymbol).void } + attr_writer :order_by + + sig do + params( + after: String, + limit: Integer, + order: OpenAI::EvalListParams::Order::OrSymbol, + order_by: OpenAI::EvalListParams::OrderBy::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Identifier for the last eval from the previous pagination request. + after: nil, + # Number of evals to retrieve. + limit: nil, + # Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for + # descending order. + order: nil, + # Evals can be ordered by creation time or last updated time. Use `created_at` for + # creation time or `updated_at` for last updated time. + order_by: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + limit: Integer, + order: OpenAI::EvalListParams::Order::OrSymbol, + order_by: OpenAI::EvalListParams::OrderBy::OrSymbol, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for + # descending order. + module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::EvalListParams::Order) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = T.let(:asc, OpenAI::EvalListParams::Order::TaggedSymbol) + DESC = T.let(:desc, OpenAI::EvalListParams::Order::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::EvalListParams::Order::TaggedSymbol] + ) + end + def self.values + end + end + + # Evals can be ordered by creation time or last updated time. Use `created_at` for + # creation time or `updated_at` for last updated time. + module OrderBy + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::EvalListParams::OrderBy) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + CREATED_AT = + T.let(:created_at, OpenAI::EvalListParams::OrderBy::TaggedSymbol) + UPDATED_AT = + T.let(:updated_at, OpenAI::EvalListParams::OrderBy::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::EvalListParams::OrderBy::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/eval_list_response.rbi b/rbi/openai/models/eval_list_response.rbi new file mode 100644 index 00000000..1158ef42 --- /dev/null +++ b/rbi/openai/models/eval_list_response.rbi @@ -0,0 +1,328 @@ +# typed: strong + +module OpenAI + module Models + class EvalListResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Models::EvalListResponse, OpenAI::Internal::AnyHash) + end + + # Unique identifier for the evaluation. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) for when the eval was created. + sig { returns(Integer) } + attr_accessor :created_at + + # Configuration of data sources used in runs of the evaluation. + sig do + returns(OpenAI::Models::EvalListResponse::DataSourceConfig::Variants) + end + attr_accessor :data_source_config + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. 
+ sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The name of the evaluation. + sig { returns(String) } + attr_accessor :name + + # The object type. + sig { returns(Symbol) } + attr_accessor :object + + # A list of testing criteria. + sig do + returns( + T::Array[OpenAI::Models::EvalListResponse::TestingCriterion::Variants] + ) + end + attr_accessor :testing_criteria + + # An Eval object with a data source config and testing criteria. An Eval + # represents a task to be done for your LLM integration. Like: + # + # - Improve the quality of my chatbot + # - See how well my chatbot handles customer support + # - Check if o4-mini is better at my usecase than gpt-4o + sig do + params( + id: String, + created_at: Integer, + data_source_config: + T.any( + OpenAI::EvalCustomDataSourceConfig::OrHash, + OpenAI::Models::EvalListResponse::DataSourceConfig::Logs::OrHash, + OpenAI::EvalStoredCompletionsDataSourceConfig::OrHash + ), + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + testing_criteria: + T::Array[ + T.any( + OpenAI::Models::Graders::LabelModelGrader::OrHash, + OpenAI::Models::Graders::StringCheckGrader::OrHash, + OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity::OrHash, + OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython::OrHash, + OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel::OrHash + ) + ], + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the evaluation. + id:, + # The Unix timestamp (in seconds) for when the eval was created. + created_at:, + # Configuration of data sources used in runs of the evaluation. + data_source_config:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + # The name of the evaluation. + name:, + # A list of testing criteria. + testing_criteria:, + # The object type. + object: :eval + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data_source_config: + OpenAI::Models::EvalListResponse::DataSourceConfig::Variants, + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + object: Symbol, + testing_criteria: + T::Array[ + OpenAI::Models::EvalListResponse::TestingCriterion::Variants + ] + } + ) + end + def to_hash + end + + # Configuration of data sources used in runs of the evaluation. + module DataSourceConfig + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::EvalCustomDataSourceConfig, + OpenAI::Models::EvalListResponse::DataSourceConfig::Logs, + OpenAI::EvalStoredCompletionsDataSourceConfig + ) + end + + class Logs < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::EvalListResponse::DataSourceConfig::Logs, + OpenAI::Internal::AnyHash + ) + end + + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :schema + + # The type of data source. Always `logs`. + sig { returns(Symbol) } + attr_accessor :type + + # Set of 16 key-value pairs that can be attached to an object. 
This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # A LogsDataSourceConfig which specifies the metadata property of your logs query. + # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + # schema returned by this data source config is used to define what variables are + # available in your evals. `item` and `sample` are both defined when using this + # data source config. + sig do + params( + schema: T::Hash[Symbol, T.anything], + metadata: T.nilable(T::Hash[Symbol, String]), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + schema:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The type of data source. Always `logs`. + type: :logs + ) + end + + sig do + override.returns( + { + schema: T::Hash[Symbol, T.anything], + type: Symbol, + metadata: T.nilable(T::Hash[Symbol, String]) + } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::EvalListResponse::DataSourceConfig::Variants + ] + ) + end + def self.variants + end + end + + # A LabelModelGrader object which uses a model to assign labels to each item in + # the evaluation. + module TestingCriterion + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Models::Graders::LabelModelGrader, + OpenAI::Models::Graders::StringCheckGrader, + OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity, + OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython, + OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel + ) + end + + class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader + OrHash = + T.type_alias do + T.any( + OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity, + OpenAI::Internal::AnyHash + ) + end + + # The threshold for the score. + sig { returns(Float) } + attr_accessor :pass_threshold + + # A TextSimilarityGrader object which grades text based on similarity metrics. + sig { params(pass_threshold: Float).returns(T.attached_class) } + def self.new( + # The threshold for the score. + pass_threshold: + ) + end + + sig { override.returns({ pass_threshold: Float }) } + def to_hash + end + end + + class EvalGraderPython < OpenAI::Models::Graders::PythonGrader + OrHash = + T.type_alias do + T.any( + OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython, + OpenAI::Internal::AnyHash + ) + end + + # The threshold for the score. + sig { returns(T.nilable(Float)) } + attr_reader :pass_threshold + + sig { params(pass_threshold: Float).void } + attr_writer :pass_threshold + + # A PythonGrader object that runs a python script on the input. + sig { params(pass_threshold: Float).returns(T.attached_class) } + def self.new( + # The threshold for the score.
+ pass_threshold: nil + ) + end + + sig { override.returns({ pass_threshold: Float }) } + def to_hash + end + end + + class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader + OrHash = + T.type_alias do + T.any( + OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel, + OpenAI::Internal::AnyHash + ) + end + + # The threshold for the score. + sig { returns(T.nilable(Float)) } + attr_reader :pass_threshold + + sig { params(pass_threshold: Float).void } + attr_writer :pass_threshold + + # A ScoreModelGrader object that uses a model to assign a score to the input. + sig { params(pass_threshold: Float).returns(T.attached_class) } + def self.new( + # The threshold for the score. + pass_threshold: nil + ) + end + + sig { override.returns({ pass_threshold: Float }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::EvalListResponse::TestingCriterion::Variants + ] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/eval_retrieve_params.rbi b/rbi/openai/models/eval_retrieve_params.rbi new file mode 100644 index 00000000..f8fd0c57 --- /dev/null +++ b/rbi/openai/models/eval_retrieve_params.rbi @@ -0,0 +1,27 @@ +# typed: strong + +module OpenAI + module Models + class EvalRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::EvalRetrieveParams, OpenAI::Internal::AnyHash) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/eval_retrieve_response.rbi b/rbi/openai/models/eval_retrieve_response.rbi new file mode 100644 index 00000000..66b5570d --- /dev/null +++ b/rbi/openai/models/eval_retrieve_response.rbi @@ -0,0 +1,332 @@ +# typed: strong + +module OpenAI + module Models + class EvalRetrieveResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Models::EvalRetrieveResponse, OpenAI::Internal::AnyHash) + end + + # Unique identifier for the evaluation. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) for when the eval was created. + sig { returns(Integer) } + attr_accessor :created_at + + # Configuration of data sources used in runs of the evaluation. + sig do + returns( + OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Variants + ) + end + attr_accessor :data_source_config + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The name of the evaluation. + sig { returns(String) } + attr_accessor :name + + # The object type. + sig { returns(Symbol) } + attr_accessor :object + + # A list of testing criteria. + sig do + returns( + T::Array[ + OpenAI::Models::EvalRetrieveResponse::TestingCriterion::Variants + ] + ) + end + attr_accessor :testing_criteria + + # An Eval object with a data source config and testing criteria. 
An Eval + # represents a task to be done for your LLM integration. Like: + # + # - Improve the quality of my chatbot + # - See how well my chatbot handles customer support + # - Check if o4-mini is better at my usecase than gpt-4o + sig do + params( + id: String, + created_at: Integer, + data_source_config: + T.any( + OpenAI::EvalCustomDataSourceConfig::OrHash, + OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Logs::OrHash, + OpenAI::EvalStoredCompletionsDataSourceConfig::OrHash + ), + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + testing_criteria: + T::Array[ + T.any( + OpenAI::Models::Graders::LabelModelGrader::OrHash, + OpenAI::Models::Graders::StringCheckGrader::OrHash, + OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity::OrHash, + OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython::OrHash, + OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel::OrHash + ) + ], + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the evaluation. + id:, + # The Unix timestamp (in seconds) for when the eval was created. + created_at:, + # Configuration of data sources used in runs of the evaluation. + data_source_config:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + # The name of the evaluation. + name:, + # A list of testing criteria. + testing_criteria:, + # The object type. + object: :eval + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data_source_config: + OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Variants, + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + object: Symbol, + testing_criteria: + T::Array[ + OpenAI::Models::EvalRetrieveResponse::TestingCriterion::Variants + ] + } + ) + end + def to_hash + end + + # Configuration of data sources used in runs of the evaluation. + module DataSourceConfig + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::EvalCustomDataSourceConfig, + OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Logs, + OpenAI::EvalStoredCompletionsDataSourceConfig + ) + end + + class Logs < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Logs, + OpenAI::Internal::AnyHash + ) + end + + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :schema + + # The type of data source. Always `logs`. + sig { returns(Symbol) } + attr_accessor :type + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # A LogsDataSourceConfig which specifies the metadata property of your logs query. 
+ # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The
+ # schema returned by this data source config is used to define what variables are
+ # available in your evals. `item` and `sample` are both defined when using this
+ # data source config.
+ sig do
+ params(
+ schema: T::Hash[Symbol, T.anything],
+ metadata: T.nilable(T::Hash[Symbol, String]),
+ type: Symbol
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # The json schema for the run data source items. Learn how to build JSON schemas
+ # [here](https://json-schema.org/).
+ schema:,
+ # Set of 16 key-value pairs that can be attached to an object. This can be useful
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
+ #
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
+ metadata: nil,
+ # The type of data source. Always `logs`.
+ type: :logs
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ schema: T::Hash[Symbol, T.anything],
+ type: Symbol,
+ metadata: T.nilable(T::Hash[Symbol, String])
+ }
+ )
+ end
+ def to_hash
+ end
+ end
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Variants
+ ]
+ )
+ end
+ def self.variants
+ end
+ end
+
+ # A LabelModelGrader object which uses a model to assign labels to each item in
+ # the evaluation.
+ module TestingCriterion
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ OpenAI::Models::Graders::LabelModelGrader,
+ OpenAI::Models::Graders::StringCheckGrader,
+ OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity,
+ OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython,
+ OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel
+ )
+ end
+
+ class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # The threshold for the score.
+ sig { returns(Float) }
+ attr_accessor :pass_threshold
+
+ # A TextSimilarityGrader object which grades text based on similarity metrics.
+ sig { params(pass_threshold: Float).returns(T.attached_class) }
+ def self.new(
+ # The threshold for the score.
+ pass_threshold:
+ )
+ end
+
+ sig { override.returns({ pass_threshold: Float }) }
+ def to_hash
+ end
+ end
+
+ class EvalGraderPython < OpenAI::Models::Graders::PythonGrader
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # The threshold for the score.
+ sig { returns(T.nilable(Float)) }
+ attr_reader :pass_threshold
+
+ sig { params(pass_threshold: Float).void }
+ attr_writer :pass_threshold
+
+ # A PythonGrader object that runs a python script on the input.
+ sig { params(pass_threshold: Float).returns(T.attached_class) }
+ def self.new(
+ # The threshold for the score.
+ pass_threshold: nil
+ )
+ end
+
+ sig { override.returns({ pass_threshold: Float }) }
+ def to_hash
+ end
+ end
+
+ class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # The threshold for the score.
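+ # (Editorial note: `pass_threshold` is nilable on retrieved evals, so read
+ # it defensively when post-processing results client-side, e.g. with a
+ # hypothetical fallback:
+ #
+ #   threshold = grader.pass_threshold || 0.0
+ # )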
+ sig { returns(T.nilable(Float)) } + attr_reader :pass_threshold + + sig { params(pass_threshold: Float).void } + attr_writer :pass_threshold + + # A ScoreModelGrader object that uses a model to assign a score to the input. + sig { params(pass_threshold: Float).returns(T.attached_class) } + def self.new( + # The threshold for the score. + pass_threshold: nil + ) + end + + sig { override.returns({ pass_threshold: Float }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::EvalRetrieveResponse::TestingCriterion::Variants + ] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/eval_stored_completions_data_source_config.rbi b/rbi/openai/models/eval_stored_completions_data_source_config.rbi new file mode 100644 index 00000000..013b469d --- /dev/null +++ b/rbi/openai/models/eval_stored_completions_data_source_config.rbi @@ -0,0 +1,69 @@ +# typed: strong + +module OpenAI + module Models + class EvalStoredCompletionsDataSourceConfig < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::EvalStoredCompletionsDataSourceConfig, + OpenAI::Internal::AnyHash + ) + end + + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :schema + + # The type of data source. Always `stored_completions`. + sig { returns(Symbol) } + attr_accessor :type + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # Deprecated in favor of LogsDataSourceConfig. + sig do + params( + schema: T::Hash[Symbol, T.anything], + metadata: T.nilable(T::Hash[Symbol, String]), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + schema:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The type of data source. Always `stored_completions`. + type: :stored_completions + ) + end + + sig do + override.returns( + { + schema: T::Hash[Symbol, T.anything], + type: Symbol, + metadata: T.nilable(T::Hash[Symbol, String]) + } + ) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/eval_update_params.rbi b/rbi/openai/models/eval_update_params.rbi new file mode 100644 index 00000000..e3e760e6 --- /dev/null +++ b/rbi/openai/models/eval_update_params.rbi @@ -0,0 +1,64 @@ +# typed: strong + +module OpenAI + module Models + class EvalUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::EvalUpdateParams, OpenAI::Internal::AnyHash) + end + + # Set of 16 key-value pairs that can be attached to an object. 
This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # Rename the evaluation. + sig { returns(T.nilable(String)) } + attr_reader :name + + sig { params(name: String).void } + attr_writer :name + + sig do + params( + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # Rename the evaluation. + name: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/eval_update_response.rbi b/rbi/openai/models/eval_update_response.rbi new file mode 100644 index 00000000..07d46351 --- /dev/null +++ b/rbi/openai/models/eval_update_response.rbi @@ -0,0 +1,330 @@ +# typed: strong + +module OpenAI + module Models + class EvalUpdateResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Models::EvalUpdateResponse, OpenAI::Internal::AnyHash) + end + + # Unique identifier for the evaluation. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) for when the eval was created. + sig { returns(Integer) } + attr_accessor :created_at + + # Configuration of data sources used in runs of the evaluation. + sig do + returns(OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Variants) + end + attr_accessor :data_source_config + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The name of the evaluation. + sig { returns(String) } + attr_accessor :name + + # The object type. + sig { returns(Symbol) } + attr_accessor :object + + # A list of testing criteria. + sig do + returns( + T::Array[ + OpenAI::Models::EvalUpdateResponse::TestingCriterion::Variants + ] + ) + end + attr_accessor :testing_criteria + + # An Eval object with a data source config and testing criteria. An Eval + # represents a task to be done for your LLM integration. 
Like: + # + # - Improve the quality of my chatbot + # - See how well my chatbot handles customer support + # - Check if o4-mini is better at my usecase than gpt-4o + sig do + params( + id: String, + created_at: Integer, + data_source_config: + T.any( + OpenAI::EvalCustomDataSourceConfig::OrHash, + OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Logs::OrHash, + OpenAI::EvalStoredCompletionsDataSourceConfig::OrHash + ), + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + testing_criteria: + T::Array[ + T.any( + OpenAI::Models::Graders::LabelModelGrader::OrHash, + OpenAI::Models::Graders::StringCheckGrader::OrHash, + OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderTextSimilarity::OrHash, + OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderPython::OrHash, + OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderScoreModel::OrHash + ) + ], + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the evaluation. + id:, + # The Unix timestamp (in seconds) for when the eval was created. + created_at:, + # Configuration of data sources used in runs of the evaluation. + data_source_config:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + # The name of the evaluation. + name:, + # A list of testing criteria. + testing_criteria:, + # The object type. + object: :eval + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data_source_config: + OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Variants, + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + object: Symbol, + testing_criteria: + T::Array[ + OpenAI::Models::EvalUpdateResponse::TestingCriterion::Variants + ] + } + ) + end + def to_hash + end + + # Configuration of data sources used in runs of the evaluation. + module DataSourceConfig + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::EvalCustomDataSourceConfig, + OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Logs, + OpenAI::EvalStoredCompletionsDataSourceConfig + ) + end + + class Logs < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Logs, + OpenAI::Internal::AnyHash + ) + end + + # The json schema for the run data source items. Learn how to build JSON schemas + # [here](https://json-schema.org/). + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :schema + + # The type of data source. Always `logs`. + sig { returns(Symbol) } + attr_accessor :type + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # A LogsDataSourceConfig which specifies the metadata property of your logs query. + # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. 
The
+ # schema returned by this data source config is used to define what variables are
+ # available in your evals. `item` and `sample` are both defined when using this
+ # data source config.
+ sig do
+ params(
+ schema: T::Hash[Symbol, T.anything],
+ metadata: T.nilable(T::Hash[Symbol, String]),
+ type: Symbol
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # The json schema for the run data source items. Learn how to build JSON schemas
+ # [here](https://json-schema.org/).
+ schema:,
+ # Set of 16 key-value pairs that can be attached to an object. This can be useful
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
+ #
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
+ metadata: nil,
+ # The type of data source. Always `logs`.
+ type: :logs
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ schema: T::Hash[Symbol, T.anything],
+ type: Symbol,
+ metadata: T.nilable(T::Hash[Symbol, String])
+ }
+ )
+ end
+ def to_hash
+ end
+ end
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Variants
+ ]
+ )
+ end
+ def self.variants
+ end
+ end
+
+ # A LabelModelGrader object which uses a model to assign labels to each item in
+ # the evaluation.
+ module TestingCriterion
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ OpenAI::Models::Graders::LabelModelGrader,
+ OpenAI::Models::Graders::StringCheckGrader,
+ OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderTextSimilarity,
+ OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderPython,
+ OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderScoreModel
+ )
+ end
+
+ class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderTextSimilarity,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # The threshold for the score.
+ sig { returns(Float) }
+ attr_accessor :pass_threshold
+
+ # A TextSimilarityGrader object which grades text based on similarity metrics.
+ sig { params(pass_threshold: Float).returns(T.attached_class) }
+ def self.new(
+ # The threshold for the score.
+ pass_threshold:
+ )
+ end
+
+ sig { override.returns({ pass_threshold: Float }) }
+ def to_hash
+ end
+ end
+
+ class EvalGraderPython < OpenAI::Models::Graders::PythonGrader
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderPython,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # The threshold for the score.
+ sig { returns(T.nilable(Float)) }
+ attr_reader :pass_threshold
+
+ sig { params(pass_threshold: Float).void }
+ attr_writer :pass_threshold
+
+ # A PythonGrader object that runs a python script on the input.
+ sig { params(pass_threshold: Float).returns(T.attached_class) }
+ def self.new(
+ # The threshold for the score.
+ pass_threshold: nil
+ )
+ end
+
+ sig { override.returns({ pass_threshold: Float }) }
+ def to_hash
+ end
+ end
+
+ class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderScoreModel,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # The threshold for the score.
+ sig { returns(T.nilable(Float)) } + attr_reader :pass_threshold + + sig { params(pass_threshold: Float).void } + attr_writer :pass_threshold + + # A ScoreModelGrader object that uses a model to assign a score to the input. + sig { params(pass_threshold: Float).returns(T.attached_class) } + def self.new( + # The threshold for the score. + pass_threshold: nil + ) + end + + sig { override.returns({ pass_threshold: Float }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::EvalUpdateResponse::TestingCriterion::Variants + ] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi b/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi new file mode 100644 index 00000000..63682eda --- /dev/null +++ b/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi @@ -0,0 +1,1057 @@ +# typed: strong + +module OpenAI + module Models + module Evals + class CreateEvalCompletionsRunDataSource < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource, + OpenAI::Internal::AnyHash + ) + end + + # Determines what populates the `item` namespace in this run's data source. + sig do + returns( + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileID, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions + ) + ) + end + attr_accessor :source + + # The type of run data source. Always `completions`. + sig do + returns( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Type::OrSymbol + ) + end + attr_accessor :type + + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + sig do + returns( + T.nilable( + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::ItemReference + ) + ) + ) + end + attr_reader :input_messages + + sig do + params( + input_messages: + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::OrHash, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::ItemReference::OrHash + ) + ).void + end + attr_writer :input_messages + + # The name of the model to use for generating completions (e.g. "o3-mini"). + sig { returns(T.nilable(String)) } + attr_reader :model + + sig { params(model: String).void } + attr_writer :model + + sig do + returns( + T.nilable( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams + ) + ) + end + attr_reader :sampling_params + + sig do + params( + sampling_params: + OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::OrHash + ).void + end + attr_writer :sampling_params + + # A CompletionsRunDataSource object describing a model sampling configuration. 
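+ # (Editorial sketch of constructing one; the file id and model name are
+ # hypothetical, and plain hashes are accepted anywhere an `OrHash` alias
+ # appears in the signature:
+ #
+ #   OpenAI::Evals::CreateEvalCompletionsRunDataSource.new(
+ #     source: { type: :file_id, id: "file-abc123" },
+ #     type: :completions,
+ #     model: "gpt-4o-mini",
+ #     sampling_params: { temperature: 0.2, seed: 42 }
+ #   )
+ # )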
+ sig do + params( + source: + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent::OrHash, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileID::OrHash, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions::OrHash + ), + type: + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Type::OrSymbol, + input_messages: + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::OrHash, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::ItemReference::OrHash + ), + model: String, + sampling_params: + OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::OrHash + ).returns(T.attached_class) + end + def self.new( + # Determines what populates the `item` namespace in this run's data source. + source:, + # The type of run data source. Always `completions`. + type:, + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + input_messages: nil, + # The name of the model to use for generating completions (e.g. "o3-mini"). + model: nil, + sampling_params: nil + ) + end + + sig do + override.returns( + { + source: + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileID, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions + ), + type: + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Type::OrSymbol, + input_messages: + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::ItemReference + ), + model: String, + sampling_params: + OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams + } + ) + end + def to_hash + end + + # Determines what populates the `item` namespace in this run's data source. + module Source + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileID, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions + ) + end + + class FileContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent, + OpenAI::Internal::AnyHash + ) + end + + # The content of the jsonl file. + sig do + returns( + T::Array[ + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent::Content + ] + ) + end + attr_accessor :content + + # The type of jsonl source. Always `file_content`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + content: + T::Array[ + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent::Content::OrHash + ], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The content of the jsonl file. + content:, + # The type of jsonl source. Always `file_content`. 
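+ # (Editorial example with hypothetical row data: each entry pairs a
+ # required `item` hash with an optional `sample` hash, and the keys of
+ # `item` become the `{{item.*}}` template variables, e.g.
+ #
+ #   content: [{ item: { question: "2+2?" }, sample: { answer: "4" } }]
+ # )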
+ type: :file_content + ) + end + + sig do + override.returns( + { + content: + T::Array[ + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent::Content + ], + type: Symbol + } + ) + end + def to_hash + end + + class Content < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent::Content, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :item + + sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } + attr_reader :sample + + sig { params(sample: T::Hash[Symbol, T.anything]).void } + attr_writer :sample + + sig do + params( + item: T::Hash[Symbol, T.anything], + sample: T::Hash[Symbol, T.anything] + ).returns(T.attached_class) + end + def self.new(item:, sample: nil) + end + + sig do + override.returns( + { + item: T::Hash[Symbol, T.anything], + sample: T::Hash[Symbol, T.anything] + } + ) + end + def to_hash + end + end + end + + class FileID < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileID, + OpenAI::Internal::AnyHash + ) + end + + # The identifier of the file. + sig { returns(String) } + attr_accessor :id + + # The type of jsonl source. Always `file_id`. + sig { returns(Symbol) } + attr_accessor :type + + sig { params(id: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The identifier of the file. + id:, + # The type of jsonl source. Always `file_id`. + type: :file_id + ) + end + + sig { override.returns({ id: String, type: Symbol }) } + def to_hash + end + end + + class StoredCompletions < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions, + OpenAI::Internal::AnyHash + ) + end + + # The type of source. Always `stored_completions`. + sig { returns(Symbol) } + attr_accessor :type + + # An optional Unix timestamp to filter items created after this time. + sig { returns(T.nilable(Integer)) } + attr_accessor :created_after + + # An optional Unix timestamp to filter items created before this time. + sig { returns(T.nilable(Integer)) } + attr_accessor :created_before + + # An optional maximum number of items to return. + sig { returns(T.nilable(Integer)) } + attr_accessor :limit + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # An optional model to filter by (e.g., 'gpt-4o'). + sig { returns(T.nilable(String)) } + attr_accessor :model + + # A StoredCompletionsRunDataSource configuration describing a set of filters + sig do + params( + created_after: T.nilable(Integer), + created_before: T.nilable(Integer), + limit: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # An optional Unix timestamp to filter items created after this time. + created_after: nil, + # An optional Unix timestamp to filter items created before this time. + created_before: nil, + # An optional maximum number of items to return. 
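+ # (Editorial example: a hypothetical window of at most 100 stored
+ # completions from a given model and time range:
+ #
+ #   created_after: 1_700_000_000, model: "gpt-4o", limit: 100
+ # )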
+ limit: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # An optional model to filter by (e.g., 'gpt-4o'). + model: nil, + # The type of source. Always `stored_completions`. + type: :stored_completions + ) + end + + sig do + override.returns( + { + type: Symbol, + created_after: T.nilable(Integer), + created_before: T.nilable(Integer), + limit: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: T.nilable(String) + } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::Variants + ] + ) + end + def self.variants + end + end + + # The type of run data source. Always `completions`. + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + COMPLETIONS = + T.let( + :completions, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + module InputMessages + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::ItemReference + ) + end + + class Template < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template, + OpenAI::Internal::AnyHash + ) + end + + # A list of chat messages forming the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. + sig do + returns( + T::Array[ + T.any( + OpenAI::Responses::EasyInputMessage, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem + ) + ] + ) + end + attr_accessor :template + + # The type of input messages. Always `template`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + template: + T::Array[ + T.any( + OpenAI::Responses::EasyInputMessage::OrHash, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::OrHash + ) + ], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # A list of chat messages forming the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. + template:, + # The type of input messages. Always `template`. 
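+ # (Editorial example of a two-message template; the `item.question` key is
+ # hypothetical and must exist in the run's data source rows:
+ #
+ #   template: [
+ #     { role: :system, content: "Answer concisely." },
+ #     { role: :user, content: "{{item.question}}" }
+ #   ]
+ # )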
+ type: :template + ) + end + + sig do + override.returns( + { + template: + T::Array[ + T.any( + OpenAI::Responses::EasyInputMessage, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem + ) + ], + type: Symbol + } + ) + end + def to_hash + end + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + module Template + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::EasyInputMessage, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem + ) + end + + class EvalItem < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem, + OpenAI::Internal::AnyHash + ) + end + + # Inputs to the model - can contain template strings. + sig do + returns( + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, + T::Array[T.anything] + ) + ) + end + attr_accessor :content + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + sig do + returns( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Role::OrSymbol + ) + end + attr_accessor :role + + # The type of the message input. Always `message`. + sig do + returns( + T.nilable( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Type::OrSymbol + ) + ) + end + attr_reader :type + + sig do + params( + type: + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Type::OrSymbol + ).void + end + attr_writer :type + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + sig do + params( + content: + T.any( + String, + OpenAI::Responses::ResponseInputText::OrHash, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText::OrHash, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage::OrHash, + T::Array[T.anything] + ), + role: + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Role::OrSymbol, + type: + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Type::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # Inputs to the model - can contain template strings. + content:, + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + role:, + # The type of the message input. Always `message`. 
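+ # (Editorial note: `content` accepts a plain template string as well as
+ # the structured text/image parts defined below, e.g. a hypothetical
+ #
+ #   EvalItem.new(role: :user, content: "Grade this: {{item.answer}}")
+ # )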
+ type: nil + ) + end + + sig do + override.returns( + { + content: + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, + T::Array[T.anything] + ), + role: + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Role::OrSymbol, + type: + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Type::OrSymbol + } + ) + end + def to_hash + end + + # Inputs to the model - can contain template strings. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, + T::Array[T.anything] + ) + end + + class OutputText < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Internal::AnyHash + ) + end + + # The text output from the model. + sig { returns(String) } + attr_accessor :text + + # The type of the output text. Always `output_text`. + sig { returns(Symbol) } + attr_accessor :type + + # A text output from the model. + sig do + params(text: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The text output from the model. + text:, + # The type of the output text. Always `output_text`. + type: :output_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + + class InputImage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, + OpenAI::Internal::AnyHash + ) + end + + # The URL of the image input. + sig { returns(String) } + attr_accessor :image_url + + # The type of the image input. Always `input_image`. + sig { returns(Symbol) } + attr_accessor :type + + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + sig { returns(T.nilable(String)) } + attr_reader :detail + + sig { params(detail: String).void } + attr_writer :detail + + # An image input to the model. + sig do + params( + image_url: String, + detail: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The URL of the image input. + image_url:, + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + detail: nil, + # The type of the image input. Always `input_image`. 
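+ # (Editorial example with a hypothetical URL:
+ #
+ #   InputImage.new(image_url: "https://example.com/chart.png", detail: "low")
+ #
+ # `detail` is typed as a plain String here, so values are forwarded
+ # verbatim rather than validated against an enum.)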
+ type: :input_image + ) + end + + sig do + override.returns( + { image_url: String, type: Symbol, detail: String } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::Variants + ] + ) + end + def self.variants + end + + AnArrayOfInputTextAndInputImageArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + OpenAI::Internal::Type::Unknown + ], + OpenAI::Internal::Type::Converter + ) + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Role + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + SYSTEM = + T.let( + :system, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + DEVELOPER = + T.let( + :developer, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The type of the message input. Always `message`. + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MESSAGE = + T.let( + :message, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Variants + ] + ) + end + def self.variants + end + end + end + + class ItemReference < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::ItemReference, + OpenAI::Internal::AnyHash + ) + end + + # A reference to a variable in the `item` namespace. Ie, "item.input_trajectory" + sig { returns(String) } + attr_accessor :item_reference + + # The type of input messages. Always `item_reference`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params(item_reference: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # A reference to a variable in the `item` namespace. Ie, "item.input_trajectory" + item_reference:, + # The type of input messages. Always `item_reference`. 
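+ # (Editorial example: point the run at a prebuilt trajectory already
+ # present in each data source row:
+ #
+ #   ItemReference.new(item_reference: "item.input_trajectory")
+ # )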
+ type: :item_reference + ) + end + + sig { override.returns({ item_reference: String, type: Symbol }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Variants + ] + ) + end + def self.variants + end + end + + class SamplingParams < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams, + OpenAI::Internal::AnyHash + ) + end + + # The maximum number of tokens in the generated output. + sig { returns(T.nilable(Integer)) } + attr_reader :max_completion_tokens + + sig { params(max_completion_tokens: Integer).void } + attr_writer :max_completion_tokens + + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + sig do + returns( + T.nilable( + T.any( + OpenAI::ResponseFormatText, + OpenAI::ResponseFormatJSONSchema, + OpenAI::ResponseFormatJSONObject + ) + ) + ) + end + attr_reader :response_format + + sig do + params( + response_format: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ) + ).void + end + attr_writer :response_format + + # A seed value to initialize the randomness, during sampling. + sig { returns(T.nilable(Integer)) } + attr_reader :seed + + sig { params(seed: Integer).void } + attr_writer :seed + + # A higher temperature increases randomness in the outputs. + sig { returns(T.nilable(Float)) } + attr_reader :temperature + + sig { params(temperature: Float).void } + attr_writer :temperature + + # A list of tools the model may call. Currently, only functions are supported as a + # tool. Use this to provide a list of functions the model may generate JSON inputs + # for. A max of 128 functions are supported. + sig do + returns( + T.nilable(T::Array[OpenAI::Chat::ChatCompletionFunctionTool]) + ) + end + attr_reader :tools + + sig do + params( + tools: T::Array[OpenAI::Chat::ChatCompletionFunctionTool::OrHash] + ).void + end + attr_writer :tools + + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + sig { returns(T.nilable(Float)) } + attr_reader :top_p + + sig { params(top_p: Float).void } + attr_writer :top_p + + sig do + params( + max_completion_tokens: Integer, + response_format: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ), + seed: Integer, + temperature: Float, + tools: T::Array[OpenAI::Chat::ChatCompletionFunctionTool::OrHash], + top_p: Float + ).returns(T.attached_class) + end + def self.new( + # The maximum number of tokens in the generated output. + max_completion_tokens: nil, + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
+ #
+ # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ # ensures the message the model generates is valid JSON. Using `json_schema` is
+ # preferred for models that support it.
+ response_format: nil,
+ # A seed value to initialize the randomness during sampling.
+ seed: nil,
+ # A higher temperature increases randomness in the outputs.
+ temperature: nil,
+ # A list of tools the model may call. Currently, only functions are supported as a
+ # tool. Use this to provide a list of functions the model may generate JSON inputs
+ # for. A max of 128 functions are supported.
+ tools: nil,
+ # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+ top_p: nil
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ max_completion_tokens: Integer,
+ response_format:
+ T.any(
+ OpenAI::ResponseFormatText,
+ OpenAI::ResponseFormatJSONSchema,
+ OpenAI::ResponseFormatJSONObject
+ ),
+ seed: Integer,
+ temperature: Float,
+ tools: T::Array[OpenAI::Chat::ChatCompletionFunctionTool],
+ top_p: Float
+ }
+ )
+ end
+ def to_hash
+ end
+
+ # An object specifying the format that the model must output.
+ #
+ # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ # Outputs which ensures the model will match your supplied JSON schema. Learn more
+ # in the
+ # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ #
+ # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ # ensures the message the model generates is valid JSON. Using `json_schema` is
+ # preferred for models that support it.
+ module ResponseFormat
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ OpenAI::ResponseFormatText,
+ OpenAI::ResponseFormatJSONSchema,
+ OpenAI::ResponseFormatJSONObject
+ )
+ end
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::ResponseFormat::Variants
+ ]
+ )
+ end
+ def self.variants
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/rbi/openai/models/evals/create_eval_jsonl_run_data_source.rbi b/rbi/openai/models/evals/create_eval_jsonl_run_data_source.rbi
new file mode 100644
index 00000000..d72d28b8
--- /dev/null
+++ b/rbi/openai/models/evals/create_eval_jsonl_run_data_source.rbi
@@ -0,0 +1,215 @@
+# typed: strong
+
+module OpenAI
+ module Models
+ module Evals
+ class CreateEvalJSONLRunDataSource < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::Evals::CreateEvalJSONLRunDataSource,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # Determines what populates the `item` namespace in the data source.
+ sig do
+ returns(
+ T.any(
+ OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent,
+ OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileID
+ )
+ )
+ end
+ attr_accessor :source
+
+ # The type of data source. Always `jsonl`.
+ sig { returns(Symbol) }
+ attr_accessor :type
+
+ # A JsonlRunDataSource object that specifies a JSONL file that matches the
+ # eval.
+ sig do
+ params(
+ source:
+ T.any(
+ OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent::OrHash,
+ OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileID::OrHash
+ ),
+ type: Symbol
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # Determines what populates the `item` namespace in the data source.
+ source:,
+ # The type of data source. Always `jsonl`.
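+ # (Editorial sketch with a hypothetical uploaded file id; `type:` may be
+ # omitted because it defaults to `:jsonl`:
+ #
+ #   OpenAI::Evals::CreateEvalJSONLRunDataSource.new(
+ #     source: { type: :file_id, id: "file-abc123" }
+ #   )
+ # )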
+ type: :jsonl + ) + end + + sig do + override.returns( + { + source: + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent, + OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileID + ), + type: Symbol + } + ) + end + def to_hash + end + + # Determines what populates the `item` namespace in the data source. + module Source + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent, + OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileID + ) + end + + class FileContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent, + OpenAI::Internal::AnyHash + ) + end + + # The content of the jsonl file. + sig do + returns( + T::Array[ + OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent::Content + ] + ) + end + attr_accessor :content + + # The type of jsonl source. Always `file_content`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + content: + T::Array[ + OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent::Content::OrHash + ], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The content of the jsonl file. + content:, + # The type of jsonl source. Always `file_content`. + type: :file_content + ) + end + + sig do + override.returns( + { + content: + T::Array[ + OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent::Content + ], + type: Symbol + } + ) + end + def to_hash + end + + class Content < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent::Content, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :item + + sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } + attr_reader :sample + + sig { params(sample: T::Hash[Symbol, T.anything]).void } + attr_writer :sample + + sig do + params( + item: T::Hash[Symbol, T.anything], + sample: T::Hash[Symbol, T.anything] + ).returns(T.attached_class) + end + def self.new(item:, sample: nil) + end + + sig do + override.returns( + { + item: T::Hash[Symbol, T.anything], + sample: T::Hash[Symbol, T.anything] + } + ) + end + def to_hash + end + end + end + + class FileID < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileID, + OpenAI::Internal::AnyHash + ) + end + + # The identifier of the file. + sig { returns(String) } + attr_accessor :id + + # The type of jsonl source. Always `file_id`. + sig { returns(Symbol) } + attr_accessor :type + + sig { params(id: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The identifier of the file. + id:, + # The type of jsonl source. Always `file_id`. 
+ type: :file_id + ) + end + + sig { override.returns({ id: String, type: Symbol }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::Variants + ] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/evals/eval_api_error.rbi b/rbi/openai/models/evals/eval_api_error.rbi new file mode 100644 index 00000000..c37beff5 --- /dev/null +++ b/rbi/openai/models/evals/eval_api_error.rbi @@ -0,0 +1,38 @@ +# typed: strong + +module OpenAI + module Models + EvalAPIError = Evals::EvalAPIError + + module Evals + class EvalAPIError < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Evals::EvalAPIError, OpenAI::Internal::AnyHash) + end + + # The error code. + sig { returns(String) } + attr_accessor :code + + # The error message. + sig { returns(String) } + attr_accessor :message + + # An object representing an error response from the Eval API. + sig { params(code: String, message: String).returns(T.attached_class) } + def self.new( + # The error code. + code:, + # The error message. + message: + ) + end + + sig { override.returns({ code: String, message: String }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/evals/run_cancel_params.rbi b/rbi/openai/models/evals/run_cancel_params.rbi new file mode 100644 index 00000000..fdef62a7 --- /dev/null +++ b/rbi/openai/models/evals/run_cancel_params.rbi @@ -0,0 +1,37 @@ +# typed: strong + +module OpenAI + module Models + module Evals + class RunCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::Evals::RunCancelParams, OpenAI::Internal::AnyHash) + end + + sig { returns(String) } + attr_accessor :eval_id + + sig do + params( + eval_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(eval_id:, request_options: {}) + end + + sig do + override.returns( + { eval_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/evals/run_cancel_response.rbi b/rbi/openai/models/evals/run_cancel_response.rbi new file mode 100644 index 00000000..13f94289 --- /dev/null +++ b/rbi/openai/models/evals/run_cancel_response.rbi @@ -0,0 +1,1550 @@ +# typed: strong + +module OpenAI + module Models + module Evals + class RunCancelResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse, + OpenAI::Internal::AnyHash + ) + end + + # Unique identifier for the evaluation run. + sig { returns(String) } + attr_accessor :id + + # Unix timestamp (in seconds) when the evaluation run was created. + sig { returns(Integer) } + attr_accessor :created_at + + # Information about the run's data source. + sig do + returns( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Variants + ) + end + attr_accessor :data_source + + # An object representing an error response from the Eval API. + sig { returns(OpenAI::Evals::EvalAPIError) } + attr_reader :error + + sig { params(error: OpenAI::Evals::EvalAPIError::OrHash).void } + attr_writer :error + + # The identifier of the associated evaluation. + sig { returns(String) } + attr_accessor :eval_id + + # Set of 16 key-value pairs that can be attached to an object. 
This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The model that is evaluated, if applicable. + sig { returns(String) } + attr_accessor :model + + # The name of the evaluation run. + sig { returns(String) } + attr_accessor :name + + # The type of the object. Always "eval.run". + sig { returns(Symbol) } + attr_accessor :object + + # Usage statistics for each model during the evaluation run. + sig do + returns( + T::Array[OpenAI::Models::Evals::RunCancelResponse::PerModelUsage] + ) + end + attr_accessor :per_model_usage + + # Results per testing criteria applied during the evaluation run. + sig do + returns( + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::PerTestingCriteriaResult + ] + ) + end + attr_accessor :per_testing_criteria_results + + # The URL to the rendered evaluation run report on the UI dashboard. + sig { returns(String) } + attr_accessor :report_url + + # Counters summarizing the outcomes of the evaluation run. + sig { returns(OpenAI::Models::Evals::RunCancelResponse::ResultCounts) } + attr_reader :result_counts + + sig do + params( + result_counts: + OpenAI::Models::Evals::RunCancelResponse::ResultCounts::OrHash + ).void + end + attr_writer :result_counts + + # The status of the evaluation run. + sig { returns(String) } + attr_accessor :status + + # A schema representing an evaluation run. + sig do + params( + id: String, + created_at: Integer, + data_source: + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource::OrHash, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::OrHash, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::OrHash + ), + error: OpenAI::Evals::EvalAPIError::OrHash, + eval_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + model: String, + name: String, + per_model_usage: + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::PerModelUsage::OrHash + ], + per_testing_criteria_results: + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::PerTestingCriteriaResult::OrHash + ], + report_url: String, + result_counts: + OpenAI::Models::Evals::RunCancelResponse::ResultCounts::OrHash, + status: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the evaluation run. + id:, + # Unix timestamp (in seconds) when the evaluation run was created. + created_at:, + # Information about the run's data source. + data_source:, + # An object representing an error response from the Eval API. + error:, + # The identifier of the associated evaluation. + eval_id:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + # The model that is evaluated, if applicable. + model:, + # The name of the evaluation run. + name:, + # Usage statistics for each model during the evaluation run. + per_model_usage:, + # Results per testing criteria applied during the evaluation run. + per_testing_criteria_results:, + # The URL to the rendered evaluation run report on the UI dashboard. 
+ report_url:, + # Counters summarizing the outcomes of the evaluation run. + result_counts:, + # The status of the evaluation run. + status:, + # The type of the object. Always "eval.run". + object: :"eval.run" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data_source: + OpenAI::Models::Evals::RunCancelResponse::DataSource::Variants, + error: OpenAI::Evals::EvalAPIError, + eval_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + model: String, + name: String, + object: Symbol, + per_model_usage: + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::PerModelUsage + ], + per_testing_criteria_results: + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::PerTestingCriteriaResult + ], + report_url: String, + result_counts: + OpenAI::Models::Evals::RunCancelResponse::ResultCounts, + status: String + } + ) + end + def to_hash + end + + # Information about the run's data source. + module DataSource + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource, + OpenAI::Evals::CreateEvalCompletionsRunDataSource, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses + ) + end + + class Responses < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses, + OpenAI::Internal::AnyHash + ) + end + + # Determines what populates the `item` namespace in this run's data source. + sig do + returns( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Variants + ) + end + attr_accessor :source + + # The type of run data source. Always `responses`. + sig { returns(Symbol) } + attr_accessor :type + + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + sig do + returns( + T.nilable( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Variants + ) + ) + end + attr_reader :input_messages + + sig do + params( + input_messages: + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::OrHash, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::ItemReference::OrHash + ) + ).void + end + attr_writer :input_messages + + # The name of the model to use for generating completions (e.g. "o3-mini"). + sig { returns(T.nilable(String)) } + attr_reader :model + + sig { params(model: String).void } + attr_writer :model + + sig do + returns( + T.nilable( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams + ) + ) + end + attr_reader :sampling_params + + sig do + params( + sampling_params: + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::OrHash + ).void + end + attr_writer :sampling_params + + # A ResponsesRunDataSource object describing a model sampling configuration. 
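+          #
+          # A minimal construction sketch (illustrative only; the file ID below is
+          # hypothetical, and a plain hash stands in for the typed source model):
+          #
+          #   OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses.new(
+          #     source: {type: :file_id, id: "file-abc123"}
+          #   )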
+ sig do + params( + source: + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent::OrHash, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileID::OrHash, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Responses::OrHash + ), + input_messages: + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::OrHash, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::ItemReference::OrHash + ), + model: String, + sampling_params: + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Determines what populates the `item` namespace in this run's data source. + source:, + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + input_messages: nil, + # The name of the model to use for generating completions (e.g. "o3-mini"). + model: nil, + sampling_params: nil, + # The type of run data source. Always `responses`. + type: :responses + ) + end + + sig do + override.returns( + { + source: + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Variants, + type: Symbol, + input_messages: + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Variants, + model: String, + sampling_params: + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams + } + ) + end + def to_hash + end + + # Determines what populates the `item` namespace in this run's data source. + module Source + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileID, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Responses + ) + end + + class FileContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent, + OpenAI::Internal::AnyHash + ) + end + + # The content of the jsonl file. + sig do + returns( + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent::Content + ] + ) + end + attr_accessor :content + + # The type of jsonl source. Always `file_content`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + content: + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent::Content::OrHash + ], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The content of the jsonl file. + content:, + # The type of jsonl source. Always `file_content`. 
+ type: :file_content + ) + end + + sig do + override.returns( + { + content: + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent::Content + ], + type: Symbol + } + ) + end + def to_hash + end + + class Content < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent::Content, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :item + + sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } + attr_reader :sample + + sig { params(sample: T::Hash[Symbol, T.anything]).void } + attr_writer :sample + + sig do + params( + item: T::Hash[Symbol, T.anything], + sample: T::Hash[Symbol, T.anything] + ).returns(T.attached_class) + end + def self.new(item:, sample: nil) + end + + sig do + override.returns( + { + item: T::Hash[Symbol, T.anything], + sample: T::Hash[Symbol, T.anything] + } + ) + end + def to_hash + end + end + end + + class FileID < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileID, + OpenAI::Internal::AnyHash + ) + end + + # The identifier of the file. + sig { returns(String) } + attr_accessor :id + + # The type of jsonl source. Always `file_id`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params(id: String, type: Symbol).returns(T.attached_class) + end + def self.new( + # The identifier of the file. + id:, + # The type of jsonl source. Always `file_id`. + type: :file_id + ) + end + + sig { override.returns({ id: String, type: Symbol }) } + def to_hash + end + end + + class Responses < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Responses, + OpenAI::Internal::AnyHash + ) + end + + # The type of run data source. Always `responses`. + sig { returns(Symbol) } + attr_accessor :type + + # Only include items created after this timestamp (inclusive). This is a query + # parameter used to select responses. + sig { returns(T.nilable(Integer)) } + attr_accessor :created_after + + # Only include items created before this timestamp (inclusive). This is a query + # parameter used to select responses. + sig { returns(T.nilable(Integer)) } + attr_accessor :created_before + + # Optional string to search the 'instructions' field. This is a query parameter + # used to select responses. + sig { returns(T.nilable(String)) } + attr_accessor :instructions_search + + # Metadata filter for the responses. This is a query parameter used to select + # responses. + sig { returns(T.nilable(T.anything)) } + attr_accessor :metadata + + # The name of the model to find responses for. This is a query parameter used to + # select responses. + sig { returns(T.nilable(String)) } + attr_accessor :model + + # Optional reasoning effort parameter. This is a query parameter used to select + # responses. + sig do + returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) + end + attr_accessor :reasoning_effort + + # Sampling temperature. This is a query parameter used to select responses. + sig { returns(T.nilable(Float)) } + attr_accessor :temperature + + # List of tool names. This is a query parameter used to select responses. + sig { returns(T.nilable(T::Array[String])) } + attr_accessor :tools + + # Nucleus sampling parameter. This is a query parameter used to select responses. 
+                sig { returns(T.nilable(Float)) }
+                attr_accessor :top_p
+
+                # List of user identifiers. This is a query parameter used to select responses.
+                sig { returns(T.nilable(T::Array[String])) }
+                attr_accessor :users
+
+                # An EvalResponsesSource object describing a run data source configuration.
+                sig do
+                  params(
+                    created_after: T.nilable(Integer),
+                    created_before: T.nilable(Integer),
+                    instructions_search: T.nilable(String),
+                    metadata: T.nilable(T.anything),
+                    model: T.nilable(String),
+                    reasoning_effort:
+                      T.nilable(OpenAI::ReasoningEffort::OrSymbol),
+                    temperature: T.nilable(Float),
+                    tools: T.nilable(T::Array[String]),
+                    top_p: T.nilable(Float),
+                    users: T.nilable(T::Array[String]),
+                    type: Symbol
+                  ).returns(T.attached_class)
+                end
+                def self.new(
+                  # Only include items created after this timestamp (inclusive). This is a query
+                  # parameter used to select responses.
+                  created_after: nil,
+                  # Only include items created before this timestamp (inclusive). This is a query
+                  # parameter used to select responses.
+                  created_before: nil,
+                  # Optional string to search the 'instructions' field. This is a query parameter
+                  # used to select responses.
+                  instructions_search: nil,
+                  # Metadata filter for the responses. This is a query parameter used to select
+                  # responses.
+                  metadata: nil,
+                  # The name of the model to find responses for. This is a query parameter used to
+                  # select responses.
+                  model: nil,
+                  # Optional reasoning effort parameter. This is a query parameter used to select
+                  # responses.
+                  reasoning_effort: nil,
+                  # Sampling temperature. This is a query parameter used to select responses.
+                  temperature: nil,
+                  # List of tool names. This is a query parameter used to select responses.
+                  tools: nil,
+                  # Nucleus sampling parameter. This is a query parameter used to select responses.
+                  top_p: nil,
+                  # List of user identifiers. This is a query parameter used to select responses.
+                  users: nil,
+                  # The type of run data source. Always `responses`.
+                  type: :responses
+                )
+                end
+
+                sig do
+                  override.returns(
+                    {
+                      type: Symbol,
+                      created_after: T.nilable(Integer),
+                      created_before: T.nilable(Integer),
+                      instructions_search: T.nilable(String),
+                      metadata: T.nilable(T.anything),
+                      model: T.nilable(String),
+                      reasoning_effort:
+                        T.nilable(OpenAI::ReasoningEffort::TaggedSymbol),
+                      temperature: T.nilable(Float),
+                      tools: T.nilable(T::Array[String]),
+                      top_p: T.nilable(Float),
+                      users: T.nilable(T::Array[String])
+                    }
+                  )
+                end
+                def to_hash
+                end
+              end
+
+              sig do
+                override.returns(
+                  T::Array[
+                    OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Variants
+                  ]
+                )
+              end
+              def self.variants
+              end
+            end
+
+            # Used when sampling from a model. Dictates the structure of the messages passed
+            # into the model. Can either be a reference to a prebuilt trajectory (ie,
+            # `item.input_trajectory`), or a template with variable references to the `item`
+            # namespace.
+            module InputMessages
+              extend OpenAI::Internal::Type::Union
+
+              Variants =
+                T.type_alias do
+                  T.any(
+                    OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template,
+                    OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::ItemReference
+                  )
+                end
+
+              class Template < OpenAI::Internal::Type::BaseModel
+                OrHash =
+                  T.type_alias do
+                    T.any(
+                      OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template,
+                      OpenAI::Internal::AnyHash
+                    )
+                  end
+
+                # A list of chat messages forming the prompt or context.
May include variable + # references to the `item` namespace, ie {{item.name}}. + sig do + returns( + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::Variants + ] + ) + end + attr_accessor :template + + # The type of input messages. Always `template`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + template: + T::Array[ + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage::OrHash, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::OrHash + ) + ], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # A list of chat messages forming the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. + template:, + # The type of input messages. Always `template`. + type: :template + ) + end + + sig do + override.returns( + { + template: + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::Variants + ], + type: Symbol + } + ) + end + def to_hash + end + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + module Template + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem + ) + end + + class ChatMessage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, + OpenAI::Internal::AnyHash + ) + end + + # The content of the message. + sig { returns(String) } + attr_accessor :content + + # The role of the message (e.g. "system", "assistant", "user"). + sig { returns(String) } + attr_accessor :role + + sig do + params(content: String, role: String).returns( + T.attached_class + ) + end + def self.new( + # The content of the message. + content:, + # The role of the message (e.g. "system", "assistant", "user"). + role: + ) + end + + sig { override.returns({ content: String, role: String }) } + def to_hash + end + end + + class EvalItem < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem, + OpenAI::Internal::AnyHash + ) + end + + # Inputs to the model - can contain template strings. + sig do + returns( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::Variants + ) + end + attr_accessor :content + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + sig do + returns( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + end + attr_accessor :role + + # The type of the message input. Always `message`. 
+ sig do + returns( + T.nilable( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + ) + ) + end + attr_reader :type + + sig do + params( + type: + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::OrSymbol + ).void + end + attr_writer :type + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + sig do + params( + content: + T.any( + String, + OpenAI::Responses::ResponseInputText::OrHash, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText::OrHash, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage::OrHash, + T::Array[T.anything] + ), + role: + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::OrSymbol, + type: + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # Inputs to the model - can contain template strings. + content:, + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + role:, + # The type of the message input. Always `message`. + type: nil + ) + end + + sig do + override.returns( + { + content: + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::Variants, + role: + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol, + type: + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + } + ) + end + def to_hash + end + + # Inputs to the model - can contain template strings. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, + T::Array[T.anything] + ) + end + + class OutputText < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Internal::AnyHash + ) + end + + # The text output from the model. + sig { returns(String) } + attr_accessor :text + + # The type of the output text. Always `output_text`. + sig { returns(Symbol) } + attr_accessor :type + + # A text output from the model. + sig do + params(text: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The text output from the model. + text:, + # The type of the output text. Always `output_text`. 
+ type: :output_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + + class InputImage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, + OpenAI::Internal::AnyHash + ) + end + + # The URL of the image input. + sig { returns(String) } + attr_accessor :image_url + + # The type of the image input. Always `input_image`. + sig { returns(Symbol) } + attr_accessor :type + + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + sig { returns(T.nilable(String)) } + attr_reader :detail + + sig { params(detail: String).void } + attr_writer :detail + + # An image input to the model. + sig do + params( + image_url: String, + detail: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The URL of the image input. + image_url:, + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + detail: nil, + # The type of the image input. Always `input_image`. + type: :input_image + ) + end + + sig do + override.returns( + { image_url: String, type: Symbol, detail: String } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::Variants + ] + ) + end + def self.variants + end + + AnArrayOfInputTextAndInputImageArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + OpenAI::Internal::Type::Unknown + ], + OpenAI::Internal::Type::Converter + ) + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + SYSTEM = + T.let( + :system, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + DEVELOPER = + T.let( + :developer, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The type of the message input. Always `message`. 
+ module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MESSAGE = + T.let( + :message, + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::Variants + ] + ) + end + def self.variants + end + end + end + + class ItemReference < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::ItemReference, + OpenAI::Internal::AnyHash + ) + end + + # A reference to a variable in the `item` namespace. Ie, "item.name" + sig { returns(String) } + attr_accessor :item_reference + + # The type of input messages. Always `item_reference`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params(item_reference: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # A reference to a variable in the `item` namespace. Ie, "item.name" + item_reference:, + # The type of input messages. Always `item_reference`. + type: :item_reference + ) + end + + sig do + override.returns({ item_reference: String, type: Symbol }) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Variants + ] + ) + end + def self.variants + end + end + + class SamplingParams < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams, + OpenAI::Internal::AnyHash + ) + end + + # The maximum number of tokens in the generated output. + sig { returns(T.nilable(Integer)) } + attr_reader :max_completion_tokens + + sig { params(max_completion_tokens: Integer).void } + attr_writer :max_completion_tokens + + # A seed value to initialize the randomness, during sampling. + sig { returns(T.nilable(Integer)) } + attr_reader :seed + + sig { params(seed: Integer).void } + attr_writer :seed + + # A higher temperature increases randomness in the outputs. + sig { returns(T.nilable(Float)) } + attr_reader :temperature + + sig { params(temperature: Float).void } + attr_writer :temperature + + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + sig do + returns( + T.nilable( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text + ) + ) + end + attr_reader :text + + sig do + params( + text: + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text::OrHash + ).void + end + attr_writer :text + + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. 
+ # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + sig do + returns(T.nilable(T::Array[OpenAI::Responses::Tool::Variants])) + end + attr_reader :tools + + sig do + params( + tools: + T::Array[ + T.any( + OpenAI::Responses::FunctionTool::OrHash, + OpenAI::Responses::FileSearchTool::OrHash, + OpenAI::Responses::ComputerTool::OrHash, + OpenAI::Responses::Tool::Mcp::OrHash, + OpenAI::Responses::Tool::CodeInterpreter::OrHash, + OpenAI::Responses::Tool::ImageGeneration::OrHash, + OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::WebSearchTool::OrHash + ) + ] + ).void + end + attr_writer :tools + + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + sig { returns(T.nilable(Float)) } + attr_reader :top_p + + sig { params(top_p: Float).void } + attr_writer :top_p + + sig do + params( + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text::OrHash, + tools: + T::Array[ + T.any( + OpenAI::Responses::FunctionTool::OrHash, + OpenAI::Responses::FileSearchTool::OrHash, + OpenAI::Responses::ComputerTool::OrHash, + OpenAI::Responses::Tool::Mcp::OrHash, + OpenAI::Responses::Tool::CodeInterpreter::OrHash, + OpenAI::Responses::Tool::ImageGeneration::OrHash, + OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::WebSearchTool::OrHash + ) + ], + top_p: Float + ).returns(T.attached_class) + end + def self.new( + # The maximum number of tokens in the generated output. + max_completion_tokens: nil, + # A seed value to initialize the randomness, during sampling. + seed: nil, + # A higher temperature increases randomness in the outputs. + temperature: nil, + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + text: nil, + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). 
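+            # As a rough, hypothetical sketch, a single function tool could be passed
+            # as a plain hash rather than an OpenAI::Responses::FunctionTool instance;
+            # the tool name and schema below are invented for illustration:
+            #
+            #   tools: [
+            #     {
+            #       type: :function,
+            #       name: "get_weather",
+            #       parameters: {type: "object", properties: {city: {type: "string"}}}
+            #     }
+            #   ],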
+ tools: nil, + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + top_p: nil + ) + end + + sig do + override.returns( + { + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text, + tools: T::Array[OpenAI::Responses::Tool::Variants], + top_p: Float + } + ) + end + def to_hash + end + + class Text < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text, + OpenAI::Internal::AnyHash + ) + end + + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseFormatTextConfig::Variants + ) + ) + end + attr_reader :format_ + + sig do + params( + format_: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ) + ).void + end + attr_writer :format_ + + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + sig do + params( + format_: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ) + ).returns(T.attached_class) + end + def self.new( + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + format_: nil + ) + end + + sig do + override.returns( + { + format_: + OpenAI::Responses::ResponseFormatTextConfig::Variants + } + ) + end + def to_hash + end + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunCancelResponse::DataSource::Variants + ] + ) + end + def self.variants + end + end + + class PerModelUsage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCancelResponse::PerModelUsage, + OpenAI::Internal::AnyHash + ) + end + + # The number of tokens retrieved from cache. + sig { returns(Integer) } + attr_accessor :cached_tokens + + # The number of completion tokens generated. 
+        sig { returns(Integer) }
+        attr_accessor :completion_tokens
+
+        # The number of invocations.
+        sig { returns(Integer) }
+        attr_accessor :invocation_count
+
+        # The name of the model.
+        sig { returns(String) }
+        attr_accessor :model_name
+
+        # The number of prompt tokens used.
+        sig { returns(Integer) }
+        attr_accessor :prompt_tokens
+
+        # The total number of tokens used.
+        sig { returns(Integer) }
+        attr_accessor :total_tokens
+
+        sig do
+          params(
+            cached_tokens: Integer,
+            completion_tokens: Integer,
+            invocation_count: Integer,
+            model_name: String,
+            prompt_tokens: Integer,
+            total_tokens: Integer
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # The number of tokens retrieved from cache.
+          cached_tokens:,
+          # The number of completion tokens generated.
+          completion_tokens:,
+          # The number of invocations.
+          invocation_count:,
+          # The name of the model.
+          model_name:,
+          # The number of prompt tokens used.
+          prompt_tokens:,
+          # The total number of tokens used.
+          total_tokens:
+        )
+        end
+
+        sig do
+          override.returns(
+            {
+              cached_tokens: Integer,
+              completion_tokens: Integer,
+              invocation_count: Integer,
+              model_name: String,
+              prompt_tokens: Integer,
+              total_tokens: Integer
+            }
+          )
+        end
+        def to_hash
+        end
+      end
+
+      class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::Models::Evals::RunCancelResponse::PerTestingCriteriaResult,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # Number of tests failed for this criterion.
+        sig { returns(Integer) }
+        attr_accessor :failed
+
+        # Number of tests passed for this criterion.
+        sig { returns(Integer) }
+        attr_accessor :passed
+
+        # A description of the testing criteria.
+        sig { returns(String) }
+        attr_accessor :testing_criteria
+
+        sig do
+          params(
+            failed: Integer,
+            passed: Integer,
+            testing_criteria: String
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # Number of tests failed for this criterion.
+          failed:,
+          # Number of tests passed for this criterion.
+          passed:,
+          # A description of the testing criteria.
+          testing_criteria:
+        )
+        end
+
+        sig do
+          override.returns(
+            { failed: Integer, passed: Integer, testing_criteria: String }
+          )
+        end
+        def to_hash
+        end
+      end
+
+      class ResultCounts < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::Models::Evals::RunCancelResponse::ResultCounts,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # Number of output items that resulted in an error.
+        sig { returns(Integer) }
+        attr_accessor :errored
+
+        # Number of output items that failed to pass the evaluation.
+        sig { returns(Integer) }
+        attr_accessor :failed
+
+        # Number of output items that passed the evaluation.
+        sig { returns(Integer) }
+        attr_accessor :passed
+
+        # Total number of executed output items.
+        sig { returns(Integer) }
+        attr_accessor :total
+
+        # Counters summarizing the outcomes of the evaluation run.
+        sig do
+          params(
+            errored: Integer,
+            failed: Integer,
+            passed: Integer,
+            total: Integer
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # Number of output items that resulted in an error.
+          errored:,
+          # Number of output items that failed to pass the evaluation.
+          failed:,
+          # Number of output items that passed the evaluation.
+          passed:,
+          # Total number of executed output items.
+ total: + ) + end + + sig do + override.returns( + { + errored: Integer, + failed: Integer, + passed: Integer, + total: Integer + } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/evals/run_create_params.rbi b/rbi/openai/models/evals/run_create_params.rbi new file mode 100644 index 00000000..e7ea35ca --- /dev/null +++ b/rbi/openai/models/evals/run_create_params.rbi @@ -0,0 +1,1359 @@ +# typed: strong + +module OpenAI + module Models + module Evals + class RunCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::Evals::RunCreateParams, OpenAI::Internal::AnyHash) + end + + # Details about the run's data source. + sig do + returns( + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource, + OpenAI::Evals::CreateEvalCompletionsRunDataSource, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource + ) + ) + end + attr_accessor :data_source + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The name of the run. + sig { returns(T.nilable(String)) } + attr_reader :name + + sig { params(name: String).void } + attr_writer :name + + sig do + params( + data_source: + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource::OrHash, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::OrHash, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::OrHash + ), + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Details about the run's data source. + data_source:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The name of the run. + name: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + data_source: + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource, + OpenAI::Evals::CreateEvalCompletionsRunDataSource, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource + ), + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Details about the run's data source. 
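+        #
+        # For instance, the JSONL variant can be supplied as a plain hash mirroring
+        # the CreateEvalJSONLRunDataSource shape (a sketch with an arbitrary,
+        # illustrative item payload):
+        #
+        #   {
+        #     type: :jsonl,
+        #     source: {type: :file_content, content: [{item: {question: "2 + 2?"}}]}
+        #   }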
+ module DataSource + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource, + OpenAI::Evals::CreateEvalCompletionsRunDataSource, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource + ) + end + + class CreateEvalResponsesRunDataSource < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource, + OpenAI::Internal::AnyHash + ) + end + + # Determines what populates the `item` namespace in this run's data source. + sig do + returns( + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileID, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses + ) + ) + end + attr_accessor :source + + # The type of run data source. Always `responses`. + sig do + returns( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Type::OrSymbol + ) + end + attr_accessor :type + + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + sig do + returns( + T.nilable( + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::ItemReference + ) + ) + ) + end + attr_reader :input_messages + + sig do + params( + input_messages: + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::OrHash, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::ItemReference::OrHash + ) + ).void + end + attr_writer :input_messages + + # The name of the model to use for generating completions (e.g. "o3-mini"). + sig { returns(T.nilable(String)) } + attr_reader :model + + sig { params(model: String).void } + attr_writer :model + + sig do + returns( + T.nilable( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams + ) + ) + end + attr_reader :sampling_params + + sig do + params( + sampling_params: + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::OrHash + ).void + end + attr_writer :sampling_params + + # A ResponsesRunDataSource object describing a model sampling configuration. 
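+          #
+          # A minimal construction sketch (the file ID below is hypothetical, and a
+          # plain hash stands in for the typed source model):
+          #
+          #   OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource.new(
+          #     source: {type: :file_id, id: "file-abc123"},
+          #     type: :responses
+          #   )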
+ sig do + params( + source: + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent::OrHash, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileID::OrHash, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses::OrHash + ), + type: + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Type::OrSymbol, + input_messages: + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::OrHash, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::ItemReference::OrHash + ), + model: String, + sampling_params: + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::OrHash + ).returns(T.attached_class) + end + def self.new( + # Determines what populates the `item` namespace in this run's data source. + source:, + # The type of run data source. Always `responses`. + type:, + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + input_messages: nil, + # The name of the model to use for generating completions (e.g. "o3-mini"). + model: nil, + sampling_params: nil + ) + end + + sig do + override.returns( + { + source: + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileID, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses + ), + type: + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Type::OrSymbol, + input_messages: + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::ItemReference + ), + model: String, + sampling_params: + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams + } + ) + end + def to_hash + end + + # Determines what populates the `item` namespace in this run's data source. + module Source + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileID, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses + ) + end + + class FileContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent, + OpenAI::Internal::AnyHash + ) + end + + # The content of the jsonl file. + sig do + returns( + T::Array[ + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent::Content + ] + ) + end + attr_accessor :content + + # The type of jsonl source. Always `file_content`. 
+ sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + content: + T::Array[ + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent::Content::OrHash + ], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The content of the jsonl file. + content:, + # The type of jsonl source. Always `file_content`. + type: :file_content + ) + end + + sig do + override.returns( + { + content: + T::Array[ + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent::Content + ], + type: Symbol + } + ) + end + def to_hash + end + + class Content < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent::Content, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :item + + sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } + attr_reader :sample + + sig { params(sample: T::Hash[Symbol, T.anything]).void } + attr_writer :sample + + sig do + params( + item: T::Hash[Symbol, T.anything], + sample: T::Hash[Symbol, T.anything] + ).returns(T.attached_class) + end + def self.new(item:, sample: nil) + end + + sig do + override.returns( + { + item: T::Hash[Symbol, T.anything], + sample: T::Hash[Symbol, T.anything] + } + ) + end + def to_hash + end + end + end + + class FileID < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileID, + OpenAI::Internal::AnyHash + ) + end + + # The identifier of the file. + sig { returns(String) } + attr_accessor :id + + # The type of jsonl source. Always `file_id`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params(id: String, type: Symbol).returns(T.attached_class) + end + def self.new( + # The identifier of the file. + id:, + # The type of jsonl source. Always `file_id`. + type: :file_id + ) + end + + sig { override.returns({ id: String, type: Symbol }) } + def to_hash + end + end + + class Responses < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses, + OpenAI::Internal::AnyHash + ) + end + + # The type of run data source. Always `responses`. + sig { returns(Symbol) } + attr_accessor :type + + # Only include items created after this timestamp (inclusive). This is a query + # parameter used to select responses. + sig { returns(T.nilable(Integer)) } + attr_accessor :created_after + + # Only include items created before this timestamp (inclusive). This is a query + # parameter used to select responses. + sig { returns(T.nilable(Integer)) } + attr_accessor :created_before + + # Optional string to search the 'instructions' field. This is a query parameter + # used to select responses. + sig { returns(T.nilable(String)) } + attr_accessor :instructions_search + + # Metadata filter for the responses. This is a query parameter used to select + # responses. + sig { returns(T.nilable(T.anything)) } + attr_accessor :metadata + + # The name of the model to find responses for. This is a query parameter used to + # select responses. + sig { returns(T.nilable(String)) } + attr_accessor :model + + # Optional reasoning effort parameter. This is a query parameter used to select + # responses. 
+                sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
+                attr_accessor :reasoning_effort
+
+                # Sampling temperature. This is a query parameter used to select responses.
+                sig { returns(T.nilable(Float)) }
+                attr_accessor :temperature
+
+                # List of tool names. This is a query parameter used to select responses.
+                sig { returns(T.nilable(T::Array[String])) }
+                attr_accessor :tools
+
+                # Nucleus sampling parameter. This is a query parameter used to select responses.
+                sig { returns(T.nilable(Float)) }
+                attr_accessor :top_p
+
+                # List of user identifiers. This is a query parameter used to select responses.
+                sig { returns(T.nilable(T::Array[String])) }
+                attr_accessor :users
+
+                # An EvalResponsesSource object describing a run data source configuration.
+                sig do
+                  params(
+                    created_after: T.nilable(Integer),
+                    created_before: T.nilable(Integer),
+                    instructions_search: T.nilable(String),
+                    metadata: T.nilable(T.anything),
+                    model: T.nilable(String),
+                    reasoning_effort:
+                      T.nilable(OpenAI::ReasoningEffort::OrSymbol),
+                    temperature: T.nilable(Float),
+                    tools: T.nilable(T::Array[String]),
+                    top_p: T.nilable(Float),
+                    users: T.nilable(T::Array[String]),
+                    type: Symbol
+                  ).returns(T.attached_class)
+                end
+                def self.new(
+                  # Only include items created after this timestamp (inclusive). This is a query
+                  # parameter used to select responses.
+                  created_after: nil,
+                  # Only include items created before this timestamp (inclusive). This is a query
+                  # parameter used to select responses.
+                  created_before: nil,
+                  # Optional string to search the 'instructions' field. This is a query parameter
+                  # used to select responses.
+                  instructions_search: nil,
+                  # Metadata filter for the responses. This is a query parameter used to select
+                  # responses.
+                  metadata: nil,
+                  # The name of the model to find responses for. This is a query parameter used to
+                  # select responses.
+                  model: nil,
+                  # Optional reasoning effort parameter. This is a query parameter used to select
+                  # responses.
+                  reasoning_effort: nil,
+                  # Sampling temperature. This is a query parameter used to select responses.
+                  temperature: nil,
+                  # List of tool names. This is a query parameter used to select responses.
+                  tools: nil,
+                  # Nucleus sampling parameter. This is a query parameter used to select responses.
+                  top_p: nil,
+                  # List of user identifiers. This is a query parameter used to select responses.
+                  users: nil,
+                  # The type of run data source. Always `responses`.
+                  type: :responses
+                )
+                end
+
+                sig do
+                  override.returns(
+                    {
+                      type: Symbol,
+                      created_after: T.nilable(Integer),
+                      created_before: T.nilable(Integer),
+                      instructions_search: T.nilable(String),
+                      metadata: T.nilable(T.anything),
+                      model: T.nilable(String),
+                      reasoning_effort:
+                        T.nilable(OpenAI::ReasoningEffort::OrSymbol),
+                      temperature: T.nilable(Float),
+                      tools: T.nilable(T::Array[String]),
+                      top_p: T.nilable(Float),
+                      users: T.nilable(T::Array[String])
+                    }
+                  )
+                end
+                def to_hash
+                end
+              end
+
+              sig do
+                override.returns(
+                  T::Array[
+                    OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Variants
+                  ]
+                )
+              end
+              def self.variants
+              end
+            end
+
+            # The type of run data source. Always `responses`.
+ module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + RESPONSES = + T.let( + :responses, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + module InputMessages + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::ItemReference + ) + end + + class Template < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template, + OpenAI::Internal::AnyHash + ) + end + + # A list of chat messages forming the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. + sig do + returns( + T::Array[ + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::ChatMessage, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem + ) + ] + ) + end + attr_accessor :template + + # The type of input messages. Always `template`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + template: + T::Array[ + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::ChatMessage::OrHash, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::OrHash + ) + ], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # A list of chat messages forming the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. + template:, + # The type of input messages. Always `template`. + type: :template + ) + end + + sig do + override.returns( + { + template: + T::Array[ + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::ChatMessage, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem + ) + ], + type: Symbol + } + ) + end + def to_hash + end + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. 
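+                #
+                # As an illustration, a template may mix both message shapes as plain
+                # hashes (the prompt text here is hypothetical):
+                #
+                #   template: [
+                #     {role: "system", content: "Grade the response for accuracy."},
+                #     {role: :user, content: "{{item.question}}", type: :message}
+                #   ]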
+ module Template + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::ChatMessage, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem + ) + end + + class ChatMessage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::ChatMessage, + OpenAI::Internal::AnyHash + ) + end + + # The content of the message. + sig { returns(String) } + attr_accessor :content + + # The role of the message (e.g. "system", "assistant", "user"). + sig { returns(String) } + attr_accessor :role + + sig do + params(content: String, role: String).returns( + T.attached_class + ) + end + def self.new( + # The content of the message. + content:, + # The role of the message (e.g. "system", "assistant", "user"). + role: + ) + end + + sig { override.returns({ content: String, role: String }) } + def to_hash + end + end + + class EvalItem < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem, + OpenAI::Internal::AnyHash + ) + end + + # Inputs to the model - can contain template strings. + sig do + returns( + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, + T::Array[T.anything] + ) + ) + end + attr_accessor :content + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + sig do + returns( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Role::OrSymbol + ) + end + attr_accessor :role + + # The type of the message input. Always `message`. + sig do + returns( + T.nilable( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Type::OrSymbol + ) + ) + end + attr_reader :type + + sig do + params( + type: + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Type::OrSymbol + ).void + end + attr_writer :type + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. 
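+ #
+ # Illustrative sketch (hypothetical values): `content` accepts several
+ # shapes, e.g. a template string or an image part:
+ #
+ #   {role: :user, content: "{{item.question}}"}
+ #   {role: :user, content: {type: :input_image, image_url: "https://example.com/chart.png", detail: "low"}}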
+ sig do + params( + content: + T.any( + String, + OpenAI::Responses::ResponseInputText::OrHash, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText::OrHash, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage::OrHash, + T::Array[T.anything] + ), + role: + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Role::OrSymbol, + type: + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Type::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # Inputs to the model - can contain template strings. + content:, + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + role:, + # The type of the message input. Always `message`. + type: nil + ) + end + + sig do + override.returns( + { + content: + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, + T::Array[T.anything] + ), + role: + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Role::OrSymbol, + type: + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Type::OrSymbol + } + ) + end + def to_hash + end + + # Inputs to the model - can contain template strings. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, + T::Array[T.anything] + ) + end + + class OutputText < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Internal::AnyHash + ) + end + + # The text output from the model. + sig { returns(String) } + attr_accessor :text + + # The type of the output text. Always `output_text`. + sig { returns(Symbol) } + attr_accessor :type + + # A text output from the model. + sig do + params(text: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The text output from the model. + text:, + # The type of the output text. Always `output_text`. + type: :output_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + + class InputImage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, + OpenAI::Internal::AnyHash + ) + end + + # The URL of the image input. + sig { returns(String) } + attr_accessor :image_url + + # The type of the image input. 
Always `input_image`. + sig { returns(Symbol) } + attr_accessor :type + + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + sig { returns(T.nilable(String)) } + attr_reader :detail + + sig { params(detail: String).void } + attr_writer :detail + + # An image input to the model. + sig do + params( + image_url: String, + detail: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The URL of the image input. + image_url:, + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + detail: nil, + # The type of the image input. Always `input_image`. + type: :input_image + ) + end + + sig do + override.returns( + { image_url: String, type: Symbol, detail: String } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::Variants + ] + ) + end + def self.variants + end + + AnArrayOfInputTextAndInputImageArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + OpenAI::Internal::Type::Unknown + ], + OpenAI::Internal::Type::Converter + ) + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Role + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + SYSTEM = + T.let( + :system, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + DEVELOPER = + T.let( + :developer, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The type of the message input. Always `message`. 
+ module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MESSAGE = + T.let( + :message, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::Variants + ] + ) + end + def self.variants + end + end + end + + class ItemReference < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::ItemReference, + OpenAI::Internal::AnyHash + ) + end + + # A reference to a variable in the `item` namespace. Ie, "item.name" + sig { returns(String) } + attr_accessor :item_reference + + # The type of input messages. Always `item_reference`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params(item_reference: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # A reference to a variable in the `item` namespace. Ie, "item.name" + item_reference:, + # The type of input messages. Always `item_reference`. + type: :item_reference + ) + end + + sig do + override.returns({ item_reference: String, type: Symbol }) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Variants + ] + ) + end + def self.variants + end + end + + class SamplingParams < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams, + OpenAI::Internal::AnyHash + ) + end + + # The maximum number of tokens in the generated output. + sig { returns(T.nilable(Integer)) } + attr_reader :max_completion_tokens + + sig { params(max_completion_tokens: Integer).void } + attr_writer :max_completion_tokens + + # A seed value to initialize the randomness, during sampling. + sig { returns(T.nilable(Integer)) } + attr_reader :seed + + sig { params(seed: Integer).void } + attr_writer :seed + + # A higher temperature increases randomness in the outputs. + sig { returns(T.nilable(Float)) } + attr_reader :temperature + + sig { params(temperature: Float).void } + attr_writer :temperature + + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. 
Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + sig do + returns( + T.nilable( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text + ) + ) + end + attr_reader :text + + sig do + params( + text: + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text::OrHash + ).void + end + attr_writer :text + + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + sig do + returns( + T.nilable( + T::Array[ + T.any( + OpenAI::Responses::FunctionTool, + OpenAI::Responses::FileSearchTool, + OpenAI::Responses::ComputerTool, + OpenAI::Responses::Tool::Mcp, + OpenAI::Responses::Tool::CodeInterpreter, + OpenAI::Responses::Tool::ImageGeneration, + OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::CustomTool, + OpenAI::Responses::WebSearchTool + ) + ] + ) + ) + end + attr_reader :tools + + sig do + params( + tools: + T::Array[ + T.any( + OpenAI::Responses::FunctionTool::OrHash, + OpenAI::Responses::FileSearchTool::OrHash, + OpenAI::Responses::ComputerTool::OrHash, + OpenAI::Responses::Tool::Mcp::OrHash, + OpenAI::Responses::Tool::CodeInterpreter::OrHash, + OpenAI::Responses::Tool::ImageGeneration::OrHash, + OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::WebSearchTool::OrHash + ) + ] + ).void + end + attr_writer :tools + + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + sig { returns(T.nilable(Float)) } + attr_reader :top_p + + sig { params(top_p: Float).void } + attr_writer :top_p + + sig do + params( + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text::OrHash, + tools: + T::Array[ + T.any( + OpenAI::Responses::FunctionTool::OrHash, + OpenAI::Responses::FileSearchTool::OrHash, + OpenAI::Responses::ComputerTool::OrHash, + OpenAI::Responses::Tool::Mcp::OrHash, + OpenAI::Responses::Tool::CodeInterpreter::OrHash, + OpenAI::Responses::Tool::ImageGeneration::OrHash, + OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::WebSearchTool::OrHash + ) + ], + top_p: Float + ).returns(T.attached_class) + end + def self.new( + # The maximum number of tokens in the generated output. + max_completion_tokens: nil, + # A seed value to initialize the randomness, during sampling. + seed: nil, + # A higher temperature increases randomness in the outputs. + temperature: nil, + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. 
Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + text: nil, + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + tools: nil, + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + top_p: nil + ) + end + + sig do + override.returns( + { + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text, + tools: + T::Array[ + T.any( + OpenAI::Responses::FunctionTool, + OpenAI::Responses::FileSearchTool, + OpenAI::Responses::ComputerTool, + OpenAI::Responses::Tool::Mcp, + OpenAI::Responses::Tool::CodeInterpreter, + OpenAI::Responses::Tool::ImageGeneration, + OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::CustomTool, + OpenAI::Responses::WebSearchTool + ) + ], + top_p: Float + } + ) + end + def to_hash + end + + class Text < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text, + OpenAI::Internal::AnyHash + ) + end + + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + sig do + returns( + T.nilable( + T.any( + OpenAI::ResponseFormatText, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig, + OpenAI::ResponseFormatJSONObject + ) + ) + ) + end + attr_reader :format_ + + sig do + params( + format_: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ) + ).void + end + attr_writer :format_ + + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. 
Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + sig do + params( + format_: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ) + ).returns(T.attached_class) + end + def self.new( + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + format_: nil + ) + end + + sig do + override.returns( + { + format_: + T.any( + OpenAI::ResponseFormatText, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig, + OpenAI::ResponseFormatJSONObject + ) + } + ) + end + def to_hash + end + end + end + end + + sig do + override.returns( + T::Array[OpenAI::Evals::RunCreateParams::DataSource::Variants] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/evals/run_create_response.rbi b/rbi/openai/models/evals/run_create_response.rbi new file mode 100644 index 00000000..bf8ed611 --- /dev/null +++ b/rbi/openai/models/evals/run_create_response.rbi @@ -0,0 +1,1550 @@ +# typed: strong + +module OpenAI + module Models + module Evals + class RunCreateResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse, + OpenAI::Internal::AnyHash + ) + end + + # Unique identifier for the evaluation run. + sig { returns(String) } + attr_accessor :id + + # Unix timestamp (in seconds) when the evaluation run was created. + sig { returns(Integer) } + attr_accessor :created_at + + # Information about the run's data source. + sig do + returns( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Variants + ) + end + attr_accessor :data_source + + # An object representing an error response from the Eval API. + sig { returns(OpenAI::Evals::EvalAPIError) } + attr_reader :error + + sig { params(error: OpenAI::Evals::EvalAPIError::OrHash).void } + attr_writer :error + + # The identifier of the associated evaluation. + sig { returns(String) } + attr_accessor :eval_id + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The model that is evaluated, if applicable. + sig { returns(String) } + attr_accessor :model + + # The name of the evaluation run. + sig { returns(String) } + attr_accessor :name + + # The type of the object. Always "eval.run". + sig { returns(Symbol) } + attr_accessor :object + + # Usage statistics for each model during the evaluation run. 
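+ #
+ # Illustrative sketch (the attribute names come from `PerModelUsage`
+ # below; the `run` object itself is hypothetical):
+ #
+ #   run.per_model_usage.each do |usage|
+ #     puts "#{usage.model_name}: #{usage.total_tokens} tokens"
+ #   end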
+ sig do + returns( + T::Array[OpenAI::Models::Evals::RunCreateResponse::PerModelUsage] + ) + end + attr_accessor :per_model_usage + + # Results per testing criteria applied during the evaluation run. + sig do + returns( + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::PerTestingCriteriaResult + ] + ) + end + attr_accessor :per_testing_criteria_results + + # The URL to the rendered evaluation run report on the UI dashboard. + sig { returns(String) } + attr_accessor :report_url + + # Counters summarizing the outcomes of the evaluation run. + sig { returns(OpenAI::Models::Evals::RunCreateResponse::ResultCounts) } + attr_reader :result_counts + + sig do + params( + result_counts: + OpenAI::Models::Evals::RunCreateResponse::ResultCounts::OrHash + ).void + end + attr_writer :result_counts + + # The status of the evaluation run. + sig { returns(String) } + attr_accessor :status + + # A schema representing an evaluation run. + sig do + params( + id: String, + created_at: Integer, + data_source: + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource::OrHash, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::OrHash, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::OrHash + ), + error: OpenAI::Evals::EvalAPIError::OrHash, + eval_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + model: String, + name: String, + per_model_usage: + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::PerModelUsage::OrHash + ], + per_testing_criteria_results: + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::PerTestingCriteriaResult::OrHash + ], + report_url: String, + result_counts: + OpenAI::Models::Evals::RunCreateResponse::ResultCounts::OrHash, + status: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the evaluation run. + id:, + # Unix timestamp (in seconds) when the evaluation run was created. + created_at:, + # Information about the run's data source. + data_source:, + # An object representing an error response from the Eval API. + error:, + # The identifier of the associated evaluation. + eval_id:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + # The model that is evaluated, if applicable. + model:, + # The name of the evaluation run. + name:, + # Usage statistics for each model during the evaluation run. + per_model_usage:, + # Results per testing criteria applied during the evaluation run. + per_testing_criteria_results:, + # The URL to the rendered evaluation run report on the UI dashboard. + report_url:, + # Counters summarizing the outcomes of the evaluation run. + result_counts:, + # The status of the evaluation run. + status:, + # The type of the object. Always "eval.run". 
+ object: :"eval.run" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data_source: + OpenAI::Models::Evals::RunCreateResponse::DataSource::Variants, + error: OpenAI::Evals::EvalAPIError, + eval_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + model: String, + name: String, + object: Symbol, + per_model_usage: + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::PerModelUsage + ], + per_testing_criteria_results: + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::PerTestingCriteriaResult + ], + report_url: String, + result_counts: + OpenAI::Models::Evals::RunCreateResponse::ResultCounts, + status: String + } + ) + end + def to_hash + end + + # Information about the run's data source. + module DataSource + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource, + OpenAI::Evals::CreateEvalCompletionsRunDataSource, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses + ) + end + + class Responses < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses, + OpenAI::Internal::AnyHash + ) + end + + # Determines what populates the `item` namespace in this run's data source. + sig do + returns( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Variants + ) + end + attr_accessor :source + + # The type of run data source. Always `responses`. + sig { returns(Symbol) } + attr_accessor :type + + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + sig do + returns( + T.nilable( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Variants + ) + ) + end + attr_reader :input_messages + + sig do + params( + input_messages: + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::OrHash, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::ItemReference::OrHash + ) + ).void + end + attr_writer :input_messages + + # The name of the model to use for generating completions (e.g. "o3-mini"). + sig { returns(T.nilable(String)) } + attr_reader :model + + sig { params(model: String).void } + attr_writer :model + + sig do + returns( + T.nilable( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams + ) + ) + end + attr_reader :sampling_params + + sig do + params( + sampling_params: + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::OrHash + ).void + end + attr_writer :sampling_params + + # A ResponsesRunDataSource object describing a model sampling configuration. 
+ sig do + params( + source: + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent::OrHash, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileID::OrHash, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Responses::OrHash + ), + input_messages: + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::OrHash, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::ItemReference::OrHash + ), + model: String, + sampling_params: + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Determines what populates the `item` namespace in this run's data source. + source:, + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + input_messages: nil, + # The name of the model to use for generating completions (e.g. "o3-mini"). + model: nil, + sampling_params: nil, + # The type of run data source. Always `responses`. + type: :responses + ) + end + + sig do + override.returns( + { + source: + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Variants, + type: Symbol, + input_messages: + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Variants, + model: String, + sampling_params: + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams + } + ) + end + def to_hash + end + + # Determines what populates the `item` namespace in this run's data source. + module Source + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileID, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Responses + ) + end + + class FileContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent, + OpenAI::Internal::AnyHash + ) + end + + # The content of the jsonl file. + sig do + returns( + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent::Content + ] + ) + end + attr_accessor :content + + # The type of jsonl source. Always `file_content`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + content: + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent::Content::OrHash + ], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The content of the jsonl file. + content:, + # The type of jsonl source. Always `file_content`. 
+ type: :file_content + ) + end + + sig do + override.returns( + { + content: + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent::Content + ], + type: Symbol + } + ) + end + def to_hash + end + + class Content < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent::Content, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :item + + sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } + attr_reader :sample + + sig { params(sample: T::Hash[Symbol, T.anything]).void } + attr_writer :sample + + sig do + params( + item: T::Hash[Symbol, T.anything], + sample: T::Hash[Symbol, T.anything] + ).returns(T.attached_class) + end + def self.new(item:, sample: nil) + end + + sig do + override.returns( + { + item: T::Hash[Symbol, T.anything], + sample: T::Hash[Symbol, T.anything] + } + ) + end + def to_hash + end + end + end + + class FileID < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileID, + OpenAI::Internal::AnyHash + ) + end + + # The identifier of the file. + sig { returns(String) } + attr_accessor :id + + # The type of jsonl source. Always `file_id`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params(id: String, type: Symbol).returns(T.attached_class) + end + def self.new( + # The identifier of the file. + id:, + # The type of jsonl source. Always `file_id`. + type: :file_id + ) + end + + sig { override.returns({ id: String, type: Symbol }) } + def to_hash + end + end + + class Responses < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Responses, + OpenAI::Internal::AnyHash + ) + end + + # The type of run data source. Always `responses`. + sig { returns(Symbol) } + attr_accessor :type + + # Only include items created after this timestamp (inclusive). This is a query + # parameter used to select responses. + sig { returns(T.nilable(Integer)) } + attr_accessor :created_after + + # Only include items created before this timestamp (inclusive). This is a query + # parameter used to select responses. + sig { returns(T.nilable(Integer)) } + attr_accessor :created_before + + # Optional string to search the 'instructions' field. This is a query parameter + # used to select responses. + sig { returns(T.nilable(String)) } + attr_accessor :instructions_search + + # Metadata filter for the responses. This is a query parameter used to select + # responses. + sig { returns(T.nilable(T.anything)) } + attr_accessor :metadata + + # The name of the model to find responses for. This is a query parameter used to + # select responses. + sig { returns(T.nilable(String)) } + attr_accessor :model + + # Optional reasoning effort parameter. This is a query parameter used to select + # responses. + sig do + returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) + end + attr_accessor :reasoning_effort + + # Sampling temperature. This is a query parameter used to select responses. + sig { returns(T.nilable(Float)) } + attr_accessor :temperature + + # List of tool names. This is a query parameter used to select responses. + sig { returns(T.nilable(T::Array[String])) } + attr_accessor :tools + + # Nucleus sampling parameter. This is a query parameter used to select responses. 
+ sig { returns(T.nilable(Float)) }
+ attr_accessor :top_p
+
+ # List of user identifiers. This is a query parameter used to select responses.
+ sig { returns(T.nilable(T::Array[String])) }
+ attr_accessor :users
+
+ # An EvalResponsesSource object describing a run data source configuration.
+ sig do
+ params(
+ created_after: T.nilable(Integer),
+ created_before: T.nilable(Integer),
+ instructions_search: T.nilable(String),
+ metadata: T.nilable(T.anything),
+ model: T.nilable(String),
+ reasoning_effort:
+ T.nilable(OpenAI::ReasoningEffort::OrSymbol),
+ temperature: T.nilable(Float),
+ tools: T.nilable(T::Array[String]),
+ top_p: T.nilable(Float),
+ users: T.nilable(T::Array[String]),
+ type: Symbol
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # Only include items created after this timestamp (inclusive). This is a query
+ # parameter used to select responses.
+ created_after: nil,
+ # Only include items created before this timestamp (inclusive). This is a query
+ # parameter used to select responses.
+ created_before: nil,
+ # Optional string to search the 'instructions' field. This is a query parameter
+ # used to select responses.
+ instructions_search: nil,
+ # Metadata filter for the responses. This is a query parameter used to select
+ # responses.
+ metadata: nil,
+ # The name of the model to find responses for. This is a query parameter used to
+ # select responses.
+ model: nil,
+ # Optional reasoning effort parameter. This is a query parameter used to select
+ # responses.
+ reasoning_effort: nil,
+ # Sampling temperature. This is a query parameter used to select responses.
+ temperature: nil,
+ # List of tool names. This is a query parameter used to select responses.
+ tools: nil,
+ # Nucleus sampling parameter. This is a query parameter used to select responses.
+ top_p: nil,
+ # List of user identifiers. This is a query parameter used to select responses.
+ users: nil,
+ # The type of run data source. Always `responses`.
+ type: :responses
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ type: Symbol,
+ created_after: T.nilable(Integer),
+ created_before: T.nilable(Integer),
+ instructions_search: T.nilable(String),
+ metadata: T.nilable(T.anything),
+ model: T.nilable(String),
+ reasoning_effort:
+ T.nilable(OpenAI::ReasoningEffort::TaggedSymbol),
+ temperature: T.nilable(Float),
+ tools: T.nilable(T::Array[String]),
+ top_p: T.nilable(Float),
+ users: T.nilable(T::Array[String])
+ }
+ )
+ end
+ def to_hash
+ end
+ end
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Variants
+ ]
+ )
+ end
+ def self.variants
+ end
+ end
+
+ # Used when sampling from a model. Dictates the structure of the messages passed
+ # into the model. Can either be a reference to a prebuilt trajectory (ie,
+ # `item.input_trajectory`), or a template with variable references to the `item`
+ # namespace.
+ module InputMessages
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template,
+ OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::ItemReference
+ )
+ end
+
+ class Template < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # A list of chat messages forming the prompt or context.
May include variable + # references to the `item` namespace, ie {{item.name}}. + sig do + returns( + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::Variants + ] + ) + end + attr_accessor :template + + # The type of input messages. Always `template`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + template: + T::Array[ + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage::OrHash, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::OrHash + ) + ], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # A list of chat messages forming the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. + template:, + # The type of input messages. Always `template`. + type: :template + ) + end + + sig do + override.returns( + { + template: + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::Variants + ], + type: Symbol + } + ) + end + def to_hash + end + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + module Template + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem + ) + end + + class ChatMessage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, + OpenAI::Internal::AnyHash + ) + end + + # The content of the message. + sig { returns(String) } + attr_accessor :content + + # The role of the message (e.g. "system", "assistant", "user"). + sig { returns(String) } + attr_accessor :role + + sig do + params(content: String, role: String).returns( + T.attached_class + ) + end + def self.new( + # The content of the message. + content:, + # The role of the message (e.g. "system", "assistant", "user"). + role: + ) + end + + sig { override.returns({ content: String, role: String }) } + def to_hash + end + end + + class EvalItem < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem, + OpenAI::Internal::AnyHash + ) + end + + # Inputs to the model - can contain template strings. + sig do + returns( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::Variants + ) + end + attr_accessor :content + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + sig do + returns( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + end + attr_accessor :role + + # The type of the message input. Always `message`. 
+ sig do + returns( + T.nilable( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + ) + ) + end + attr_reader :type + + sig do + params( + type: + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::OrSymbol + ).void + end + attr_writer :type + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + sig do + params( + content: + T.any( + String, + OpenAI::Responses::ResponseInputText::OrHash, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText::OrHash, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage::OrHash, + T::Array[T.anything] + ), + role: + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::OrSymbol, + type: + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # Inputs to the model - can contain template strings. + content:, + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + role:, + # The type of the message input. Always `message`. + type: nil + ) + end + + sig do + override.returns( + { + content: + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::Variants, + role: + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol, + type: + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + } + ) + end + def to_hash + end + + # Inputs to the model - can contain template strings. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, + T::Array[T.anything] + ) + end + + class OutputText < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Internal::AnyHash + ) + end + + # The text output from the model. + sig { returns(String) } + attr_accessor :text + + # The type of the output text. Always `output_text`. + sig { returns(Symbol) } + attr_accessor :type + + # A text output from the model. + sig do + params(text: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The text output from the model. + text:, + # The type of the output text. Always `output_text`. 
+ type: :output_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + + class InputImage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, + OpenAI::Internal::AnyHash + ) + end + + # The URL of the image input. + sig { returns(String) } + attr_accessor :image_url + + # The type of the image input. Always `input_image`. + sig { returns(Symbol) } + attr_accessor :type + + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + sig { returns(T.nilable(String)) } + attr_reader :detail + + sig { params(detail: String).void } + attr_writer :detail + + # An image input to the model. + sig do + params( + image_url: String, + detail: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The URL of the image input. + image_url:, + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + detail: nil, + # The type of the image input. Always `input_image`. + type: :input_image + ) + end + + sig do + override.returns( + { image_url: String, type: Symbol, detail: String } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::Variants + ] + ) + end + def self.variants + end + + AnArrayOfInputTextAndInputImageArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + OpenAI::Internal::Type::Unknown + ], + OpenAI::Internal::Type::Converter + ) + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + SYSTEM = + T.let( + :system, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + DEVELOPER = + T.let( + :developer, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The type of the message input. Always `message`. 
+ module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MESSAGE = + T.let( + :message, + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::Variants + ] + ) + end + def self.variants + end + end + end + + class ItemReference < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::ItemReference, + OpenAI::Internal::AnyHash + ) + end + + # A reference to a variable in the `item` namespace. Ie, "item.name" + sig { returns(String) } + attr_accessor :item_reference + + # The type of input messages. Always `item_reference`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params(item_reference: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # A reference to a variable in the `item` namespace. Ie, "item.name" + item_reference:, + # The type of input messages. Always `item_reference`. + type: :item_reference + ) + end + + sig do + override.returns({ item_reference: String, type: Symbol }) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Variants + ] + ) + end + def self.variants + end + end + + class SamplingParams < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams, + OpenAI::Internal::AnyHash + ) + end + + # The maximum number of tokens in the generated output. + sig { returns(T.nilable(Integer)) } + attr_reader :max_completion_tokens + + sig { params(max_completion_tokens: Integer).void } + attr_writer :max_completion_tokens + + # A seed value to initialize the randomness, during sampling. + sig { returns(T.nilable(Integer)) } + attr_reader :seed + + sig { params(seed: Integer).void } + attr_writer :seed + + # A higher temperature increases randomness in the outputs. + sig { returns(T.nilable(Float)) } + attr_reader :temperature + + sig { params(temperature: Float).void } + attr_writer :temperature + + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + sig do + returns( + T.nilable( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text + ) + ) + end + attr_reader :text + + sig do + params( + text: + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text::OrHash + ).void + end + attr_writer :text + + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. 
+ # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + sig do + returns(T.nilable(T::Array[OpenAI::Responses::Tool::Variants])) + end + attr_reader :tools + + sig do + params( + tools: + T::Array[ + T.any( + OpenAI::Responses::FunctionTool::OrHash, + OpenAI::Responses::FileSearchTool::OrHash, + OpenAI::Responses::ComputerTool::OrHash, + OpenAI::Responses::Tool::Mcp::OrHash, + OpenAI::Responses::Tool::CodeInterpreter::OrHash, + OpenAI::Responses::Tool::ImageGeneration::OrHash, + OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::WebSearchTool::OrHash + ) + ] + ).void + end + attr_writer :tools + + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + sig { returns(T.nilable(Float)) } + attr_reader :top_p + + sig { params(top_p: Float).void } + attr_writer :top_p + + sig do + params( + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text::OrHash, + tools: + T::Array[ + T.any( + OpenAI::Responses::FunctionTool::OrHash, + OpenAI::Responses::FileSearchTool::OrHash, + OpenAI::Responses::ComputerTool::OrHash, + OpenAI::Responses::Tool::Mcp::OrHash, + OpenAI::Responses::Tool::CodeInterpreter::OrHash, + OpenAI::Responses::Tool::ImageGeneration::OrHash, + OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::WebSearchTool::OrHash + ) + ], + top_p: Float + ).returns(T.attached_class) + end + def self.new( + # The maximum number of tokens in the generated output. + max_completion_tokens: nil, + # A seed value to initialize the randomness, during sampling. + seed: nil, + # A higher temperature increases randomness in the outputs. + temperature: nil, + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + text: nil, + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). 
+ tools: nil, + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + top_p: nil + ) + end + + sig do + override.returns( + { + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text, + tools: T::Array[OpenAI::Responses::Tool::Variants], + top_p: Float + } + ) + end + def to_hash + end + + class Text < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text, + OpenAI::Internal::AnyHash + ) + end + + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseFormatTextConfig::Variants + ) + ) + end + attr_reader :format_ + + sig do + params( + format_: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ) + ).void + end + attr_writer :format_ + + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + sig do + params( + format_: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ) + ).returns(T.attached_class) + end + def self.new( + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + format_: nil + ) + end + + sig do + override.returns( + { + format_: + OpenAI::Responses::ResponseFormatTextConfig::Variants + } + ) + end + def to_hash + end + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunCreateResponse::DataSource::Variants + ] + ) + end + def self.variants + end + end + + class PerModelUsage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunCreateResponse::PerModelUsage, + OpenAI::Internal::AnyHash + ) + end + + # The number of tokens retrieved from cache. + sig { returns(Integer) } + attr_accessor :cached_tokens + + # The number of completion tokens generated. 
+          sig { returns(Integer) }
+          attr_accessor :completion_tokens
+
+          # The number of invocations.
+          sig { returns(Integer) }
+          attr_accessor :invocation_count
+
+          # The name of the model.
+          sig { returns(String) }
+          attr_accessor :model_name
+
+          # The number of prompt tokens used.
+          sig { returns(Integer) }
+          attr_accessor :prompt_tokens
+
+          # The total number of tokens used.
+          sig { returns(Integer) }
+          attr_accessor :total_tokens
+
+          sig do
+            params(
+              cached_tokens: Integer,
+              completion_tokens: Integer,
+              invocation_count: Integer,
+              model_name: String,
+              prompt_tokens: Integer,
+              total_tokens: Integer
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # The number of tokens retrieved from cache.
+            cached_tokens:,
+            # The number of completion tokens generated.
+            completion_tokens:,
+            # The number of invocations.
+            invocation_count:,
+            # The name of the model.
+            model_name:,
+            # The number of prompt tokens used.
+            prompt_tokens:,
+            # The total number of tokens used.
+            total_tokens:
+          )
+          end
+
+          sig do
+            override.returns(
+              {
+                cached_tokens: Integer,
+                completion_tokens: Integer,
+                invocation_count: Integer,
+                model_name: String,
+                prompt_tokens: Integer,
+                total_tokens: Integer
+              }
+            )
+          end
+          def to_hash
+          end
+        end
+
+        class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::Models::Evals::RunCreateResponse::PerTestingCriteriaResult,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          # Number of tests failed for this criterion.
+          sig { returns(Integer) }
+          attr_accessor :failed
+
+          # Number of tests passed for this criterion.
+          sig { returns(Integer) }
+          attr_accessor :passed
+
+          # A description of the testing criteria.
+          sig { returns(String) }
+          attr_accessor :testing_criteria
+
+          sig do
+            params(
+              failed: Integer,
+              passed: Integer,
+              testing_criteria: String
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # Number of tests failed for this criterion.
+            failed:,
+            # Number of tests passed for this criterion.
+            passed:,
+            # A description of the testing criteria.
+            testing_criteria:
+          )
+          end
+
+          sig do
+            override.returns(
+              { failed: Integer, passed: Integer, testing_criteria: String }
+            )
+          end
+          def to_hash
+          end
+        end
+
+        class ResultCounts < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::Models::Evals::RunCreateResponse::ResultCounts,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          # Number of output items that resulted in an error.
+          sig { returns(Integer) }
+          attr_accessor :errored
+
+          # Number of output items that failed to pass the evaluation.
+          sig { returns(Integer) }
+          attr_accessor :failed
+
+          # Number of output items that passed the evaluation.
+          sig { returns(Integer) }
+          attr_accessor :passed
+
+          # Total number of executed output items.
+          sig { returns(Integer) }
+          attr_accessor :total
+
+          # Counters summarizing the outcomes of the evaluation run.
+          sig do
+            params(
+              errored: Integer,
+              failed: Integer,
+              passed: Integer,
+              total: Integer
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # Number of output items that resulted in an error.
+            errored:,
+            # Number of output items that failed to pass the evaluation.
+            failed:,
+            # Number of output items that passed the evaluation.
+            passed:,
+            # Total number of executed output items.
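+            # Illustratively, a run where 8 items passed, 1 failed, and 1 errored
+            # would report `total: 10` (assuming, as these field docs suggest, that
+            # `total` covers every executed item across the three outcomes).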
+ total: + ) + end + + sig do + override.returns( + { + errored: Integer, + failed: Integer, + passed: Integer, + total: Integer + } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/evals/run_delete_params.rbi b/rbi/openai/models/evals/run_delete_params.rbi new file mode 100644 index 00000000..e82ad163 --- /dev/null +++ b/rbi/openai/models/evals/run_delete_params.rbi @@ -0,0 +1,37 @@ +# typed: strong + +module OpenAI + module Models + module Evals + class RunDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::Evals::RunDeleteParams, OpenAI::Internal::AnyHash) + end + + sig { returns(String) } + attr_accessor :eval_id + + sig do + params( + eval_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(eval_id:, request_options: {}) + end + + sig do + override.returns( + { eval_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/evals/run_delete_response.rbi b/rbi/openai/models/evals/run_delete_response.rbi new file mode 100644 index 00000000..552da6af --- /dev/null +++ b/rbi/openai/models/evals/run_delete_response.rbi @@ -0,0 +1,51 @@ +# typed: strong + +module OpenAI + module Models + module Evals + class RunDeleteResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunDeleteResponse, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(T.nilable(T::Boolean)) } + attr_reader :deleted + + sig { params(deleted: T::Boolean).void } + attr_writer :deleted + + sig { returns(T.nilable(String)) } + attr_reader :object + + sig { params(object: String).void } + attr_writer :object + + sig { returns(T.nilable(String)) } + attr_reader :run_id + + sig { params(run_id: String).void } + attr_writer :run_id + + sig do + params(deleted: T::Boolean, object: String, run_id: String).returns( + T.attached_class + ) + end + def self.new(deleted: nil, object: nil, run_id: nil) + end + + sig do + override.returns( + { deleted: T::Boolean, object: String, run_id: String } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/evals/run_list_params.rbi b/rbi/openai/models/evals/run_list_params.rbi new file mode 100644 index 00000000..67d654cd --- /dev/null +++ b/rbi/openai/models/evals/run_list_params.rbi @@ -0,0 +1,149 @@ +# typed: strong + +module OpenAI + module Models + module Evals + class RunListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::Evals::RunListParams, OpenAI::Internal::AnyHash) + end + + # Identifier for the last run from the previous pagination request. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # Number of runs to retrieve. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for + # descending order. Defaults to `asc`. 
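+        #
+        # A hypothetical listing call built on these params (the eval ID and the
+        # resource method are assumed from the SDK's conventions, not shown here):
+        #
+        #   client.evals.runs.list("eval_123", limit: 20, order: :desc, status: :completed)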
+ sig do + returns(T.nilable(OpenAI::Evals::RunListParams::Order::OrSymbol)) + end + attr_reader :order + + sig do + params(order: OpenAI::Evals::RunListParams::Order::OrSymbol).void + end + attr_writer :order + + # Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed` + # | `canceled`. + sig do + returns(T.nilable(OpenAI::Evals::RunListParams::Status::OrSymbol)) + end + attr_reader :status + + sig do + params(status: OpenAI::Evals::RunListParams::Status::OrSymbol).void + end + attr_writer :status + + sig do + params( + after: String, + limit: Integer, + order: OpenAI::Evals::RunListParams::Order::OrSymbol, + status: OpenAI::Evals::RunListParams::Status::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Identifier for the last run from the previous pagination request. + after: nil, + # Number of runs to retrieve. + limit: nil, + # Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for + # descending order. Defaults to `asc`. + order: nil, + # Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed` + # | `canceled`. + status: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + limit: Integer, + order: OpenAI::Evals::RunListParams::Order::OrSymbol, + status: OpenAI::Evals::RunListParams::Status::OrSymbol, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for + # descending order. Defaults to `asc`. + module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::Evals::RunListParams::Order) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = T.let(:asc, OpenAI::Evals::RunListParams::Order::TaggedSymbol) + DESC = T.let(:desc, OpenAI::Evals::RunListParams::Order::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::Evals::RunListParams::Order::TaggedSymbol] + ) + end + def self.values + end + end + + # Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed` + # | `canceled`. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::Evals::RunListParams::Status) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + QUEUED = + T.let(:queued, OpenAI::Evals::RunListParams::Status::TaggedSymbol) + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Evals::RunListParams::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Evals::RunListParams::Status::TaggedSymbol + ) + CANCELED = + T.let(:canceled, OpenAI::Evals::RunListParams::Status::TaggedSymbol) + FAILED = + T.let(:failed, OpenAI::Evals::RunListParams::Status::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::Evals::RunListParams::Status::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/evals/run_list_response.rbi b/rbi/openai/models/evals/run_list_response.rbi new file mode 100644 index 00000000..ab3d4305 --- /dev/null +++ b/rbi/openai/models/evals/run_list_response.rbi @@ -0,0 +1,1546 @@ +# typed: strong + +module OpenAI + module Models + module Evals + class RunListResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunListResponse, + OpenAI::Internal::AnyHash + ) + end + + # Unique identifier for the evaluation run. 
+ sig { returns(String) } + attr_accessor :id + + # Unix timestamp (in seconds) when the evaluation run was created. + sig { returns(Integer) } + attr_accessor :created_at + + # Information about the run's data source. + sig do + returns(OpenAI::Models::Evals::RunListResponse::DataSource::Variants) + end + attr_accessor :data_source + + # An object representing an error response from the Eval API. + sig { returns(OpenAI::Evals::EvalAPIError) } + attr_reader :error + + sig { params(error: OpenAI::Evals::EvalAPIError::OrHash).void } + attr_writer :error + + # The identifier of the associated evaluation. + sig { returns(String) } + attr_accessor :eval_id + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The model that is evaluated, if applicable. + sig { returns(String) } + attr_accessor :model + + # The name of the evaluation run. + sig { returns(String) } + attr_accessor :name + + # The type of the object. Always "eval.run". + sig { returns(Symbol) } + attr_accessor :object + + # Usage statistics for each model during the evaluation run. + sig do + returns( + T::Array[OpenAI::Models::Evals::RunListResponse::PerModelUsage] + ) + end + attr_accessor :per_model_usage + + # Results per testing criteria applied during the evaluation run. + sig do + returns( + T::Array[ + OpenAI::Models::Evals::RunListResponse::PerTestingCriteriaResult + ] + ) + end + attr_accessor :per_testing_criteria_results + + # The URL to the rendered evaluation run report on the UI dashboard. + sig { returns(String) } + attr_accessor :report_url + + # Counters summarizing the outcomes of the evaluation run. + sig { returns(OpenAI::Models::Evals::RunListResponse::ResultCounts) } + attr_reader :result_counts + + sig do + params( + result_counts: + OpenAI::Models::Evals::RunListResponse::ResultCounts::OrHash + ).void + end + attr_writer :result_counts + + # The status of the evaluation run. + sig { returns(String) } + attr_accessor :status + + # A schema representing an evaluation run. + sig do + params( + id: String, + created_at: Integer, + data_source: + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource::OrHash, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::OrHash, + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::OrHash + ), + error: OpenAI::Evals::EvalAPIError::OrHash, + eval_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + model: String, + name: String, + per_model_usage: + T::Array[ + OpenAI::Models::Evals::RunListResponse::PerModelUsage::OrHash + ], + per_testing_criteria_results: + T::Array[ + OpenAI::Models::Evals::RunListResponse::PerTestingCriteriaResult::OrHash + ], + report_url: String, + result_counts: + OpenAI::Models::Evals::RunListResponse::ResultCounts::OrHash, + status: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the evaluation run. + id:, + # Unix timestamp (in seconds) when the evaluation run was created. + created_at:, + # Information about the run's data source. + data_source:, + # An object representing an error response from the Eval API. + error:, + # The identifier of the associated evaluation. 
+          eval_id:,
+          # Set of 16 key-value pairs that can be attached to an object. This can be useful
+          # for storing additional information about the object in a structured format, and
+          # querying for objects via API or the dashboard.
+          #
+          # Keys are strings with a maximum length of 64 characters. Values are strings with
+          # a maximum length of 512 characters.
+          metadata:,
+          # The model that is evaluated, if applicable.
+          model:,
+          # The name of the evaluation run.
+          name:,
+          # Usage statistics for each model during the evaluation run.
+          per_model_usage:,
+          # Results per testing criteria applied during the evaluation run.
+          per_testing_criteria_results:,
+          # The URL to the rendered evaluation run report on the UI dashboard.
+          report_url:,
+          # Counters summarizing the outcomes of the evaluation run.
+          result_counts:,
+          # The status of the evaluation run.
+          status:,
+          # The type of the object. Always "eval.run".
+          object: :"eval.run"
+        )
+        end
+
+        sig do
+          override.returns(
+            {
+              id: String,
+              created_at: Integer,
+              data_source:
+                OpenAI::Models::Evals::RunListResponse::DataSource::Variants,
+              error: OpenAI::Evals::EvalAPIError,
+              eval_id: String,
+              metadata: T.nilable(T::Hash[Symbol, String]),
+              model: String,
+              name: String,
+              object: Symbol,
+              per_model_usage:
+                T::Array[OpenAI::Models::Evals::RunListResponse::PerModelUsage],
+              per_testing_criteria_results:
+                T::Array[
+                  OpenAI::Models::Evals::RunListResponse::PerTestingCriteriaResult
+                ],
+              report_url: String,
+              result_counts:
+                OpenAI::Models::Evals::RunListResponse::ResultCounts,
+              status: String
+            }
+          )
+        end
+        def to_hash
+        end
+
+        # Information about the run's data source.
+        module DataSource
+          extend OpenAI::Internal::Type::Union
+
+          Variants =
+            T.type_alias do
+              T.any(
+                OpenAI::Evals::CreateEvalJSONLRunDataSource,
+                OpenAI::Evals::CreateEvalCompletionsRunDataSource,
+                OpenAI::Models::Evals::RunListResponse::DataSource::Responses
+              )
+            end
+
+          class Responses < OpenAI::Internal::Type::BaseModel
+            OrHash =
+              T.type_alias do
+                T.any(
+                  OpenAI::Models::Evals::RunListResponse::DataSource::Responses,
+                  OpenAI::Internal::AnyHash
+                )
+              end
+
+            # Determines what populates the `item` namespace in this run's data source.
+            sig do
+              returns(
+                OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Variants
+              )
+            end
+            attr_accessor :source
+
+            # The type of run data source. Always `responses`.
+            sig { returns(Symbol) }
+            attr_accessor :type
+
+            # Used when sampling from a model. Dictates the structure of the messages passed
+            # into the model. Can either be a reference to a prebuilt trajectory (i.e.,
+            # `item.input_trajectory`), or a template with variable references to the `item`
+            # namespace.
+            sig do
+              returns(
+                T.nilable(
+                  OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Variants
+                )
+              )
+            end
+            attr_reader :input_messages
+
+            sig do
+              params(
+                input_messages:
+                  T.any(
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::OrHash,
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::ItemReference::OrHash
+                  )
+              ).void
+            end
+            attr_writer :input_messages
+
+            # The name of the model to use for generating completions (e.g. "o3-mini").
+            sig { returns(T.nilable(String)) }
+            attr_reader :model
+
+            sig { params(model: String).void }
+            attr_writer :model
+
+            sig do
+              returns(
+                T.nilable(
+                  OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams
+                )
+              )
+            end
+            attr_reader :sampling_params
+
+            sig do
+              params(
+                sampling_params:
+                  OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::OrHash
+              ).void
+            end
+            attr_writer :sampling_params
+
+            # A ResponsesRunDataSource object describing a model sampling configuration.
+            sig do
+              params(
+                source:
+                  T.any(
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent::OrHash,
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileID::OrHash,
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Responses::OrHash
+                  ),
+                input_messages:
+                  T.any(
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::OrHash,
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::ItemReference::OrHash
+                  ),
+                model: String,
+                sampling_params:
+                  OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::OrHash,
+                type: Symbol
+              ).returns(T.attached_class)
+            end
+            def self.new(
+              # Determines what populates the `item` namespace in this run's data source.
+              source:,
+              # Used when sampling from a model. Dictates the structure of the messages passed
+              # into the model. Can either be a reference to a prebuilt trajectory (i.e.,
+              # `item.input_trajectory`), or a template with variable references to the `item`
+              # namespace.
+              input_messages: nil,
+              # The name of the model to use for generating completions (e.g. "o3-mini").
+              model: nil,
+              sampling_params: nil,
+              # The type of run data source. Always `responses`.
+              type: :responses
+            )
+            end
+
+            sig do
+              override.returns(
+                {
+                  source:
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Variants,
+                  type: Symbol,
+                  input_messages:
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Variants,
+                  model: String,
+                  sampling_params:
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams
+                }
+              )
+            end
+            def to_hash
+            end
+
+            # Determines what populates the `item` namespace in this run's data source.
+            module Source
+              extend OpenAI::Internal::Type::Union
+
+              Variants =
+                T.type_alias do
+                  T.any(
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent,
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileID,
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Responses
+                  )
+                end
+
+              class FileContent < OpenAI::Internal::Type::BaseModel
+                OrHash =
+                  T.type_alias do
+                    T.any(
+                      OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent,
+                      OpenAI::Internal::AnyHash
+                    )
+                  end
+
+                # The content of the jsonl file.
+                sig do
+                  returns(
+                    T::Array[
+                      OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent::Content
+                    ]
+                  )
+                end
+                attr_accessor :content
+
+                # The type of jsonl source. Always `file_content`.
+                sig { returns(Symbol) }
+                attr_accessor :type
+
+                sig do
+                  params(
+                    content:
+                      T::Array[
+                        OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent::Content::OrHash
+                      ],
+                    type: Symbol
+                  ).returns(T.attached_class)
+                end
+                def self.new(
+                  # The content of the jsonl file.
+                  content:,
+                  # The type of jsonl source. Always `file_content`.
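+                  #
+                  # Because the `OrHash` aliases accept plain hashes, an equivalent
+                  # hash form might look like this (the item keys are hypothetical):
+                  #
+                  #   {type: :file_content, content: [{item: {question: "2 + 2?"}}]}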
+ type: :file_content + ) + end + + sig do + override.returns( + { + content: + T::Array[ + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent::Content + ], + type: Symbol + } + ) + end + def to_hash + end + + class Content < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent::Content, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :item + + sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } + attr_reader :sample + + sig { params(sample: T::Hash[Symbol, T.anything]).void } + attr_writer :sample + + sig do + params( + item: T::Hash[Symbol, T.anything], + sample: T::Hash[Symbol, T.anything] + ).returns(T.attached_class) + end + def self.new(item:, sample: nil) + end + + sig do + override.returns( + { + item: T::Hash[Symbol, T.anything], + sample: T::Hash[Symbol, T.anything] + } + ) + end + def to_hash + end + end + end + + class FileID < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileID, + OpenAI::Internal::AnyHash + ) + end + + # The identifier of the file. + sig { returns(String) } + attr_accessor :id + + # The type of jsonl source. Always `file_id`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params(id: String, type: Symbol).returns(T.attached_class) + end + def self.new( + # The identifier of the file. + id:, + # The type of jsonl source. Always `file_id`. + type: :file_id + ) + end + + sig { override.returns({ id: String, type: Symbol }) } + def to_hash + end + end + + class Responses < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Responses, + OpenAI::Internal::AnyHash + ) + end + + # The type of run data source. Always `responses`. + sig { returns(Symbol) } + attr_accessor :type + + # Only include items created after this timestamp (inclusive). This is a query + # parameter used to select responses. + sig { returns(T.nilable(Integer)) } + attr_accessor :created_after + + # Only include items created before this timestamp (inclusive). This is a query + # parameter used to select responses. + sig { returns(T.nilable(Integer)) } + attr_accessor :created_before + + # Optional string to search the 'instructions' field. This is a query parameter + # used to select responses. + sig { returns(T.nilable(String)) } + attr_accessor :instructions_search + + # Metadata filter for the responses. This is a query parameter used to select + # responses. + sig { returns(T.nilable(T.anything)) } + attr_accessor :metadata + + # The name of the model to find responses for. This is a query parameter used to + # select responses. + sig { returns(T.nilable(String)) } + attr_accessor :model + + # Optional reasoning effort parameter. This is a query parameter used to select + # responses. + sig do + returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) + end + attr_accessor :reasoning_effort + + # Sampling temperature. This is a query parameter used to select responses. + sig { returns(T.nilable(Float)) } + attr_accessor :temperature + + # List of tool names. This is a query parameter used to select responses. + sig { returns(T.nilable(T::Array[String])) } + attr_accessor :tools + + # Nucleus sampling parameter. This is a query parameter used to select responses. 
+                sig { returns(T.nilable(Float)) }
+                attr_accessor :top_p
+
+                # List of user identifiers. This is a query parameter used to select responses.
+                sig { returns(T.nilable(T::Array[String])) }
+                attr_accessor :users
+
+                # An EvalResponsesSource object describing a run data source configuration.
+                sig do
+                  params(
+                    created_after: T.nilable(Integer),
+                    created_before: T.nilable(Integer),
+                    instructions_search: T.nilable(String),
+                    metadata: T.nilable(T.anything),
+                    model: T.nilable(String),
+                    reasoning_effort:
+                      T.nilable(OpenAI::ReasoningEffort::OrSymbol),
+                    temperature: T.nilable(Float),
+                    tools: T.nilable(T::Array[String]),
+                    top_p: T.nilable(Float),
+                    users: T.nilable(T::Array[String]),
+                    type: Symbol
+                  ).returns(T.attached_class)
+                end
+                def self.new(
+                  # Only include items created after this timestamp (inclusive). This is a query
+                  # parameter used to select responses.
+                  created_after: nil,
+                  # Only include items created before this timestamp (inclusive). This is a query
+                  # parameter used to select responses.
+                  created_before: nil,
+                  # Optional string to search the 'instructions' field. This is a query parameter
+                  # used to select responses.
+                  instructions_search: nil,
+                  # Metadata filter for the responses. This is a query parameter used to select
+                  # responses.
+                  metadata: nil,
+                  # The name of the model to find responses for. This is a query parameter used to
+                  # select responses.
+                  model: nil,
+                  # Optional reasoning effort parameter. This is a query parameter used to select
+                  # responses.
+                  reasoning_effort: nil,
+                  # Sampling temperature. This is a query parameter used to select responses.
+                  temperature: nil,
+                  # List of tool names. This is a query parameter used to select responses.
+                  tools: nil,
+                  # Nucleus sampling parameter. This is a query parameter used to select responses.
+                  top_p: nil,
+                  # List of user identifiers. This is a query parameter used to select responses.
+                  users: nil,
+                  # The type of run data source. Always `responses`.
+                  type: :responses
+                )
+                end
+
+                sig do
+                  override.returns(
+                    {
+                      type: Symbol,
+                      created_after: T.nilable(Integer),
+                      created_before: T.nilable(Integer),
+                      instructions_search: T.nilable(String),
+                      metadata: T.nilable(T.anything),
+                      model: T.nilable(String),
+                      reasoning_effort:
+                        T.nilable(OpenAI::ReasoningEffort::TaggedSymbol),
+                      temperature: T.nilable(Float),
+                      tools: T.nilable(T::Array[String]),
+                      top_p: T.nilable(Float),
+                      users: T.nilable(T::Array[String])
+                    }
+                  )
+                end
+                def to_hash
+                end
+              end
+
+              sig do
+                override.returns(
+                  T::Array[
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Variants
+                  ]
+                )
+              end
+              def self.variants
+              end
+            end
+
+            # Used when sampling from a model. Dictates the structure of the messages passed
+            # into the model. Can either be a reference to a prebuilt trajectory (i.e.,
+            # `item.input_trajectory`), or a template with variable references to the `item`
+            # namespace.
+            module InputMessages
+              extend OpenAI::Internal::Type::Union
+
+              Variants =
+                T.type_alias do
+                  T.any(
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template,
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::ItemReference
+                  )
+                end
+
+              class Template < OpenAI::Internal::Type::BaseModel
+                OrHash =
+                  T.type_alias do
+                    T.any(
+                      OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template,
+                      OpenAI::Internal::AnyHash
+                    )
+                  end
+
+                # A list of chat messages forming the prompt or context. May include variable
+                # references to the `item` namespace, i.e., {{item.name}}.
+                sig do
+                  returns(
+                    T::Array[
+                      OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::Variants
+                    ]
+                  )
+                end
+                attr_accessor :template
+
+                # The type of input messages. Always `template`.
+                sig { returns(Symbol) }
+                attr_accessor :type
+
+                sig do
+                  params(
+                    template:
+                      T::Array[
+                        T.any(
+                          OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage::OrHash,
+                          OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::OrHash
+                        )
+                      ],
+                    type: Symbol
+                  ).returns(T.attached_class)
+                end
+                def self.new(
+                  # A list of chat messages forming the prompt or context. May include variable
+                  # references to the `item` namespace, i.e., {{item.name}}.
+                  template:,
+                  # The type of input messages. Always `template`.
+                  type: :template
+                )
+                end
+
+                sig do
+                  override.returns(
+                    {
+                      template:
+                        T::Array[
+                          OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::Variants
+                        ],
+                      type: Symbol
+                    }
+                  )
+                end
+                def to_hash
+                end
+
+                # A message input to the model with a role indicating instruction following
+                # hierarchy. Instructions given with the `developer` or `system` role take
+                # precedence over instructions given with the `user` role. Messages with the
+                # `assistant` role are presumed to have been generated by the model in previous
+                # interactions.
+                module Template
+                  extend OpenAI::Internal::Type::Union
+
+                  Variants =
+                    T.type_alias do
+                      T.any(
+                        OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage,
+                        OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem
+                      )
+                    end
+
+                  class ChatMessage < OpenAI::Internal::Type::BaseModel
+                    OrHash =
+                      T.type_alias do
+                        T.any(
+                          OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage,
+                          OpenAI::Internal::AnyHash
+                        )
+                      end
+
+                    # The content of the message.
+                    sig { returns(String) }
+                    attr_accessor :content
+
+                    # The role of the message (e.g. "system", "assistant", "user").
+                    sig { returns(String) }
+                    attr_accessor :role
+
+                    sig do
+                      params(content: String, role: String).returns(
+                        T.attached_class
+                      )
+                    end
+                    def self.new(
+                      # The content of the message.
+                      content:,
+                      # The role of the message (e.g. "system", "assistant", "user").
+                      role:
+                    )
+                    end
+
+                    sig { override.returns({ content: String, role: String }) }
+                    def to_hash
+                    end
+                  end
+
+                  class EvalItem < OpenAI::Internal::Type::BaseModel
+                    OrHash =
+                      T.type_alias do
+                        T.any(
+                          OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem,
+                          OpenAI::Internal::AnyHash
+                        )
+                      end
+
+                    # Inputs to the model - can contain template strings.
+                    sig do
+                      returns(
+                        OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::Variants
+                      )
+                    end
+                    attr_accessor :content
+
+                    # The role of the message input. One of `user`, `assistant`, `system`, or
+                    # `developer`.
+                    sig do
+                      returns(
+                        OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol
+                      )
+                    end
+                    attr_accessor :role
+
+                    # The type of the message input. Always `message`.
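+                    #
+                    # One templated message in hash form might look like this
+                    # (a sketch; the `item.answer` field is hypothetical):
+                    #
+                    #   {role: :user, content: "Grade this answer: {{item.answer}}"}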
+ sig do + returns( + T.nilable( + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + ) + ) + end + attr_reader :type + + sig do + params( + type: + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::OrSymbol + ).void + end + attr_writer :type + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + sig do + params( + content: + T.any( + String, + OpenAI::Responses::ResponseInputText::OrHash, + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText::OrHash, + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage::OrHash, + T::Array[T.anything] + ), + role: + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::OrSymbol, + type: + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # Inputs to the model - can contain template strings. + content:, + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + role:, + # The type of the message input. Always `message`. + type: nil + ) + end + + sig do + override.returns( + { + content: + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::Variants, + role: + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol, + type: + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + } + ) + end + def to_hash + end + + # Inputs to the model - can contain template strings. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, + T::Array[T.anything] + ) + end + + class OutputText < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Internal::AnyHash + ) + end + + # The text output from the model. + sig { returns(String) } + attr_accessor :text + + # The type of the output text. Always `output_text`. + sig { returns(Symbol) } + attr_accessor :type + + # A text output from the model. + sig do + params(text: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The text output from the model. + text:, + # The type of the output text. Always `output_text`. 
+ type: :output_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + + class InputImage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, + OpenAI::Internal::AnyHash + ) + end + + # The URL of the image input. + sig { returns(String) } + attr_accessor :image_url + + # The type of the image input. Always `input_image`. + sig { returns(Symbol) } + attr_accessor :type + + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + sig { returns(T.nilable(String)) } + attr_reader :detail + + sig { params(detail: String).void } + attr_writer :detail + + # An image input to the model. + sig do + params( + image_url: String, + detail: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The URL of the image input. + image_url:, + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + detail: nil, + # The type of the image input. Always `input_image`. + type: :input_image + ) + end + + sig do + override.returns( + { image_url: String, type: Symbol, detail: String } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::Variants + ] + ) + end + def self.variants + end + + AnArrayOfInputTextAndInputImageArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + OpenAI::Internal::Type::Unknown + ], + OpenAI::Internal::Type::Converter + ) + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + SYSTEM = + T.let( + :system, + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + DEVELOPER = + T.let( + :developer, + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The type of the message input. Always `message`. 
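+                    #
+                    # As with the other enums in these RBIs, plain symbols or
+                    # strings (`OrSymbol`) are accepted on input, and the tagged
+                    # constant resolves to the underlying symbol, e.g.:
+                    #
+                    #   Type::MESSAGE #=> :message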
+                    module Type
+                      extend OpenAI::Internal::Type::Enum
+
+                      TaggedSymbol =
+                        T.type_alias do
+                          T.all(
+                            Symbol,
+                            OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type
+                          )
+                        end
+                      OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+                      MESSAGE =
+                        T.let(
+                          :message,
+                          OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol
+                        )
+
+                      sig do
+                        override.returns(
+                          T::Array[
+                            OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol
+                          ]
+                        )
+                      end
+                      def self.values
+                      end
+                    end
+                  end
+
+                  sig do
+                    override.returns(
+                      T::Array[
+                        OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::Variants
+                      ]
+                    )
+                  end
+                  def self.variants
+                  end
+                end
+              end
+
+              class ItemReference < OpenAI::Internal::Type::BaseModel
+                OrHash =
+                  T.type_alias do
+                    T.any(
+                      OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::ItemReference,
+                      OpenAI::Internal::AnyHash
+                    )
+                  end
+
+                # A reference to a variable in the `item` namespace. I.e., "item.name"
+                sig { returns(String) }
+                attr_accessor :item_reference
+
+                # The type of input messages. Always `item_reference`.
+                sig { returns(Symbol) }
+                attr_accessor :type
+
+                sig do
+                  params(item_reference: String, type: Symbol).returns(
+                    T.attached_class
+                  )
+                end
+                def self.new(
+                  # A reference to a variable in the `item` namespace. I.e., "item.name"
+                  item_reference:,
+                  # The type of input messages. Always `item_reference`.
+                  type: :item_reference
+                )
+                end
+
+                sig do
+                  override.returns({ item_reference: String, type: Symbol })
+                end
+                def to_hash
+                end
+              end
+
+              sig do
+                override.returns(
+                  T::Array[
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Variants
+                  ]
+                )
+              end
+              def self.variants
+              end
+            end
+
+            class SamplingParams < OpenAI::Internal::Type::BaseModel
+              OrHash =
+                T.type_alias do
+                  T.any(
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams,
+                    OpenAI::Internal::AnyHash
+                  )
+                end
+
+              # The maximum number of tokens in the generated output.
+              sig { returns(T.nilable(Integer)) }
+              attr_reader :max_completion_tokens
+
+              sig { params(max_completion_tokens: Integer).void }
+              attr_writer :max_completion_tokens
+
+              # A seed value to initialize the randomness during sampling.
+              sig { returns(T.nilable(Integer)) }
+              attr_reader :seed
+
+              sig { params(seed: Integer).void }
+              attr_writer :seed
+
+              # A higher temperature increases randomness in the outputs.
+              sig { returns(T.nilable(Float)) }
+              attr_reader :temperature
+
+              sig { params(temperature: Float).void }
+              attr_writer :temperature
+
+              # Configuration options for a text response from the model. Can be plain text or
+              # structured JSON data. Learn more:
+              #
+              # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+              sig do
+                returns(
+                  T.nilable(
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text
+                  )
+                )
+              end
+              attr_reader :text
+
+              sig do
+                params(
+                  text:
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text::OrHash
+                ).void
+              end
+              attr_writer :text
+
+              # An array of tools the model may call while generating a response. You can
+              # specify which tool to use by setting the `tool_choice` parameter.
+              #
+              # The two categories of tools you can provide the model are:
+              #
+              # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+              #   capabilities, like
+              #   [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+              #   [file search](https://platform.openai.com/docs/guides/tools-file-search).
+              #   Learn more about
+              #   [built-in tools](https://platform.openai.com/docs/guides/tools).
+              # - **Function calls (custom tools)**: Functions that are defined by you, enabling
+              #   the model to call your own code. Learn more about
+              #   [function calling](https://platform.openai.com/docs/guides/function-calling).
+              sig do
+                returns(T.nilable(T::Array[OpenAI::Responses::Tool::Variants]))
+              end
+              attr_reader :tools
+
+              sig do
+                params(
+                  tools:
+                    T::Array[
+                      T.any(
+                        OpenAI::Responses::FunctionTool::OrHash,
+                        OpenAI::Responses::FileSearchTool::OrHash,
+                        OpenAI::Responses::ComputerTool::OrHash,
+                        OpenAI::Responses::Tool::Mcp::OrHash,
+                        OpenAI::Responses::Tool::CodeInterpreter::OrHash,
+                        OpenAI::Responses::Tool::ImageGeneration::OrHash,
+                        OpenAI::Responses::Tool::LocalShell::OrHash,
+                        OpenAI::Responses::CustomTool::OrHash,
+                        OpenAI::Responses::WebSearchTool::OrHash
+                      )
+                    ]
+                ).void
+              end
+              attr_writer :tools
+
+              # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+              sig { returns(T.nilable(Float)) }
+              attr_reader :top_p
+
+              sig { params(top_p: Float).void }
+              attr_writer :top_p
+
+              sig do
+                params(
+                  max_completion_tokens: Integer,
+                  seed: Integer,
+                  temperature: Float,
+                  text:
+                    OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text::OrHash,
+                  tools:
+                    T::Array[
+                      T.any(
+                        OpenAI::Responses::FunctionTool::OrHash,
+                        OpenAI::Responses::FileSearchTool::OrHash,
+                        OpenAI::Responses::ComputerTool::OrHash,
+                        OpenAI::Responses::Tool::Mcp::OrHash,
+                        OpenAI::Responses::Tool::CodeInterpreter::OrHash,
+                        OpenAI::Responses::Tool::ImageGeneration::OrHash,
+                        OpenAI::Responses::Tool::LocalShell::OrHash,
+                        OpenAI::Responses::CustomTool::OrHash,
+                        OpenAI::Responses::WebSearchTool::OrHash
+                      )
+                    ],
+                  top_p: Float
+                ).returns(T.attached_class)
+              end
+              def self.new(
+                # The maximum number of tokens in the generated output.
+                max_completion_tokens: nil,
+                # A seed value to initialize the randomness during sampling.
+                seed: nil,
+                # A higher temperature increases randomness in the outputs.
+                temperature: nil,
+                # Configuration options for a text response from the model. Can be plain text or
+                # structured JSON data. Learn more:
+                #
+                # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+                # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+                text: nil,
+                # An array of tools the model may call while generating a response. You can
+                # specify which tool to use by setting the `tool_choice` parameter.
+                #
+                # The two categories of tools you can provide the model are:
+                #
+                # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+                #   capabilities, like
+                #   [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+                #   [file search](https://platform.openai.com/docs/guides/tools-file-search).
+                #   Learn more about
+                #   [built-in tools](https://platform.openai.com/docs/guides/tools).
+                # - **Function calls (custom tools)**: Functions that are defined by you, enabling
+                #   the model to call your own code. Learn more about
+                #   [function calling](https://platform.openai.com/docs/guides/function-calling).
+ tools: nil, + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + top_p: nil + ) + end + + sig do + override.returns( + { + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text, + tools: T::Array[OpenAI::Responses::Tool::Variants], + top_p: Float + } + ) + end + def to_hash + end + + class Text < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text, + OpenAI::Internal::AnyHash + ) + end + + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseFormatTextConfig::Variants + ) + ) + end + attr_reader :format_ + + sig do + params( + format_: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ) + ).void + end + attr_writer :format_ + + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + sig do + params( + format_: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ) + ).returns(T.attached_class) + end + def self.new( + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + format_: nil + ) + end + + sig do + override.returns( + { + format_: + OpenAI::Responses::ResponseFormatTextConfig::Variants + } + ) + end + def to_hash + end + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunListResponse::DataSource::Variants + ] + ) + end + def self.variants + end + end + + class PerModelUsage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunListResponse::PerModelUsage, + OpenAI::Internal::AnyHash + ) + end + + # The number of tokens retrieved from cache. + sig { returns(Integer) } + attr_accessor :cached_tokens + + # The number of completion tokens generated. 
+          sig { returns(Integer) }
+          attr_accessor :completion_tokens
+
+          # The number of invocations.
+          sig { returns(Integer) }
+          attr_accessor :invocation_count
+
+          # The name of the model.
+          sig { returns(String) }
+          attr_accessor :model_name
+
+          # The number of prompt tokens used.
+          sig { returns(Integer) }
+          attr_accessor :prompt_tokens
+
+          # The total number of tokens used.
+          sig { returns(Integer) }
+          attr_accessor :total_tokens
+
+          sig do
+            params(
+              cached_tokens: Integer,
+              completion_tokens: Integer,
+              invocation_count: Integer,
+              model_name: String,
+              prompt_tokens: Integer,
+              total_tokens: Integer
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # The number of tokens retrieved from cache.
+            cached_tokens:,
+            # The number of completion tokens generated.
+            completion_tokens:,
+            # The number of invocations.
+            invocation_count:,
+            # The name of the model.
+            model_name:,
+            # The number of prompt tokens used.
+            prompt_tokens:,
+            # The total number of tokens used.
+            total_tokens:
+          )
+          end
+
+          sig do
+            override.returns(
+              {
+                cached_tokens: Integer,
+                completion_tokens: Integer,
+                invocation_count: Integer,
+                model_name: String,
+                prompt_tokens: Integer,
+                total_tokens: Integer
+              }
+            )
+          end
+          def to_hash
+          end
+        end
+
+        class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::Models::Evals::RunListResponse::PerTestingCriteriaResult,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          # Number of tests failed for this criterion.
+          sig { returns(Integer) }
+          attr_accessor :failed
+
+          # Number of tests passed for this criterion.
+          sig { returns(Integer) }
+          attr_accessor :passed
+
+          # A description of the testing criteria.
+          sig { returns(String) }
+          attr_accessor :testing_criteria
+
+          sig do
+            params(
+              failed: Integer,
+              passed: Integer,
+              testing_criteria: String
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # Number of tests failed for this criterion.
+            failed:,
+            # Number of tests passed for this criterion.
+            passed:,
+            # A description of the testing criteria.
+            testing_criteria:
+          )
+          end
+
+          sig do
+            override.returns(
+              { failed: Integer, passed: Integer, testing_criteria: String }
+            )
+          end
+          def to_hash
+          end
+        end
+
+        class ResultCounts < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::Models::Evals::RunListResponse::ResultCounts,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          # Number of output items that resulted in an error.
+          sig { returns(Integer) }
+          attr_accessor :errored
+
+          # Number of output items that failed to pass the evaluation.
+          sig { returns(Integer) }
+          attr_accessor :failed
+
+          # Number of output items that passed the evaluation.
+          sig { returns(Integer) }
+          attr_accessor :passed
+
+          # Total number of executed output items.
+          sig { returns(Integer) }
+          attr_accessor :total
+
+          # Counters summarizing the outcomes of the evaluation run.
+          sig do
+            params(
+              errored: Integer,
+              failed: Integer,
+              passed: Integer,
+              total: Integer
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # Number of output items that resulted in an error.
+            errored:,
+            # Number of output items that failed to pass the evaluation.
+            failed:,
+            # Number of output items that passed the evaluation.
+            passed:,
+            # Total number of executed output items.
+ total: + ) + end + + sig do + override.returns( + { + errored: Integer, + failed: Integer, + passed: Integer, + total: Integer + } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/evals/run_retrieve_params.rbi b/rbi/openai/models/evals/run_retrieve_params.rbi new file mode 100644 index 00000000..002b7cb6 --- /dev/null +++ b/rbi/openai/models/evals/run_retrieve_params.rbi @@ -0,0 +1,37 @@ +# typed: strong + +module OpenAI + module Models + module Evals + class RunRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::Evals::RunRetrieveParams, OpenAI::Internal::AnyHash) + end + + sig { returns(String) } + attr_accessor :eval_id + + sig do + params( + eval_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(eval_id:, request_options: {}) + end + + sig do + override.returns( + { eval_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/evals/run_retrieve_response.rbi b/rbi/openai/models/evals/run_retrieve_response.rbi new file mode 100644 index 00000000..8797d797 --- /dev/null +++ b/rbi/openai/models/evals/run_retrieve_response.rbi @@ -0,0 +1,1552 @@ +# typed: strong + +module OpenAI + module Models + module Evals + class RunRetrieveResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse, + OpenAI::Internal::AnyHash + ) + end + + # Unique identifier for the evaluation run. + sig { returns(String) } + attr_accessor :id + + # Unix timestamp (in seconds) when the evaluation run was created. + sig { returns(Integer) } + attr_accessor :created_at + + # Information about the run's data source. + sig do + returns( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Variants + ) + end + attr_accessor :data_source + + # An object representing an error response from the Eval API. + sig { returns(OpenAI::Evals::EvalAPIError) } + attr_reader :error + + sig { params(error: OpenAI::Evals::EvalAPIError::OrHash).void } + attr_writer :error + + # The identifier of the associated evaluation. + sig { returns(String) } + attr_accessor :eval_id + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The model that is evaluated, if applicable. + sig { returns(String) } + attr_accessor :model + + # The name of the evaluation run. + sig { returns(String) } + attr_accessor :name + + # The type of the object. Always "eval.run". + sig { returns(Symbol) } + attr_accessor :object + + # Usage statistics for each model during the evaluation run. + sig do + returns( + T::Array[OpenAI::Models::Evals::RunRetrieveResponse::PerModelUsage] + ) + end + attr_accessor :per_model_usage + + # Results per testing criteria applied during the evaluation run. 
+ sig do + returns( + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::PerTestingCriteriaResult + ] + ) + end + attr_accessor :per_testing_criteria_results + + # The URL to the rendered evaluation run report on the UI dashboard. + sig { returns(String) } + attr_accessor :report_url + + # Counters summarizing the outcomes of the evaluation run. + sig do + returns(OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts) + end + attr_reader :result_counts + + sig do + params( + result_counts: + OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts::OrHash + ).void + end + attr_writer :result_counts + + # The status of the evaluation run. + sig { returns(String) } + attr_accessor :status + + # A schema representing an evaluation run. + sig do + params( + id: String, + created_at: Integer, + data_source: + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource::OrHash, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::OrHash, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::OrHash + ), + error: OpenAI::Evals::EvalAPIError::OrHash, + eval_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + model: String, + name: String, + per_model_usage: + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::PerModelUsage::OrHash + ], + per_testing_criteria_results: + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::PerTestingCriteriaResult::OrHash + ], + report_url: String, + result_counts: + OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts::OrHash, + status: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the evaluation run. + id:, + # Unix timestamp (in seconds) when the evaluation run was created. + created_at:, + # Information about the run's data source. + data_source:, + # An object representing an error response from the Eval API. + error:, + # The identifier of the associated evaluation. + eval_id:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + # The model that is evaluated, if applicable. + model:, + # The name of the evaluation run. + name:, + # Usage statistics for each model during the evaluation run. + per_model_usage:, + # Results per testing criteria applied during the evaluation run. + per_testing_criteria_results:, + # The URL to the rendered evaluation run report on the UI dashboard. + report_url:, + # Counters summarizing the outcomes of the evaluation run. + result_counts:, + # The status of the evaluation run. + status:, + # The type of the object. Always "eval.run". 
+ object: :"eval.run" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data_source: + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Variants, + error: OpenAI::Evals::EvalAPIError, + eval_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + model: String, + name: String, + object: Symbol, + per_model_usage: + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::PerModelUsage + ], + per_testing_criteria_results: + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::PerTestingCriteriaResult + ], + report_url: String, + result_counts: + OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts, + status: String + } + ) + end + def to_hash + end + + # Information about the run's data source. + module DataSource + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource, + OpenAI::Evals::CreateEvalCompletionsRunDataSource, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses + ) + end + + class Responses < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses, + OpenAI::Internal::AnyHash + ) + end + + # Determines what populates the `item` namespace in this run's data source. + sig do + returns( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Variants + ) + end + attr_accessor :source + + # The type of run data source. Always `responses`. + sig { returns(Symbol) } + attr_accessor :type + + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + sig do + returns( + T.nilable( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Variants + ) + ) + end + attr_reader :input_messages + + sig do + params( + input_messages: + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::OrHash, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::ItemReference::OrHash + ) + ).void + end + attr_writer :input_messages + + # The name of the model to use for generating completions (e.g. "o3-mini"). + sig { returns(T.nilable(String)) } + attr_reader :model + + sig { params(model: String).void } + attr_writer :model + + sig do + returns( + T.nilable( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams + ) + ) + end + attr_reader :sampling_params + + sig do + params( + sampling_params: + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::OrHash + ).void + end + attr_writer :sampling_params + + # A ResponsesRunDataSource object describing a model sampling configuration. 
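+ # (Hypothetical sketch: narrowing the `data_source` union on a retrieved
+ # run. The `client.evals.runs.retrieve` call is assumed from the
+ # RunRetrieveParams file above, not guaranteed by these signatures.)
+ #
+ #   run = client.evals.runs.retrieve("run-id", eval_id: "eval-id")
+ #   case run.data_source
+ #   when OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses
+ #     puts run.data_source.model
+ #   end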
+ sig do + params( + source: + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent::OrHash, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileID::OrHash, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Responses::OrHash + ), + input_messages: + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::OrHash, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::ItemReference::OrHash + ), + model: String, + sampling_params: + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Determines what populates the `item` namespace in this run's data source. + source:, + # Used when sampling from a model. Dictates the structure of the messages passed + # into the model. Can either be a reference to a prebuilt trajectory (ie, + # `item.input_trajectory`), or a template with variable references to the `item` + # namespace. + input_messages: nil, + # The name of the model to use for generating completions (e.g. "o3-mini"). + model: nil, + sampling_params: nil, + # The type of run data source. Always `responses`. + type: :responses + ) + end + + sig do + override.returns( + { + source: + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Variants, + type: Symbol, + input_messages: + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Variants, + model: String, + sampling_params: + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams + } + ) + end + def to_hash + end + + # Determines what populates the `item` namespace in this run's data source. + module Source + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileID, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Responses + ) + end + + class FileContent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent, + OpenAI::Internal::AnyHash + ) + end + + # The content of the jsonl file. + sig do + returns( + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent::Content + ] + ) + end + attr_accessor :content + + # The type of jsonl source. Always `file_content`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + content: + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent::Content::OrHash + ], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The content of the jsonl file. + content:, + # The type of jsonl source. Always `file_content`. 
+ type: :file_content + ) + end + + sig do + override.returns( + { + content: + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent::Content + ], + type: Symbol + } + ) + end + def to_hash + end + + class Content < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent::Content, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :item + + sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } + attr_reader :sample + + sig { params(sample: T::Hash[Symbol, T.anything]).void } + attr_writer :sample + + sig do + params( + item: T::Hash[Symbol, T.anything], + sample: T::Hash[Symbol, T.anything] + ).returns(T.attached_class) + end + def self.new(item:, sample: nil) + end + + sig do + override.returns( + { + item: T::Hash[Symbol, T.anything], + sample: T::Hash[Symbol, T.anything] + } + ) + end + def to_hash + end + end + end + + class FileID < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileID, + OpenAI::Internal::AnyHash + ) + end + + # The identifier of the file. + sig { returns(String) } + attr_accessor :id + + # The type of jsonl source. Always `file_id`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params(id: String, type: Symbol).returns(T.attached_class) + end + def self.new( + # The identifier of the file. + id:, + # The type of jsonl source. Always `file_id`. + type: :file_id + ) + end + + sig { override.returns({ id: String, type: Symbol }) } + def to_hash + end + end + + class Responses < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Responses, + OpenAI::Internal::AnyHash + ) + end + + # The type of run data source. Always `responses`. + sig { returns(Symbol) } + attr_accessor :type + + # Only include items created after this timestamp (inclusive). This is a query + # parameter used to select responses. + sig { returns(T.nilable(Integer)) } + attr_accessor :created_after + + # Only include items created before this timestamp (inclusive). This is a query + # parameter used to select responses. + sig { returns(T.nilable(Integer)) } + attr_accessor :created_before + + # Optional string to search the 'instructions' field. This is a query parameter + # used to select responses. + sig { returns(T.nilable(String)) } + attr_accessor :instructions_search + + # Metadata filter for the responses. This is a query parameter used to select + # responses. + sig { returns(T.nilable(T.anything)) } + attr_accessor :metadata + + # The name of the model to find responses for. This is a query parameter used to + # select responses. + sig { returns(T.nilable(String)) } + attr_accessor :model + + # Optional reasoning effort parameter. This is a query parameter used to select + # responses. + sig do + returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) + end + attr_accessor :reasoning_effort + + # Sampling temperature. This is a query parameter used to select responses. + sig { returns(T.nilable(Float)) } + attr_accessor :temperature + + # List of tool names. This is a query parameter used to select responses. + sig { returns(T.nilable(T::Array[String])) } + attr_accessor :tools + + # Nucleus sampling parameter. This is a query parameter used to select responses. 
+ sig { returns(T.nilable(Float)) }
+ attr_accessor :top_p
+
+ # List of user identifiers. This is a query parameter used to select responses.
+ sig { returns(T.nilable(T::Array[String])) }
+ attr_accessor :users
+
+ # An EvalResponsesSource object describing a run data source configuration.
+ sig do
+ params(
+ created_after: T.nilable(Integer),
+ created_before: T.nilable(Integer),
+ instructions_search: T.nilable(String),
+ metadata: T.nilable(T.anything),
+ model: T.nilable(String),
+ reasoning_effort:
+ T.nilable(OpenAI::ReasoningEffort::OrSymbol),
+ temperature: T.nilable(Float),
+ tools: T.nilable(T::Array[String]),
+ top_p: T.nilable(Float),
+ users: T.nilable(T::Array[String]),
+ type: Symbol
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # Only include items created after this timestamp (inclusive). This is a query
+ # parameter used to select responses.
+ created_after: nil,
+ # Only include items created before this timestamp (inclusive). This is a query
+ # parameter used to select responses.
+ created_before: nil,
+ # Optional string to search the 'instructions' field. This is a query parameter
+ # used to select responses.
+ instructions_search: nil,
+ # Metadata filter for the responses. This is a query parameter used to select
+ # responses.
+ metadata: nil,
+ # The name of the model to find responses for. This is a query parameter used to
+ # select responses.
+ model: nil,
+ # Optional reasoning effort parameter. This is a query parameter used to select
+ # responses.
+ reasoning_effort: nil,
+ # Sampling temperature. This is a query parameter used to select responses.
+ temperature: nil,
+ # List of tool names. This is a query parameter used to select responses.
+ tools: nil,
+ # Nucleus sampling parameter. This is a query parameter used to select responses.
+ top_p: nil,
+ # List of user identifiers. This is a query parameter used to select responses.
+ users: nil,
+ # The type of run data source. Always `responses`.
+ type: :responses
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ type: Symbol,
+ created_after: T.nilable(Integer),
+ created_before: T.nilable(Integer),
+ instructions_search: T.nilable(String),
+ metadata: T.nilable(T.anything),
+ model: T.nilable(String),
+ reasoning_effort:
+ T.nilable(OpenAI::ReasoningEffort::TaggedSymbol),
+ temperature: T.nilable(Float),
+ tools: T.nilable(T::Array[String]),
+ top_p: T.nilable(Float),
+ users: T.nilable(T::Array[String])
+ }
+ )
+ end
+ def to_hash
+ end
+ end
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Variants
+ ]
+ )
+ end
+ def self.variants
+ end
+ end
+
+ # Used when sampling from a model. Dictates the structure of the messages passed
+ # into the model. Can either be a reference to a prebuilt trajectory (ie,
+ # `item.input_trajectory`), or a template with variable references to the `item`
+ # namespace.
+ module InputMessages
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template,
+ OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::ItemReference
+ )
+ end
+
+ class Template < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # A list of chat messages forming the prompt or context.
May include variable + # references to the `item` namespace, ie {{item.name}}. + sig do + returns( + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::Variants + ] + ) + end + attr_accessor :template + + # The type of input messages. Always `template`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + template: + T::Array[ + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage::OrHash, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::OrHash + ) + ], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # A list of chat messages forming the prompt or context. May include variable + # references to the `item` namespace, ie {{item.name}}. + template:, + # The type of input messages. Always `template`. + type: :template + ) + end + + sig do + override.returns( + { + template: + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::Variants + ], + type: Symbol + } + ) + end + def to_hash + end + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + module Template + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem + ) + end + + class ChatMessage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, + OpenAI::Internal::AnyHash + ) + end + + # The content of the message. + sig { returns(String) } + attr_accessor :content + + # The role of the message (e.g. "system", "assistant", "user"). + sig { returns(String) } + attr_accessor :role + + sig do + params(content: String, role: String).returns( + T.attached_class + ) + end + def self.new( + # The content of the message. + content:, + # The role of the message (e.g. "system", "assistant", "user"). + role: + ) + end + + sig { override.returns({ content: String, role: String }) } + def to_hash + end + end + + class EvalItem < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem, + OpenAI::Internal::AnyHash + ) + end + + # Inputs to the model - can contain template strings. + sig do + returns( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::Variants + ) + end + attr_accessor :content + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + sig do + returns( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + end + attr_accessor :role + + # The type of the message input. Always `message`. 
+ sig do + returns( + T.nilable( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + ) + ) + end + attr_reader :type + + sig do + params( + type: + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::OrSymbol + ).void + end + attr_writer :type + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + sig do + params( + content: + T.any( + String, + OpenAI::Responses::ResponseInputText::OrHash, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText::OrHash, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage::OrHash, + T::Array[T.anything] + ), + role: + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::OrSymbol, + type: + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # Inputs to the model - can contain template strings. + content:, + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + role:, + # The type of the message input. Always `message`. + type: nil + ) + end + + sig do + override.returns( + { + content: + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::Variants, + role: + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol, + type: + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + } + ) + end + def to_hash + end + + # Inputs to the model - can contain template strings. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, + T::Array[T.anything] + ) + end + + class OutputText < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, + OpenAI::Internal::AnyHash + ) + end + + # The text output from the model. + sig { returns(String) } + attr_accessor :text + + # The type of the output text. Always `output_text`. + sig { returns(Symbol) } + attr_accessor :type + + # A text output from the model. + sig do + params(text: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The text output from the model. + text:, + # The type of the output text. Always `output_text`. 
+ type: :output_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + + class InputImage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, + OpenAI::Internal::AnyHash + ) + end + + # The URL of the image input. + sig { returns(String) } + attr_accessor :image_url + + # The type of the image input. Always `input_image`. + sig { returns(Symbol) } + attr_accessor :type + + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + sig { returns(T.nilable(String)) } + attr_reader :detail + + sig { params(detail: String).void } + attr_writer :detail + + # An image input to the model. + sig do + params( + image_url: String, + detail: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The URL of the image input. + image_url:, + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + detail: nil, + # The type of the image input. Always `input_image`. + type: :input_image + ) + end + + sig do + override.returns( + { image_url: String, type: Symbol, detail: String } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::Variants + ] + ) + end + def self.variants + end + + AnArrayOfInputTextAndInputImageArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + OpenAI::Internal::Type::Unknown + ], + OpenAI::Internal::Type::Converter + ) + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + SYSTEM = + T.let( + :system, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + DEVELOPER = + T.let( + :developer, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The type of the message input. Always `message`. 
+ module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MESSAGE = + T.let( + :message, + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::Variants + ] + ) + end + def self.variants + end + end + end + + class ItemReference < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::ItemReference, + OpenAI::Internal::AnyHash + ) + end + + # A reference to a variable in the `item` namespace. Ie, "item.name" + sig { returns(String) } + attr_accessor :item_reference + + # The type of input messages. Always `item_reference`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params(item_reference: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # A reference to a variable in the `item` namespace. Ie, "item.name" + item_reference:, + # The type of input messages. Always `item_reference`. + type: :item_reference + ) + end + + sig do + override.returns({ item_reference: String, type: Symbol }) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Variants + ] + ) + end + def self.variants + end + end + + class SamplingParams < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams, + OpenAI::Internal::AnyHash + ) + end + + # The maximum number of tokens in the generated output. + sig { returns(T.nilable(Integer)) } + attr_reader :max_completion_tokens + + sig { params(max_completion_tokens: Integer).void } + attr_writer :max_completion_tokens + + # A seed value to initialize the randomness, during sampling. + sig { returns(T.nilable(Integer)) } + attr_reader :seed + + sig { params(seed: Integer).void } + attr_writer :seed + + # A higher temperature increases randomness in the outputs. + sig { returns(T.nilable(Float)) } + attr_reader :temperature + + sig { params(temperature: Float).void } + attr_writer :temperature + + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + sig do + returns( + T.nilable( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text + ) + ) + end + attr_reader :text + + sig do + params( + text: + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text::OrHash + ).void + end + attr_writer :text + + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. 
+ # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + sig do + returns(T.nilable(T::Array[OpenAI::Responses::Tool::Variants])) + end + attr_reader :tools + + sig do + params( + tools: + T::Array[ + T.any( + OpenAI::Responses::FunctionTool::OrHash, + OpenAI::Responses::FileSearchTool::OrHash, + OpenAI::Responses::ComputerTool::OrHash, + OpenAI::Responses::Tool::Mcp::OrHash, + OpenAI::Responses::Tool::CodeInterpreter::OrHash, + OpenAI::Responses::Tool::ImageGeneration::OrHash, + OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::WebSearchTool::OrHash + ) + ] + ).void + end + attr_writer :tools + + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + sig { returns(T.nilable(Float)) } + attr_reader :top_p + + sig { params(top_p: Float).void } + attr_writer :top_p + + sig do + params( + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text::OrHash, + tools: + T::Array[ + T.any( + OpenAI::Responses::FunctionTool::OrHash, + OpenAI::Responses::FileSearchTool::OrHash, + OpenAI::Responses::ComputerTool::OrHash, + OpenAI::Responses::Tool::Mcp::OrHash, + OpenAI::Responses::Tool::CodeInterpreter::OrHash, + OpenAI::Responses::Tool::ImageGeneration::OrHash, + OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::WebSearchTool::OrHash + ) + ], + top_p: Float + ).returns(T.attached_class) + end + def self.new( + # The maximum number of tokens in the generated output. + max_completion_tokens: nil, + # A seed value to initialize the randomness, during sampling. + seed: nil, + # A higher temperature increases randomness in the outputs. + temperature: nil, + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + text: nil, + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). 
+ tools: nil, + # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + top_p: nil + ) + end + + sig do + override.returns( + { + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text, + tools: T::Array[OpenAI::Responses::Tool::Variants], + top_p: Float + } + ) + end + def to_hash + end + + class Text < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text, + OpenAI::Internal::AnyHash + ) + end + + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseFormatTextConfig::Variants + ) + ) + end + attr_reader :format_ + + sig do + params( + format_: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ) + ).void + end + attr_writer :format_ + + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + sig do + params( + format_: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ) + ).returns(T.attached_class) + end + def self.new( + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + format_: nil + ) + end + + sig do + override.returns( + { + format_: + OpenAI::Responses::ResponseFormatTextConfig::Variants + } + ) + end + def to_hash + end + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Variants + ] + ) + end + def self.variants + end + end + + class PerModelUsage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::PerModelUsage, + OpenAI::Internal::AnyHash + ) + end + + # The number of tokens retrieved from cache. + sig { returns(Integer) } + attr_accessor :cached_tokens + + # The number of completion tokens generated. 
+ sig { returns(Integer) } + attr_accessor :completion_tokens + + # The number of invocations. + sig { returns(Integer) } + attr_accessor :invocation_count + + # The name of the model. + sig { returns(String) } + attr_accessor :model_name + + # The number of prompt tokens used. + sig { returns(Integer) } + attr_accessor :prompt_tokens + + # The total number of tokens used. + sig { returns(Integer) } + attr_accessor :total_tokens + + sig do + params( + cached_tokens: Integer, + completion_tokens: Integer, + invocation_count: Integer, + model_name: String, + prompt_tokens: Integer, + total_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # The number of tokens retrieved from cache. + cached_tokens:, + # The number of completion tokens generated. + completion_tokens:, + # The number of invocations. + invocation_count:, + # The name of the model. + model_name:, + # The number of prompt tokens used. + prompt_tokens:, + # The total number of tokens used. + total_tokens: + ) + end + + sig do + override.returns( + { + cached_tokens: Integer, + completion_tokens: Integer, + invocation_count: Integer, + model_name: String, + prompt_tokens: Integer, + total_tokens: Integer + } + ) + end + def to_hash + end + end + + class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::PerTestingCriteriaResult, + OpenAI::Internal::AnyHash + ) + end + + # Number of tests failed for this criteria. + sig { returns(Integer) } + attr_accessor :failed + + # Number of tests passed for this criteria. + sig { returns(Integer) } + attr_accessor :passed + + # A description of the testing criteria. + sig { returns(String) } + attr_accessor :testing_criteria + + sig do + params( + failed: Integer, + passed: Integer, + testing_criteria: String + ).returns(T.attached_class) + end + def self.new( + # Number of tests failed for this criteria. + failed:, + # Number of tests passed for this criteria. + passed:, + # A description of the testing criteria. + testing_criteria: + ) + end + + sig do + override.returns( + { failed: Integer, passed: Integer, testing_criteria: String } + ) + end + def to_hash + end + end + + class ResultCounts < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts, + OpenAI::Internal::AnyHash + ) + end + + # Number of output items that resulted in an error. + sig { returns(Integer) } + attr_accessor :errored + + # Number of output items that failed to pass the evaluation. + sig { returns(Integer) } + attr_accessor :failed + + # Number of output items that passed the evaluation. + sig { returns(Integer) } + attr_accessor :passed + + # Total number of executed output items. + sig { returns(Integer) } + attr_accessor :total + + # Counters summarizing the outcomes of the evaluation run. + sig do + params( + errored: Integer, + failed: Integer, + passed: Integer, + total: Integer + ).returns(T.attached_class) + end + def self.new( + # Number of output items that resulted in an error. + errored:, + # Number of output items that failed to pass the evaluation. + failed:, + # Number of output items that passed the evaluation. + passed:, + # Total number of executed output items. 
+ total: + ) + end + + sig do + override.returns( + { + errored: Integer, + failed: Integer, + passed: Integer, + total: Integer + } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/evals/runs/output_item_list_params.rbi b/rbi/openai/models/evals/runs/output_item_list_params.rbi new file mode 100644 index 00000000..c4bfe68e --- /dev/null +++ b/rbi/openai/models/evals/runs/output_item_list_params.rbi @@ -0,0 +1,186 @@ +# typed: strong + +module OpenAI + module Models + module Evals + module Runs + class OutputItemListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::Runs::OutputItemListParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :eval_id + + # Identifier for the last output item from the previous pagination request. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # Number of output items to retrieve. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # Sort order for output items by timestamp. Use `asc` for ascending order or + # `desc` for descending order. Defaults to `asc`. + sig do + returns( + T.nilable( + OpenAI::Evals::Runs::OutputItemListParams::Order::OrSymbol + ) + ) + end + attr_reader :order + + sig do + params( + order: OpenAI::Evals::Runs::OutputItemListParams::Order::OrSymbol + ).void + end + attr_writer :order + + # Filter output items by status. Use `failed` to filter by failed output items or + # `pass` to filter by passed output items. + sig do + returns( + T.nilable( + OpenAI::Evals::Runs::OutputItemListParams::Status::OrSymbol + ) + ) + end + attr_reader :status + + sig do + params( + status: + OpenAI::Evals::Runs::OutputItemListParams::Status::OrSymbol + ).void + end + attr_writer :status + + sig do + params( + eval_id: String, + after: String, + limit: Integer, + order: OpenAI::Evals::Runs::OutputItemListParams::Order::OrSymbol, + status: + OpenAI::Evals::Runs::OutputItemListParams::Status::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + eval_id:, + # Identifier for the last output item from the previous pagination request. + after: nil, + # Number of output items to retrieve. + limit: nil, + # Sort order for output items by timestamp. Use `asc` for ascending order or + # `desc` for descending order. Defaults to `asc`. + order: nil, + # Filter output items by status. Use `failed` to filter by failed output items or + # `pass` to filter by passed output items. + status: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + eval_id: String, + after: String, + limit: Integer, + order: + OpenAI::Evals::Runs::OutputItemListParams::Order::OrSymbol, + status: + OpenAI::Evals::Runs::OutputItemListParams::Status::OrSymbol, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Sort order for output items by timestamp. Use `asc` for ascending order or + # `desc` for descending order. Defaults to `asc`. 
+ module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Evals::Runs::OutputItemListParams::Order) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = + T.let( + :asc, + OpenAI::Evals::Runs::OutputItemListParams::Order::TaggedSymbol + ) + DESC = + T.let( + :desc, + OpenAI::Evals::Runs::OutputItemListParams::Order::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Evals::Runs::OutputItemListParams::Order::TaggedSymbol + ] + ) + end + def self.values + end + end + + # Filter output items by status. Use `failed` to filter by failed output items or + # `pass` to filter by passed output items. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Evals::Runs::OutputItemListParams::Status) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + FAIL = + T.let( + :fail, + OpenAI::Evals::Runs::OutputItemListParams::Status::TaggedSymbol + ) + PASS = + T.let( + :pass, + OpenAI::Evals::Runs::OutputItemListParams::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Evals::Runs::OutputItemListParams::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/evals/runs/output_item_list_response.rbi b/rbi/openai/models/evals/runs/output_item_list_response.rbi new file mode 100644 index 00000000..9a34bb28 --- /dev/null +++ b/rbi/openai/models/evals/runs/output_item_list_response.rbi @@ -0,0 +1,411 @@ +# typed: strong + +module OpenAI + module Models + module Evals + module Runs + class OutputItemListResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::Runs::OutputItemListResponse, + OpenAI::Internal::AnyHash + ) + end + + # Unique identifier for the evaluation run output item. + sig { returns(String) } + attr_accessor :id + + # Unix timestamp (in seconds) when the evaluation run was created. + sig { returns(Integer) } + attr_accessor :created_at + + # Details of the input data source item. + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :datasource_item + + # The identifier for the data source item. + sig { returns(Integer) } + attr_accessor :datasource_item_id + + # The identifier of the evaluation group. + sig { returns(String) } + attr_accessor :eval_id + + # The type of the object. Always "eval.run.output_item". + sig { returns(Symbol) } + attr_accessor :object + + # A list of results from the evaluation run. + sig { returns(T::Array[T::Hash[Symbol, T.anything]]) } + attr_accessor :results + + # The identifier of the evaluation run associated with this output item. + sig { returns(String) } + attr_accessor :run_id + + # A sample containing the input and output of the evaluation run. + sig do + returns(OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample) + end + attr_reader :sample + + sig do + params( + sample: + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::OrHash + ).void + end + attr_writer :sample + + # The status of the evaluation run. + sig { returns(String) } + attr_accessor :status + + # A schema representing an evaluation run output item. 
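+ # (Hypothetical usage sketch: the `output_items.list` resource method is
+ # assumed from the OutputItemListParams file above; `order:` and `status:`
+ # take the enum symbols defined there.)
+ #
+ #   items = client.evals.runs.output_items.list(
+ #     "run-id",
+ #     eval_id: "eval-id",
+ #     status: :fail,
+ #     order: :desc
+ #   )
+ #   items.each { |item| puts "#{item.id}: #{item.status}" }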
+ sig do + params( + id: String, + created_at: Integer, + datasource_item: T::Hash[Symbol, T.anything], + datasource_item_id: Integer, + eval_id: String, + results: T::Array[T::Hash[Symbol, T.anything]], + run_id: String, + sample: + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::OrHash, + status: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the evaluation run output item. + id:, + # Unix timestamp (in seconds) when the evaluation run was created. + created_at:, + # Details of the input data source item. + datasource_item:, + # The identifier for the data source item. + datasource_item_id:, + # The identifier of the evaluation group. + eval_id:, + # A list of results from the evaluation run. + results:, + # The identifier of the evaluation run associated with this output item. + run_id:, + # A sample containing the input and output of the evaluation run. + sample:, + # The status of the evaluation run. + status:, + # The type of the object. Always "eval.run.output_item". + object: :"eval.run.output_item" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + datasource_item: T::Hash[Symbol, T.anything], + datasource_item_id: Integer, + eval_id: String, + object: Symbol, + results: T::Array[T::Hash[Symbol, T.anything]], + run_id: String, + sample: + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample, + status: String + } + ) + end + def to_hash + end + + class Sample < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample, + OpenAI::Internal::AnyHash + ) + end + + # An object representing an error response from the Eval API. + sig { returns(OpenAI::Evals::EvalAPIError) } + attr_reader :error + + sig { params(error: OpenAI::Evals::EvalAPIError::OrHash).void } + attr_writer :error + + # The reason why the sample generation was finished. + sig { returns(String) } + attr_accessor :finish_reason + + # An array of input messages. + sig do + returns( + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Input + ] + ) + end + attr_accessor :input + + # The maximum number of tokens allowed for completion. + sig { returns(Integer) } + attr_accessor :max_completion_tokens + + # The model used for generating the sample. + sig { returns(String) } + attr_accessor :model + + # An array of output messages. + sig do + returns( + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Output + ] + ) + end + attr_accessor :output + + # The seed used for generating the sample. + sig { returns(Integer) } + attr_accessor :seed + + # The sampling temperature used. + sig { returns(Float) } + attr_accessor :temperature + + # The top_p value used for sampling. + sig { returns(Float) } + attr_accessor :top_p + + # Token usage details for the sample. + sig do + returns( + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage + ) + end + attr_reader :usage + + sig do + params( + usage: + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage::OrHash + ).void + end + attr_writer :usage + + # A sample containing the input and output of the evaluation run. 
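+ # (Illustrative only: assumes `item` is a retrieved output item; the Input
+ # and Usage accessors used here are defined below.)
+ #
+ #   sample = item.sample
+ #   sample.input.each { |msg| puts "#{msg.role}: #{msg.content}" }
+ #   puts sample.usage.total_tokens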
+ sig do + params( + error: OpenAI::Evals::EvalAPIError::OrHash, + finish_reason: String, + input: + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Input::OrHash + ], + max_completion_tokens: Integer, + model: String, + output: + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Output::OrHash + ], + seed: Integer, + temperature: Float, + top_p: Float, + usage: + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage::OrHash + ).returns(T.attached_class) + end + def self.new( + # An object representing an error response from the Eval API. + error:, + # The reason why the sample generation was finished. + finish_reason:, + # An array of input messages. + input:, + # The maximum number of tokens allowed for completion. + max_completion_tokens:, + # The model used for generating the sample. + model:, + # An array of output messages. + output:, + # The seed used for generating the sample. + seed:, + # The sampling temperature used. + temperature:, + # The top_p value used for sampling. + top_p:, + # Token usage details for the sample. + usage: + ) + end + + sig do + override.returns( + { + error: OpenAI::Evals::EvalAPIError, + finish_reason: String, + input: + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Input + ], + max_completion_tokens: Integer, + model: String, + output: + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Output + ], + seed: Integer, + temperature: Float, + top_p: Float, + usage: + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage + } + ) + end + def to_hash + end + + class Input < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Input, + OpenAI::Internal::AnyHash + ) + end + + # The content of the message. + sig { returns(String) } + attr_accessor :content + + # The role of the message sender (e.g., system, user, developer). + sig { returns(String) } + attr_accessor :role + + # An input message. + sig do + params(content: String, role: String).returns(T.attached_class) + end + def self.new( + # The content of the message. + content:, + # The role of the message sender (e.g., system, user, developer). + role: + ) + end + + sig { override.returns({ content: String, role: String }) } + def to_hash + end + end + + class Output < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Output, + OpenAI::Internal::AnyHash + ) + end + + # The content of the message. + sig { returns(T.nilable(String)) } + attr_reader :content + + sig { params(content: String).void } + attr_writer :content + + # The role of the message (e.g. "system", "assistant", "user"). + sig { returns(T.nilable(String)) } + attr_reader :role + + sig { params(role: String).void } + attr_writer :role + + sig do + params(content: String, role: String).returns(T.attached_class) + end + def self.new( + # The content of the message. + content: nil, + # The role of the message (e.g. "system", "assistant", "user"). + role: nil + ) + end + + sig { override.returns({ content: String, role: String }) } + def to_hash + end + end + + class Usage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage, + OpenAI::Internal::AnyHash + ) + end + + # The number of tokens retrieved from cache. 
+ sig { returns(Integer) } + attr_accessor :cached_tokens + + # The number of completion tokens generated. + sig { returns(Integer) } + attr_accessor :completion_tokens + + # The number of prompt tokens used. + sig { returns(Integer) } + attr_accessor :prompt_tokens + + # The total number of tokens used. + sig { returns(Integer) } + attr_accessor :total_tokens + + # Token usage details for the sample. + sig do + params( + cached_tokens: Integer, + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # The number of tokens retrieved from cache. + cached_tokens:, + # The number of completion tokens generated. + completion_tokens:, + # The number of prompt tokens used. + prompt_tokens:, + # The total number of tokens used. + total_tokens: + ) + end + + sig do + override.returns( + { + cached_tokens: Integer, + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + } + ) + end + def to_hash + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/evals/runs/output_item_retrieve_params.rbi b/rbi/openai/models/evals/runs/output_item_retrieve_params.rbi new file mode 100644 index 00000000..59e73a99 --- /dev/null +++ b/rbi/openai/models/evals/runs/output_item_retrieve_params.rbi @@ -0,0 +1,50 @@ +# typed: strong + +module OpenAI + module Models + module Evals + module Runs + class OutputItemRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Evals::Runs::OutputItemRetrieveParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :eval_id + + sig { returns(String) } + attr_accessor :run_id + + sig do + params( + eval_id: String, + run_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(eval_id:, run_id:, request_options: {}) + end + + sig do + override.returns( + { + eval_id: String, + run_id: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/evals/runs/output_item_retrieve_response.rbi b/rbi/openai/models/evals/runs/output_item_retrieve_response.rbi new file mode 100644 index 00000000..8a39ad5e --- /dev/null +++ b/rbi/openai/models/evals/runs/output_item_retrieve_response.rbi @@ -0,0 +1,413 @@ +# typed: strong + +module OpenAI + module Models + module Evals + module Runs + class OutputItemRetrieveResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse, + OpenAI::Internal::AnyHash + ) + end + + # Unique identifier for the evaluation run output item. + sig { returns(String) } + attr_accessor :id + + # Unix timestamp (in seconds) when the evaluation run was created. + sig { returns(Integer) } + attr_accessor :created_at + + # Details of the input data source item. + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :datasource_item + + # The identifier for the data source item. + sig { returns(Integer) } + attr_accessor :datasource_item_id + + # The identifier of the evaluation group. + sig { returns(String) } + attr_accessor :eval_id + + # The type of the object. Always "eval.run.output_item". + sig { returns(Symbol) } + attr_accessor :object + + # A list of results from the evaluation run. 
+ sig { returns(T::Array[T::Hash[Symbol, T.anything]]) } + attr_accessor :results + + # The identifier of the evaluation run associated with this output item. + sig { returns(String) } + attr_accessor :run_id + + # A sample containing the input and output of the evaluation run. + sig do + returns( + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample + ) + end + attr_reader :sample + + sig do + params( + sample: + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::OrHash + ).void + end + attr_writer :sample + + # The status of the evaluation run. + sig { returns(String) } + attr_accessor :status + + # A schema representing an evaluation run output item. + sig do + params( + id: String, + created_at: Integer, + datasource_item: T::Hash[Symbol, T.anything], + datasource_item_id: Integer, + eval_id: String, + results: T::Array[T::Hash[Symbol, T.anything]], + run_id: String, + sample: + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::OrHash, + status: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for the evaluation run output item. + id:, + # Unix timestamp (in seconds) when the evaluation run was created. + created_at:, + # Details of the input data source item. + datasource_item:, + # The identifier for the data source item. + datasource_item_id:, + # The identifier of the evaluation group. + eval_id:, + # A list of results from the evaluation run. + results:, + # The identifier of the evaluation run associated with this output item. + run_id:, + # A sample containing the input and output of the evaluation run. + sample:, + # The status of the evaluation run. + status:, + # The type of the object. Always "eval.run.output_item". + object: :"eval.run.output_item" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + datasource_item: T::Hash[Symbol, T.anything], + datasource_item_id: Integer, + eval_id: String, + object: Symbol, + results: T::Array[T::Hash[Symbol, T.anything]], + run_id: String, + sample: + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample, + status: String + } + ) + end + def to_hash + end + + class Sample < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample, + OpenAI::Internal::AnyHash + ) + end + + # An object representing an error response from the Eval API. + sig { returns(OpenAI::Evals::EvalAPIError) } + attr_reader :error + + sig { params(error: OpenAI::Evals::EvalAPIError::OrHash).void } + attr_writer :error + + # The reason why the sample generation was finished. + sig { returns(String) } + attr_accessor :finish_reason + + # An array of input messages. + sig do + returns( + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input + ] + ) + end + attr_accessor :input + + # The maximum number of tokens allowed for completion. + sig { returns(Integer) } + attr_accessor :max_completion_tokens + + # The model used for generating the sample. + sig { returns(String) } + attr_accessor :model + + # An array of output messages. + sig do + returns( + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output + ] + ) + end + attr_accessor :output + + # The seed used for generating the sample. + sig { returns(Integer) } + attr_accessor :seed + + # The sampling temperature used. + sig { returns(Float) } + attr_accessor :temperature + + # The top_p value used for sampling. 
+ sig { returns(Float) } + attr_accessor :top_p + + # Token usage details for the sample. + sig do + returns( + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage + ) + end + attr_reader :usage + + sig do + params( + usage: + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage::OrHash + ).void + end + attr_writer :usage + + # A sample containing the input and output of the evaluation run. + sig do + params( + error: OpenAI::Evals::EvalAPIError::OrHash, + finish_reason: String, + input: + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input::OrHash + ], + max_completion_tokens: Integer, + model: String, + output: + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output::OrHash + ], + seed: Integer, + temperature: Float, + top_p: Float, + usage: + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage::OrHash + ).returns(T.attached_class) + end + def self.new( + # An object representing an error response from the Eval API. + error:, + # The reason why the sample generation was finished. + finish_reason:, + # An array of input messages. + input:, + # The maximum number of tokens allowed for completion. + max_completion_tokens:, + # The model used for generating the sample. + model:, + # An array of output messages. + output:, + # The seed used for generating the sample. + seed:, + # The sampling temperature used. + temperature:, + # The top_p value used for sampling. + top_p:, + # Token usage details for the sample. + usage: + ) + end + + sig do + override.returns( + { + error: OpenAI::Evals::EvalAPIError, + finish_reason: String, + input: + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input + ], + max_completion_tokens: Integer, + model: String, + output: + T::Array[ + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output + ], + seed: Integer, + temperature: Float, + top_p: Float, + usage: + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage + } + ) + end + def to_hash + end + + class Input < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input, + OpenAI::Internal::AnyHash + ) + end + + # The content of the message. + sig { returns(String) } + attr_accessor :content + + # The role of the message sender (e.g., system, user, developer). + sig { returns(String) } + attr_accessor :role + + # An input message. + sig do + params(content: String, role: String).returns(T.attached_class) + end + def self.new( + # The content of the message. + content:, + # The role of the message sender (e.g., system, user, developer). + role: + ) + end + + sig { override.returns({ content: String, role: String }) } + def to_hash + end + end + + class Output < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output, + OpenAI::Internal::AnyHash + ) + end + + # The content of the message. + sig { returns(T.nilable(String)) } + attr_reader :content + + sig { params(content: String).void } + attr_writer :content + + # The role of the message (e.g. "system", "assistant", "user"). + sig { returns(T.nilable(String)) } + attr_reader :role + + sig { params(role: String).void } + attr_writer :role + + sig do + params(content: String, role: String).returns(T.attached_class) + end + def self.new( + # The content of the message. 
+ content: nil, + # The role of the message (e.g. "system", "assistant", "user"). + role: nil + ) + end + + sig { override.returns({ content: String, role: String }) } + def to_hash + end + end + + class Usage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage, + OpenAI::Internal::AnyHash + ) + end + + # The number of tokens retrieved from cache. + sig { returns(Integer) } + attr_accessor :cached_tokens + + # The number of completion tokens generated. + sig { returns(Integer) } + attr_accessor :completion_tokens + + # The number of prompt tokens used. + sig { returns(Integer) } + attr_accessor :prompt_tokens + + # The total number of tokens used. + sig { returns(Integer) } + attr_accessor :total_tokens + + # Token usage details for the sample. + sig do + params( + cached_tokens: Integer, + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # The number of tokens retrieved from cache. + cached_tokens:, + # The number of completion tokens generated. + completion_tokens:, + # The number of prompt tokens used. + prompt_tokens:, + # The total number of tokens used. + total_tokens: + ) + end + + sig do + override.returns( + { + cached_tokens: Integer, + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + } + ) + end + def to_hash + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/file_chunking_strategy.rbi b/rbi/openai/models/file_chunking_strategy.rbi new file mode 100644 index 00000000..74e488c7 --- /dev/null +++ b/rbi/openai/models/file_chunking_strategy.rbi @@ -0,0 +1,22 @@ +# typed: strong + +module OpenAI + module Models + # The strategy used to chunk the file. + module FileChunkingStrategy + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::StaticFileChunkingStrategyObject, + OpenAI::OtherFileChunkingStrategyObject + ) + end + + sig { override.returns(T::Array[OpenAI::FileChunkingStrategy::Variants]) } + def self.variants + end + end + end +end diff --git a/rbi/openai/models/file_chunking_strategy_param.rbi b/rbi/openai/models/file_chunking_strategy_param.rbi new file mode 100644 index 00000000..d8588bdf --- /dev/null +++ b/rbi/openai/models/file_chunking_strategy_param.rbi @@ -0,0 +1,25 @@ +# typed: strong + +module OpenAI + module Models + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. 
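+    #
+    # Either variant below may be supplied. A minimal sketch of the `auto`
+    # variant, assuming its default, argument-free construction:
+    #
+    #   OpenAI::AutoFileChunkingStrategyParam.new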
+ module FileChunkingStrategyParam + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::AutoFileChunkingStrategyParam, + OpenAI::StaticFileChunkingStrategyObjectParam + ) + end + + sig do + override.returns(T::Array[OpenAI::FileChunkingStrategyParam::Variants]) + end + def self.variants + end + end + end +end diff --git a/rbi/openai/models/file_content.rbi b/rbi/openai/models/file_content.rbi new file mode 100644 index 00000000..92b8b41a --- /dev/null +++ b/rbi/openai/models/file_content.rbi @@ -0,0 +1,7 @@ +# typed: strong + +module OpenAI + module Models + FileContent = String + end +end diff --git a/rbi/openai/models/file_content_params.rbi b/rbi/openai/models/file_content_params.rbi new file mode 100644 index 00000000..4d403623 --- /dev/null +++ b/rbi/openai/models/file_content_params.rbi @@ -0,0 +1,27 @@ +# typed: strong + +module OpenAI + module Models + class FileContentParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::FileContentParams, OpenAI::Internal::AnyHash) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/file_create_params.rbi b/rbi/openai/models/file_create_params.rbi new file mode 100644 index 00000000..ef0c24d4 --- /dev/null +++ b/rbi/openai/models/file_create_params.rbi @@ -0,0 +1,113 @@ +# typed: strong + +module OpenAI + module Models + class FileCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::FileCreateParams, OpenAI::Internal::AnyHash) + end + + # The File object (not file name) to be uploaded. + sig { returns(OpenAI::Internal::FileInput) } + attr_accessor :file + + # The intended purpose of the uploaded file. One of: - `assistants`: Used in the + # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + # Flexible file type for any purpose - `evals`: Used for eval data sets + sig { returns(OpenAI::FilePurpose::OrSymbol) } + attr_accessor :purpose + + # The expiration policy for a file. By default, files with `purpose=batch` expire + # after 30 days and all other files are persisted until they are manually deleted. + sig { returns(T.nilable(OpenAI::FileCreateParams::ExpiresAfter)) } + attr_reader :expires_after + + sig do + params( + expires_after: OpenAI::FileCreateParams::ExpiresAfter::OrHash + ).void + end + attr_writer :expires_after + + sig do + params( + file: OpenAI::Internal::FileInput, + purpose: OpenAI::FilePurpose::OrSymbol, + expires_after: OpenAI::FileCreateParams::ExpiresAfter::OrHash, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The File object (not file name) to be uploaded. + file:, + # The intended purpose of the uploaded file. 
One of: - `assistants`: Used in the + # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + # Flexible file type for any purpose - `evals`: Used for eval data sets + purpose:, + # The expiration policy for a file. By default, files with `purpose=batch` expire + # after 30 days and all other files are persisted until they are manually deleted. + expires_after: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + file: OpenAI::Internal::FileInput, + purpose: OpenAI::FilePurpose::OrSymbol, + expires_after: OpenAI::FileCreateParams::ExpiresAfter, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FileCreateParams::ExpiresAfter, + OpenAI::Internal::AnyHash + ) + end + + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `created_at`. + sig { returns(Symbol) } + attr_accessor :anchor + + # The number of seconds after the anchor time that the file will expire. Must be + # between 3600 (1 hour) and 2592000 (30 days). + sig { returns(Integer) } + attr_accessor :seconds + + # The expiration policy for a file. By default, files with `purpose=batch` expire + # after 30 days and all other files are persisted until they are manually deleted. + sig do + params(seconds: Integer, anchor: Symbol).returns(T.attached_class) + end + def self.new( + # The number of seconds after the anchor time that the file will expire. Must be + # between 3600 (1 hour) and 2592000 (30 days). + seconds:, + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `created_at`. + anchor: :created_at + ) + end + + sig { override.returns({ anchor: Symbol, seconds: Integer }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/file_delete_params.rbi b/rbi/openai/models/file_delete_params.rbi new file mode 100644 index 00000000..8c870046 --- /dev/null +++ b/rbi/openai/models/file_delete_params.rbi @@ -0,0 +1,27 @@ +# typed: strong + +module OpenAI + module Models + class FileDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::FileDeleteParams, OpenAI::Internal::AnyHash) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/file_deleted.rbi b/rbi/openai/models/file_deleted.rbi new file mode 100644 index 00000000..c8a74d38 --- /dev/null +++ b/rbi/openai/models/file_deleted.rbi @@ -0,0 +1,33 @@ +# typed: strong + +module OpenAI + module Models + class FileDeleted < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias { T.any(OpenAI::FileDeleted, OpenAI::Internal::AnyHash) } + + sig { returns(String) } + attr_accessor :id + + sig { returns(T::Boolean) } + attr_accessor :deleted + + sig { returns(Symbol) } + attr_accessor :object + + sig do + params(id: String, deleted: T::Boolean, object: Symbol).returns( + T.attached_class + ) + end + def self.new(id:, deleted:, object: :file) + end + + sig do + override.returns({ id: String, deleted: T::Boolean, object: Symbol }) + end + def to_hash + end + end + end 
+end diff --git a/rbi/openai/models/file_list_params.rbi b/rbi/openai/models/file_list_params.rbi new file mode 100644 index 00000000..0fb370b2 --- /dev/null +++ b/rbi/openai/models/file_list_params.rbi @@ -0,0 +1,110 @@ +# typed: strong + +module OpenAI + module Models + class FileListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::FileListParams, OpenAI::Internal::AnyHash) + end + + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # A limit on the number of objects to be returned. Limit can range between 1 and + # 10,000, and the default is 10,000. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + sig { returns(T.nilable(OpenAI::FileListParams::Order::OrSymbol)) } + attr_reader :order + + sig { params(order: OpenAI::FileListParams::Order::OrSymbol).void } + attr_writer :order + + # Only return files with the given purpose. + sig { returns(T.nilable(String)) } + attr_reader :purpose + + sig { params(purpose: String).void } + attr_writer :purpose + + sig do + params( + after: String, + limit: Integer, + order: OpenAI::FileListParams::Order::OrSymbol, + purpose: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 10,000, and the default is 10,000. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + # Only return files with the given purpose. + purpose: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + limit: Integer, + order: OpenAI::FileListParams::Order::OrSymbol, + purpose: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. 
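+      #
+      # A hypothetical listing call combining these params (the `client`
+      # receiver is assumed):
+      #
+      #   client.files.list(order: :desc, purpose: "assistants")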
+ module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::FileListParams::Order) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = T.let(:asc, OpenAI::FileListParams::Order::TaggedSymbol) + DESC = T.let(:desc, OpenAI::FileListParams::Order::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::FileListParams::Order::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/file_object.rbi b/rbi/openai/models/file_object.rbi new file mode 100644 index 00000000..8f7ab4ab --- /dev/null +++ b/rbi/openai/models/file_object.rbi @@ -0,0 +1,165 @@ +# typed: strong + +module OpenAI + module Models + class FileObject < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias { T.any(OpenAI::FileObject, OpenAI::Internal::AnyHash) } + + # The file identifier, which can be referenced in the API endpoints. + sig { returns(String) } + attr_accessor :id + + # The size of the file, in bytes. + sig { returns(Integer) } + attr_accessor :bytes + + # The Unix timestamp (in seconds) for when the file was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The name of the file. + sig { returns(String) } + attr_accessor :filename + + # The object type, which is always `file`. + sig { returns(Symbol) } + attr_accessor :object + + # The intended purpose of the file. Supported values are `assistants`, + # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results`, + # `vision`, and `user_data`. + sig { returns(OpenAI::FileObject::Purpose::TaggedSymbol) } + attr_accessor :purpose + + # Deprecated. The current status of the file, which can be either `uploaded`, + # `processed`, or `error`. + sig { returns(OpenAI::FileObject::Status::TaggedSymbol) } + attr_accessor :status + + # The Unix timestamp (in seconds) for when the file will expire. + sig { returns(T.nilable(Integer)) } + attr_reader :expires_at + + sig { params(expires_at: Integer).void } + attr_writer :expires_at + + # Deprecated. For details on why a fine-tuning training file failed validation, + # see the `error` field on `fine_tuning.job`. + sig { returns(T.nilable(String)) } + attr_reader :status_details + + sig { params(status_details: String).void } + attr_writer :status_details + + # The `File` object represents a document that has been uploaded to OpenAI. + sig do + params( + id: String, + bytes: Integer, + created_at: Integer, + filename: String, + purpose: OpenAI::FileObject::Purpose::OrSymbol, + status: OpenAI::FileObject::Status::OrSymbol, + expires_at: Integer, + status_details: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The file identifier, which can be referenced in the API endpoints. + id:, + # The size of the file, in bytes. + bytes:, + # The Unix timestamp (in seconds) for when the file was created. + created_at:, + # The name of the file. + filename:, + # The intended purpose of the file. Supported values are `assistants`, + # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results`, + # `vision`, and `user_data`. + purpose:, + # Deprecated. The current status of the file, which can be either `uploaded`, + # `processed`, or `error`. + status:, + # The Unix timestamp (in seconds) for when the file will expire. + expires_at: nil, + # Deprecated. For details on why a fine-tuning training file failed validation, + # see the `error` field on `fine_tuning.job`. + status_details: nil, + # The object type, which is always `file`. 
+ object: :file + ) + end + + sig do + override.returns( + { + id: String, + bytes: Integer, + created_at: Integer, + filename: String, + object: Symbol, + purpose: OpenAI::FileObject::Purpose::TaggedSymbol, + status: OpenAI::FileObject::Status::TaggedSymbol, + expires_at: Integer, + status_details: String + } + ) + end + def to_hash + end + + # The intended purpose of the file. Supported values are `assistants`, + # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results`, + # `vision`, and `user_data`. + module Purpose + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::FileObject::Purpose) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASSISTANTS = + T.let(:assistants, OpenAI::FileObject::Purpose::TaggedSymbol) + ASSISTANTS_OUTPUT = + T.let(:assistants_output, OpenAI::FileObject::Purpose::TaggedSymbol) + BATCH = T.let(:batch, OpenAI::FileObject::Purpose::TaggedSymbol) + BATCH_OUTPUT = + T.let(:batch_output, OpenAI::FileObject::Purpose::TaggedSymbol) + FINE_TUNE = + T.let(:"fine-tune", OpenAI::FileObject::Purpose::TaggedSymbol) + FINE_TUNE_RESULTS = + T.let(:"fine-tune-results", OpenAI::FileObject::Purpose::TaggedSymbol) + VISION = T.let(:vision, OpenAI::FileObject::Purpose::TaggedSymbol) + USER_DATA = T.let(:user_data, OpenAI::FileObject::Purpose::TaggedSymbol) + + sig do + override.returns(T::Array[OpenAI::FileObject::Purpose::TaggedSymbol]) + end + def self.values + end + end + + # Deprecated. The current status of the file, which can be either `uploaded`, + # `processed`, or `error`. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::FileObject::Status) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + UPLOADED = T.let(:uploaded, OpenAI::FileObject::Status::TaggedSymbol) + PROCESSED = T.let(:processed, OpenAI::FileObject::Status::TaggedSymbol) + ERROR = T.let(:error, OpenAI::FileObject::Status::TaggedSymbol) + + sig do + override.returns(T::Array[OpenAI::FileObject::Status::TaggedSymbol]) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/file_purpose.rbi b/rbi/openai/models/file_purpose.rbi new file mode 100644 index 00000000..6b9af2cd --- /dev/null +++ b/rbi/openai/models/file_purpose.rbi @@ -0,0 +1,27 @@ +# typed: strong + +module OpenAI + module Models + # The intended purpose of the uploaded file. 
One of: - `assistants`: Used in the + # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + # Flexible file type for any purpose - `evals`: Used for eval data sets + module FilePurpose + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::FilePurpose) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASSISTANTS = T.let(:assistants, OpenAI::FilePurpose::TaggedSymbol) + BATCH = T.let(:batch, OpenAI::FilePurpose::TaggedSymbol) + FINE_TUNE = T.let(:"fine-tune", OpenAI::FilePurpose::TaggedSymbol) + VISION = T.let(:vision, OpenAI::FilePurpose::TaggedSymbol) + USER_DATA = T.let(:user_data, OpenAI::FilePurpose::TaggedSymbol) + EVALS = T.let(:evals, OpenAI::FilePurpose::TaggedSymbol) + + sig { override.returns(T::Array[OpenAI::FilePurpose::TaggedSymbol]) } + def self.values + end + end + end +end diff --git a/rbi/openai/models/file_retrieve_params.rbi b/rbi/openai/models/file_retrieve_params.rbi new file mode 100644 index 00000000..f5c3c562 --- /dev/null +++ b/rbi/openai/models/file_retrieve_params.rbi @@ -0,0 +1,27 @@ +# typed: strong + +module OpenAI + module Models + class FileRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::FileRetrieveParams, OpenAI::Internal::AnyHash) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/alpha/grader_run_params.rbi b/rbi/openai/models/fine_tuning/alpha/grader_run_params.rbi new file mode 100644 index 00000000..f9834f5b --- /dev/null +++ b/rbi/openai/models/fine_tuning/alpha/grader_run_params.rbi @@ -0,0 +1,131 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + module Alpha + class GraderRunParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::Alpha::GraderRunParams, + OpenAI::Internal::AnyHash + ) + end + + # The grader used for the fine-tuning job. + sig do + returns( + T.any( + OpenAI::Graders::StringCheckGrader, + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Graders::PythonGrader, + OpenAI::Graders::ScoreModelGrader, + OpenAI::Graders::MultiGrader + ) + ) + end + attr_accessor :grader + + # The model sample to be evaluated. This value will be used to populate the + # `sample` namespace. See + # [the guide](https://platform.openai.com/docs/guides/graders) for more details. + # The `output_json` variable will be populated if the model sample is a valid JSON + # string. + sig { returns(String) } + attr_accessor :model_sample + + # The dataset item provided to the grader. This will be used to populate the + # `item` namespace. See + # [the guide](https://platform.openai.com/docs/guides/graders) for more details. 
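+          #
+          # A hypothetical dataset row passed as a plain hash (the keys are
+          # illustrative only):
+          #
+          #   item: { input: "What is 2 + 2?", reference_answer: "4" }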
+ sig { returns(T.nilable(T.anything)) } + attr_reader :item + + sig { params(item: T.anything).void } + attr_writer :item + + sig do + params( + grader: + T.any( + OpenAI::Graders::StringCheckGrader::OrHash, + OpenAI::Graders::TextSimilarityGrader::OrHash, + OpenAI::Graders::PythonGrader::OrHash, + OpenAI::Graders::ScoreModelGrader::OrHash, + OpenAI::Graders::MultiGrader::OrHash + ), + model_sample: String, + item: T.anything, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The grader used for the fine-tuning job. + grader:, + # The model sample to be evaluated. This value will be used to populate the + # `sample` namespace. See + # [the guide](https://platform.openai.com/docs/guides/graders) for more details. + # The `output_json` variable will be populated if the model sample is a valid JSON + # string. + model_sample:, + # The dataset item provided to the grader. This will be used to populate the + # `item` namespace. See + # [the guide](https://platform.openai.com/docs/guides/graders) for more details. + item: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + grader: + T.any( + OpenAI::Graders::StringCheckGrader, + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Graders::PythonGrader, + OpenAI::Graders::ScoreModelGrader, + OpenAI::Graders::MultiGrader + ), + model_sample: String, + item: T.anything, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # The grader used for the fine-tuning job. + module Grader + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Graders::StringCheckGrader, + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Graders::PythonGrader, + OpenAI::Graders::ScoreModelGrader, + OpenAI::Graders::MultiGrader + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::Alpha::GraderRunParams::Grader::Variants + ] + ) + end + def self.variants + end + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/alpha/grader_run_response.rbi b/rbi/openai/models/fine_tuning/alpha/grader_run_response.rbi new file mode 100644 index 00000000..18e6589a --- /dev/null +++ b/rbi/openai/models/fine_tuning/alpha/grader_run_response.rbi @@ -0,0 +1,268 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + module Alpha + class GraderRunResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::FineTuning::Alpha::GraderRunResponse, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata + ) + end + attr_reader :metadata + + sig do + params( + metadata: + OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::OrHash + ).void + end + attr_writer :metadata + + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :model_grader_token_usage_per_model + + sig { returns(Float) } + attr_accessor :reward + + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :sub_rewards + + sig do + params( + metadata: + OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::OrHash, + model_grader_token_usage_per_model: T::Hash[Symbol, T.anything], + reward: Float, + sub_rewards: T::Hash[Symbol, T.anything] + ).returns(T.attached_class) + end + def self.new( + metadata:, + model_grader_token_usage_per_model:, + reward:, + sub_rewards: + ) + end + + sig do + override.returns( + { + metadata: + OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata, + 
model_grader_token_usage_per_model: T::Hash[Symbol, T.anything], + reward: Float, + sub_rewards: T::Hash[Symbol, T.anything] + } + ) + end + def to_hash + end + + class Metadata < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::Errors + ) + end + attr_reader :errors + + sig do + params( + errors: + OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::Errors::OrHash + ).void + end + attr_writer :errors + + sig { returns(Float) } + attr_accessor :execution_time + + sig { returns(String) } + attr_accessor :name + + sig { returns(T.nilable(String)) } + attr_accessor :sampled_model_name + + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :scores + + sig { returns(T.nilable(Integer)) } + attr_accessor :token_usage + + sig { returns(String) } + attr_accessor :type + + sig do + params( + errors: + OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::Errors::OrHash, + execution_time: Float, + name: String, + sampled_model_name: T.nilable(String), + scores: T::Hash[Symbol, T.anything], + token_usage: T.nilable(Integer), + type: String + ).returns(T.attached_class) + end + def self.new( + errors:, + execution_time:, + name:, + sampled_model_name:, + scores:, + token_usage:, + type: + ) + end + + sig do + override.returns( + { + errors: + OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::Errors, + execution_time: Float, + name: String, + sampled_model_name: T.nilable(String), + scores: T::Hash[Symbol, T.anything], + token_usage: T.nilable(Integer), + type: String + } + ) + end + def to_hash + end + + class Errors < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::Errors, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(T::Boolean) } + attr_accessor :formula_parse_error + + sig { returns(T::Boolean) } + attr_accessor :invalid_variable_error + + sig { returns(T::Boolean) } + attr_accessor :model_grader_parse_error + + sig { returns(T::Boolean) } + attr_accessor :model_grader_refusal_error + + sig { returns(T::Boolean) } + attr_accessor :model_grader_server_error + + sig { returns(T.nilable(String)) } + attr_accessor :model_grader_server_error_details + + sig { returns(T::Boolean) } + attr_accessor :other_error + + sig { returns(T::Boolean) } + attr_accessor :python_grader_runtime_error + + sig { returns(T.nilable(String)) } + attr_accessor :python_grader_runtime_error_details + + sig { returns(T::Boolean) } + attr_accessor :python_grader_server_error + + sig { returns(T.nilable(String)) } + attr_accessor :python_grader_server_error_type + + sig { returns(T::Boolean) } + attr_accessor :sample_parse_error + + sig { returns(T::Boolean) } + attr_accessor :truncated_observation_error + + sig { returns(T::Boolean) } + attr_accessor :unresponsive_reward_error + + sig do + params( + formula_parse_error: T::Boolean, + invalid_variable_error: T::Boolean, + model_grader_parse_error: T::Boolean, + model_grader_refusal_error: T::Boolean, + model_grader_server_error: T::Boolean, + model_grader_server_error_details: T.nilable(String), + other_error: T::Boolean, + python_grader_runtime_error: T::Boolean, + python_grader_runtime_error_details: T.nilable(String), + python_grader_server_error: T::Boolean, + python_grader_server_error_type: T.nilable(String), + 
sample_parse_error: T::Boolean, + truncated_observation_error: T::Boolean, + unresponsive_reward_error: T::Boolean + ).returns(T.attached_class) + end + def self.new( + formula_parse_error:, + invalid_variable_error:, + model_grader_parse_error:, + model_grader_refusal_error:, + model_grader_server_error:, + model_grader_server_error_details:, + other_error:, + python_grader_runtime_error:, + python_grader_runtime_error_details:, + python_grader_server_error:, + python_grader_server_error_type:, + sample_parse_error:, + truncated_observation_error:, + unresponsive_reward_error: + ) + end + + sig do + override.returns( + { + formula_parse_error: T::Boolean, + invalid_variable_error: T::Boolean, + model_grader_parse_error: T::Boolean, + model_grader_refusal_error: T::Boolean, + model_grader_server_error: T::Boolean, + model_grader_server_error_details: T.nilable(String), + other_error: T::Boolean, + python_grader_runtime_error: T::Boolean, + python_grader_runtime_error_details: T.nilable(String), + python_grader_server_error: T::Boolean, + python_grader_server_error_type: T.nilable(String), + sample_parse_error: T::Boolean, + truncated_observation_error: T::Boolean, + unresponsive_reward_error: T::Boolean + } + ) + end + def to_hash + end + end + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/alpha/grader_validate_params.rbi b/rbi/openai/models/fine_tuning/alpha/grader_validate_params.rbi new file mode 100644 index 00000000..f8dc6077 --- /dev/null +++ b/rbi/openai/models/fine_tuning/alpha/grader_validate_params.rbi @@ -0,0 +1,100 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + module Alpha + class GraderValidateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::Alpha::GraderValidateParams, + OpenAI::Internal::AnyHash + ) + end + + # The grader used for the fine-tuning job. + sig do + returns( + T.any( + OpenAI::Graders::StringCheckGrader, + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Graders::PythonGrader, + OpenAI::Graders::ScoreModelGrader, + OpenAI::Graders::MultiGrader + ) + ) + end + attr_accessor :grader + + sig do + params( + grader: + T.any( + OpenAI::Graders::StringCheckGrader::OrHash, + OpenAI::Graders::TextSimilarityGrader::OrHash, + OpenAI::Graders::PythonGrader::OrHash, + OpenAI::Graders::ScoreModelGrader::OrHash, + OpenAI::Graders::MultiGrader::OrHash + ), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The grader used for the fine-tuning job. + grader:, + request_options: {} + ) + end + + sig do + override.returns( + { + grader: + T.any( + OpenAI::Graders::StringCheckGrader, + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Graders::PythonGrader, + OpenAI::Graders::ScoreModelGrader, + OpenAI::Graders::MultiGrader + ), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # The grader used for the fine-tuning job. 
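+          #
+          # Any of the variants below may be passed, either as a typed model or
+          # as a plain hash. A hypothetical string-check grader (field names
+          # follow `OpenAI::Graders::StringCheckGrader`; values are
+          # illustrative):
+          #
+          #   {
+          #     type: :string_check,
+          #     name: "exact_match",
+          #     input: "{{sample.output_text}}",
+          #     reference: "{{item.answer}}",
+          #     operation: :eq
+          #   }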
+ module Grader + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Graders::StringCheckGrader, + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Graders::PythonGrader, + OpenAI::Graders::ScoreModelGrader, + OpenAI::Graders::MultiGrader + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::Alpha::GraderValidateParams::Grader::Variants + ] + ) + end + def self.variants + end + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/alpha/grader_validate_response.rbi b/rbi/openai/models/fine_tuning/alpha/grader_validate_response.rbi new file mode 100644 index 00000000..20651675 --- /dev/null +++ b/rbi/openai/models/fine_tuning/alpha/grader_validate_response.rbi @@ -0,0 +1,98 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + module Alpha + class GraderValidateResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::FineTuning::Alpha::GraderValidateResponse, + OpenAI::Internal::AnyHash + ) + end + + # The grader used for the fine-tuning job. + sig do + returns( + T.nilable( + OpenAI::Models::FineTuning::Alpha::GraderValidateResponse::Grader::Variants + ) + ) + end + attr_reader :grader + + sig do + params( + grader: + T.any( + OpenAI::Graders::StringCheckGrader::OrHash, + OpenAI::Graders::TextSimilarityGrader::OrHash, + OpenAI::Graders::PythonGrader::OrHash, + OpenAI::Graders::ScoreModelGrader::OrHash, + OpenAI::Graders::MultiGrader::OrHash + ) + ).void + end + attr_writer :grader + + sig do + params( + grader: + T.any( + OpenAI::Graders::StringCheckGrader::OrHash, + OpenAI::Graders::TextSimilarityGrader::OrHash, + OpenAI::Graders::PythonGrader::OrHash, + OpenAI::Graders::ScoreModelGrader::OrHash, + OpenAI::Graders::MultiGrader::OrHash + ) + ).returns(T.attached_class) + end + def self.new( + # The grader used for the fine-tuning job. + grader: nil + ) + end + + sig do + override.returns( + { + grader: + OpenAI::Models::FineTuning::Alpha::GraderValidateResponse::Grader::Variants + } + ) + end + def to_hash + end + + # The grader used for the fine-tuning job. + module Grader + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Graders::StringCheckGrader, + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Graders::PythonGrader, + OpenAI::Graders::ScoreModelGrader, + OpenAI::Graders::MultiGrader + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Models::FineTuning::Alpha::GraderValidateResponse::Grader::Variants + ] + ) + end + def self.variants + end + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/checkpoints/permission_create_params.rbi b/rbi/openai/models/fine_tuning/checkpoints/permission_create_params.rbi new file mode 100644 index 00000000..26583ba9 --- /dev/null +++ b/rbi/openai/models/fine_tuning/checkpoints/permission_create_params.rbi @@ -0,0 +1,50 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + module Checkpoints + class PermissionCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::Checkpoints::PermissionCreateParams, + OpenAI::Internal::AnyHash + ) + end + + # The project identifiers to grant access to. 
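+          #
+          # For example, `project_ids: ["proj_abc123"]` (the identifier format
+          # shown is illustrative).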
+ sig { returns(T::Array[String]) } + attr_accessor :project_ids + + sig do + params( + project_ids: T::Array[String], + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The project identifiers to grant access to. + project_ids:, + request_options: {} + ) + end + + sig do + override.returns( + { + project_ids: T::Array[String], + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/checkpoints/permission_create_response.rbi b/rbi/openai/models/fine_tuning/checkpoints/permission_create_response.rbi new file mode 100644 index 00000000..356802c5 --- /dev/null +++ b/rbi/openai/models/fine_tuning/checkpoints/permission_create_response.rbi @@ -0,0 +1,70 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + module Checkpoints + class PermissionCreateResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::FineTuning::Checkpoints::PermissionCreateResponse, + OpenAI::Internal::AnyHash + ) + end + + # The permission identifier, which can be referenced in the API endpoints. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) for when the permission was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The object type, which is always "checkpoint.permission". + sig { returns(Symbol) } + attr_accessor :object + + # The project identifier that the permission is for. + sig { returns(String) } + attr_accessor :project_id + + # The `checkpoint.permission` object represents a permission for a fine-tuned + # model checkpoint. + sig do + params( + id: String, + created_at: Integer, + project_id: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The permission identifier, which can be referenced in the API endpoints. + id:, + # The Unix timestamp (in seconds) for when the permission was created. + created_at:, + # The project identifier that the permission is for. + project_id:, + # The object type, which is always "checkpoint.permission". 
+ object: :"checkpoint.permission" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + object: Symbol, + project_id: String + } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/checkpoints/permission_delete_params.rbi b/rbi/openai/models/fine_tuning/checkpoints/permission_delete_params.rbi new file mode 100644 index 00000000..8051825a --- /dev/null +++ b/rbi/openai/models/fine_tuning/checkpoints/permission_delete_params.rbi @@ -0,0 +1,45 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + module Checkpoints + class PermissionDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::Checkpoints::PermissionDeleteParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :fine_tuned_model_checkpoint + + sig do + params( + fine_tuned_model_checkpoint: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(fine_tuned_model_checkpoint:, request_options: {}) + end + + sig do + override.returns( + { + fine_tuned_model_checkpoint: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/checkpoints/permission_delete_response.rbi b/rbi/openai/models/fine_tuning/checkpoints/permission_delete_response.rbi new file mode 100644 index 00000000..b0b5d45b --- /dev/null +++ b/rbi/openai/models/fine_tuning/checkpoints/permission_delete_response.rbi @@ -0,0 +1,54 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + module Checkpoints + class PermissionDeleteResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::FineTuning::Checkpoints::PermissionDeleteResponse, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the fine-tuned model checkpoint permission that was deleted. + sig { returns(String) } + attr_accessor :id + + # Whether the fine-tuned model checkpoint permission was successfully deleted. + sig { returns(T::Boolean) } + attr_accessor :deleted + + # The object type, which is always "checkpoint.permission". + sig { returns(Symbol) } + attr_accessor :object + + sig do + params(id: String, deleted: T::Boolean, object: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The ID of the fine-tuned model checkpoint permission that was deleted. + id:, + # Whether the fine-tuned model checkpoint permission was successfully deleted. + deleted:, + # The object type, which is always "checkpoint.permission". 
+ object: :"checkpoint.permission" + ) + end + + sig do + override.returns( + { id: String, deleted: T::Boolean, object: Symbol } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_params.rbi b/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_params.rbi new file mode 100644 index 00000000..56996168 --- /dev/null +++ b/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_params.rbi @@ -0,0 +1,134 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + module Checkpoints + class PermissionRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::Checkpoints::PermissionRetrieveParams, + OpenAI::Internal::AnyHash + ) + end + + # Identifier for the last permission ID from the previous pagination request. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # Number of permissions to retrieve. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # The order in which to retrieve permissions. + sig do + returns( + T.nilable( + OpenAI::FineTuning::Checkpoints::PermissionRetrieveParams::Order::OrSymbol + ) + ) + end + attr_reader :order + + sig do + params( + order: + OpenAI::FineTuning::Checkpoints::PermissionRetrieveParams::Order::OrSymbol + ).void + end + attr_writer :order + + # The ID of the project to get permissions for. + sig { returns(T.nilable(String)) } + attr_reader :project_id + + sig { params(project_id: String).void } + attr_writer :project_id + + sig do + params( + after: String, + limit: Integer, + order: + OpenAI::FineTuning::Checkpoints::PermissionRetrieveParams::Order::OrSymbol, + project_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Identifier for the last permission ID from the previous pagination request. + after: nil, + # Number of permissions to retrieve. + limit: nil, + # The order in which to retrieve permissions. + order: nil, + # The ID of the project to get permissions for. + project_id: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + limit: Integer, + order: + OpenAI::FineTuning::Checkpoints::PermissionRetrieveParams::Order::OrSymbol, + project_id: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # The order in which to retrieve permissions. 
+ module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::FineTuning::Checkpoints::PermissionRetrieveParams::Order + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASCENDING = + T.let( + :ascending, + OpenAI::FineTuning::Checkpoints::PermissionRetrieveParams::Order::TaggedSymbol + ) + DESCENDING = + T.let( + :descending, + OpenAI::FineTuning::Checkpoints::PermissionRetrieveParams::Order::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::Checkpoints::PermissionRetrieveParams::Order::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi b/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi new file mode 100644 index 00000000..2501be7b --- /dev/null +++ b/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi @@ -0,0 +1,139 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + module Checkpoints + class PermissionRetrieveResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns( + T::Array[ + OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data + ] + ) + end + attr_accessor :data + + sig { returns(T::Boolean) } + attr_accessor :has_more + + sig { returns(Symbol) } + attr_accessor :object + + sig { returns(T.nilable(String)) } + attr_accessor :first_id + + sig { returns(T.nilable(String)) } + attr_accessor :last_id + + sig do + params( + data: + T::Array[ + OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data::OrHash + ], + has_more: T::Boolean, + first_id: T.nilable(String), + last_id: T.nilable(String), + object: Symbol + ).returns(T.attached_class) + end + def self.new( + data:, + has_more:, + first_id: nil, + last_id: nil, + object: :list + ) + end + + sig do + override.returns( + { + data: + T::Array[ + OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data + ], + has_more: T::Boolean, + object: Symbol, + first_id: T.nilable(String), + last_id: T.nilable(String) + } + ) + end + def to_hash + end + + class Data < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data, + OpenAI::Internal::AnyHash + ) + end + + # The permission identifier, which can be referenced in the API endpoints. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) for when the permission was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The object type, which is always "checkpoint.permission". + sig { returns(Symbol) } + attr_accessor :object + + # The project identifier that the permission is for. + sig { returns(String) } + attr_accessor :project_id + + # The `checkpoint.permission` object represents a permission for a fine-tuned + # model checkpoint. + sig do + params( + id: String, + created_at: Integer, + project_id: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The permission identifier, which can be referenced in the API endpoints. + id:, + # The Unix timestamp (in seconds) for when the permission was created. + created_at:, + # The project identifier that the permission is for. 
+ project_id:, + # The object type, which is always "checkpoint.permission". + object: :"checkpoint.permission" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + object: Symbol, + project_id: String + } + ) + end + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/dpo_hyperparameters.rbi b/rbi/openai/models/fine_tuning/dpo_hyperparameters.rbi new file mode 100644 index 00000000..4172bfd0 --- /dev/null +++ b/rbi/openai/models/fine_tuning/dpo_hyperparameters.rbi @@ -0,0 +1,157 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + class DpoHyperparameters < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::DpoHyperparameters, + OpenAI::Internal::AnyHash + ) + end + + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + sig { returns(T.nilable(T.any(Symbol, Integer))) } + attr_reader :batch_size + + sig { params(batch_size: T.any(Symbol, Integer)).void } + attr_writer :batch_size + + # The beta value for the DPO method. A higher beta value will increase the weight + # of the penalty between the policy and reference model. + sig { returns(T.nilable(T.any(Symbol, Float))) } + attr_reader :beta + + sig { params(beta: T.any(Symbol, Float)).void } + attr_writer :beta + + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + sig { returns(T.nilable(T.any(Symbol, Float))) } + attr_reader :learning_rate_multiplier + + sig { params(learning_rate_multiplier: T.any(Symbol, Float)).void } + attr_writer :learning_rate_multiplier + + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + sig { returns(T.nilable(T.any(Symbol, Integer))) } + attr_reader :n_epochs + + sig { params(n_epochs: T.any(Symbol, Integer)).void } + attr_writer :n_epochs + + # The hyperparameters used for the DPO fine-tuning job. + sig do + params( + batch_size: T.any(Symbol, Integer), + beta: T.any(Symbol, Float), + learning_rate_multiplier: T.any(Symbol, Float), + n_epochs: T.any(Symbol, Integer) + ).returns(T.attached_class) + end + def self.new( + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + batch_size: nil, + # The beta value for the DPO method. A higher beta value will increase the weight + # of the penalty between the policy and reference model. + beta: nil, + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + learning_rate_multiplier: nil, + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + n_epochs: nil + ) + end + + sig do + override.returns( + { + batch_size: T.any(Symbol, Integer), + beta: T.any(Symbol, Float), + learning_rate_multiplier: T.any(Symbol, Float), + n_epochs: T.any(Symbol, Integer) + } + ) + end + def to_hash + end + + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. 
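+        #
+        # Each `T.any(Symbol, ...)` union below accepts either a literal value
+        # or a symbolic sentinel such as `:auto` (an assumption mirroring the
+        # API's `"auto"` default); a hypothetical construction:
+        #
+        #   OpenAI::FineTuning::DpoHyperparameters.new(batch_size: :auto, beta: 0.1)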
+ module BatchSize + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Integer) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::DpoHyperparameters::BatchSize::Variants + ] + ) + end + def self.variants + end + end + + # The beta value for the DPO method. A higher beta value will increase the weight + # of the penalty between the policy and reference model. + module Beta + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Float) } + + sig do + override.returns( + T::Array[OpenAI::FineTuning::DpoHyperparameters::Beta::Variants] + ) + end + def self.variants + end + end + + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + module LearningRateMultiplier + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Float) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::DpoHyperparameters::LearningRateMultiplier::Variants + ] + ) + end + def self.variants + end + end + + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + module NEpochs + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Integer) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::DpoHyperparameters::NEpochs::Variants + ] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/dpo_method.rbi b/rbi/openai/models/fine_tuning/dpo_method.rbi new file mode 100644 index 00000000..86d2b505 --- /dev/null +++ b/rbi/openai/models/fine_tuning/dpo_method.rbi @@ -0,0 +1,45 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + class DpoMethod < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::FineTuning::DpoMethod, OpenAI::Internal::AnyHash) + end + + # The hyperparameters used for the DPO fine-tuning job. + sig { returns(T.nilable(OpenAI::FineTuning::DpoHyperparameters)) } + attr_reader :hyperparameters + + sig do + params( + hyperparameters: OpenAI::FineTuning::DpoHyperparameters::OrHash + ).void + end + attr_writer :hyperparameters + + # Configuration for the DPO fine-tuning method. + sig do + params( + hyperparameters: OpenAI::FineTuning::DpoHyperparameters::OrHash + ).returns(T.attached_class) + end + def self.new( + # The hyperparameters used for the DPO fine-tuning job. + hyperparameters: nil + ) + end + + sig do + override.returns( + { hyperparameters: OpenAI::FineTuning::DpoHyperparameters } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/fine_tuning_job.rbi b/rbi/openai/models/fine_tuning/fine_tuning_job.rbi new file mode 100644 index 00000000..090fa734 --- /dev/null +++ b/rbi/openai/models/fine_tuning/fine_tuning_job.rbi @@ -0,0 +1,624 @@ +# typed: strong + +module OpenAI + module Models + FineTuningJob = FineTuning::FineTuningJob + + module FineTuning + class FineTuningJob < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::FineTuning::FineTuningJob, OpenAI::Internal::AnyHash) + end + + # The object identifier, which can be referenced in the API endpoints. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) for when the fine-tuning job was created. + sig { returns(Integer) } + attr_accessor :created_at + + # For fine-tuning jobs that have `failed`, this will contain more information on + # the cause of the failure. 
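+        #
+        # A hypothetical failure check (the `job` variable is assumed):
+        #
+        #   if job.status == :failed && job.error
+        #     warn "#{job.error.code}: #{job.error.message}"
+        #   end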
+ sig { returns(T.nilable(OpenAI::FineTuning::FineTuningJob::Error)) } + attr_reader :error + + sig do + params( + error: T.nilable(OpenAI::FineTuning::FineTuningJob::Error::OrHash) + ).void + end + attr_writer :error + + # The name of the fine-tuned model that is being created. The value will be null + # if the fine-tuning job is still running. + sig { returns(T.nilable(String)) } + attr_accessor :fine_tuned_model + + # The Unix timestamp (in seconds) for when the fine-tuning job was finished. The + # value will be null if the fine-tuning job is still running. + sig { returns(T.nilable(Integer)) } + attr_accessor :finished_at + + # The hyperparameters used for the fine-tuning job. This value will only be + # returned when running `supervised` jobs. + sig { returns(OpenAI::FineTuning::FineTuningJob::Hyperparameters) } + attr_reader :hyperparameters + + sig do + params( + hyperparameters: + OpenAI::FineTuning::FineTuningJob::Hyperparameters::OrHash + ).void + end + attr_writer :hyperparameters + + # The base model that is being fine-tuned. + sig { returns(String) } + attr_accessor :model + + # The object type, which is always "fine_tuning.job". + sig { returns(Symbol) } + attr_accessor :object + + # The organization that owns the fine-tuning job. + sig { returns(String) } + attr_accessor :organization_id + + # The compiled results file ID(s) for the fine-tuning job. You can retrieve the + # results with the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + sig { returns(T::Array[String]) } + attr_accessor :result_files + + # The seed used for the fine-tuning job. + sig { returns(Integer) } + attr_accessor :seed + + # The current status of the fine-tuning job, which can be either + # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + sig { returns(OpenAI::FineTuning::FineTuningJob::Status::TaggedSymbol) } + attr_accessor :status + + # The total number of billable tokens processed by this fine-tuning job. The value + # will be null if the fine-tuning job is still running. + sig { returns(T.nilable(Integer)) } + attr_accessor :trained_tokens + + # The file ID used for training. You can retrieve the training data with the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + sig { returns(String) } + attr_accessor :training_file + + # The file ID used for validation. You can retrieve the validation results with + # the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + sig { returns(T.nilable(String)) } + attr_accessor :validation_file + + # The Unix timestamp (in seconds) for when the fine-tuning job is estimated to + # finish. The value will be null if the fine-tuning job is not running. + sig { returns(T.nilable(Integer)) } + attr_accessor :estimated_finish + + # A list of integrations to enable for this fine-tuning job. + sig do + returns( + T.nilable( + T::Array[OpenAI::FineTuning::FineTuningJobWandbIntegrationObject] + ) + ) + end + attr_accessor :integrations + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The method used for fine-tuning. 
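+        #
+        # (Exposed as `method_`, with a trailing underscore, presumably to
+        # avoid clashing with Ruby's built-in `Object#method`.)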
+ sig { returns(T.nilable(OpenAI::FineTuning::FineTuningJob::Method)) } + attr_reader :method_ + + sig do + params( + method_: OpenAI::FineTuning::FineTuningJob::Method::OrHash + ).void + end + attr_writer :method_ + + # The `fine_tuning.job` object represents a fine-tuning job that has been created + # through the API. + sig do + params( + id: String, + created_at: Integer, + error: T.nilable(OpenAI::FineTuning::FineTuningJob::Error::OrHash), + fine_tuned_model: T.nilable(String), + finished_at: T.nilable(Integer), + hyperparameters: + OpenAI::FineTuning::FineTuningJob::Hyperparameters::OrHash, + model: String, + organization_id: String, + result_files: T::Array[String], + seed: Integer, + status: OpenAI::FineTuning::FineTuningJob::Status::OrSymbol, + trained_tokens: T.nilable(Integer), + training_file: String, + validation_file: T.nilable(String), + estimated_finish: T.nilable(Integer), + integrations: + T.nilable( + T::Array[ + OpenAI::FineTuning::FineTuningJobWandbIntegrationObject::OrHash + ] + ), + metadata: T.nilable(T::Hash[Symbol, String]), + method_: OpenAI::FineTuning::FineTuningJob::Method::OrHash, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The object identifier, which can be referenced in the API endpoints. + id:, + # The Unix timestamp (in seconds) for when the fine-tuning job was created. + created_at:, + # For fine-tuning jobs that have `failed`, this will contain more information on + # the cause of the failure. + error:, + # The name of the fine-tuned model that is being created. The value will be null + # if the fine-tuning job is still running. + fine_tuned_model:, + # The Unix timestamp (in seconds) for when the fine-tuning job was finished. The + # value will be null if the fine-tuning job is still running. + finished_at:, + # The hyperparameters used for the fine-tuning job. This value will only be + # returned when running `supervised` jobs. + hyperparameters:, + # The base model that is being fine-tuned. + model:, + # The organization that owns the fine-tuning job. + organization_id:, + # The compiled results file ID(s) for the fine-tuning job. You can retrieve the + # results with the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + result_files:, + # The seed used for the fine-tuning job. + seed:, + # The current status of the fine-tuning job, which can be either + # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + status:, + # The total number of billable tokens processed by this fine-tuning job. The value + # will be null if the fine-tuning job is still running. + trained_tokens:, + # The file ID used for training. You can retrieve the training data with the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + training_file:, + # The file ID used for validation. You can retrieve the validation results with + # the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + validation_file:, + # The Unix timestamp (in seconds) for when the fine-tuning job is estimated to + # finish. The value will be null if the fine-tuning job is not running. + estimated_finish: nil, + # A list of integrations to enable for this fine-tuning job. + integrations: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. 
+ # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The method used for fine-tuning. + method_: nil, + # The object type, which is always "fine_tuning.job". + object: :"fine_tuning.job" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + error: T.nilable(OpenAI::FineTuning::FineTuningJob::Error), + fine_tuned_model: T.nilable(String), + finished_at: T.nilable(Integer), + hyperparameters: + OpenAI::FineTuning::FineTuningJob::Hyperparameters, + model: String, + object: Symbol, + organization_id: String, + result_files: T::Array[String], + seed: Integer, + status: OpenAI::FineTuning::FineTuningJob::Status::TaggedSymbol, + trained_tokens: T.nilable(Integer), + training_file: String, + validation_file: T.nilable(String), + estimated_finish: T.nilable(Integer), + integrations: + T.nilable( + T::Array[ + OpenAI::FineTuning::FineTuningJobWandbIntegrationObject + ] + ), + metadata: T.nilable(T::Hash[Symbol, String]), + method_: OpenAI::FineTuning::FineTuningJob::Method + } + ) + end + def to_hash + end + + class Error < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::FineTuningJob::Error, + OpenAI::Internal::AnyHash + ) + end + + # A machine-readable error code. + sig { returns(String) } + attr_accessor :code + + # A human-readable error message. + sig { returns(String) } + attr_accessor :message + + # The parameter that was invalid, usually `training_file` or `validation_file`. + # This field will be null if the failure was not parameter-specific. + sig { returns(T.nilable(String)) } + attr_accessor :param + + # For fine-tuning jobs that have `failed`, this will contain more information on + # the cause of the failure. + sig do + params( + code: String, + message: String, + param: T.nilable(String) + ).returns(T.attached_class) + end + def self.new( + # A machine-readable error code. + code:, + # A human-readable error message. + message:, + # The parameter that was invalid, usually `training_file` or `validation_file`. + # This field will be null if the failure was not parameter-specific. + param: + ) + end + + sig do + override.returns( + { code: String, message: String, param: T.nilable(String) } + ) + end + def to_hash + end + end + + class Hyperparameters < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::FineTuningJob::Hyperparameters, + OpenAI::Internal::AnyHash + ) + end + + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + sig do + returns( + T.nilable( + OpenAI::FineTuning::FineTuningJob::Hyperparameters::BatchSize::Variants + ) + ) + end + attr_accessor :batch_size + + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + sig do + returns( + T.nilable( + OpenAI::FineTuning::FineTuningJob::Hyperparameters::LearningRateMultiplier::Variants + ) + ) + end + attr_reader :learning_rate_multiplier + + sig { params(learning_rate_multiplier: T.any(Symbol, Float)).void } + attr_writer :learning_rate_multiplier + + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. 
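+          #
+          # Each field is a Symbol-or-number union: a Symbol (e.g. `:auto`,
+          # assumed here to be the service-chosen sentinel) defers the choice to
+          # the API, while a literal number pins it explicitly.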
+ sig do + returns( + T.nilable( + OpenAI::FineTuning::FineTuningJob::Hyperparameters::NEpochs::Variants + ) + ) + end + attr_reader :n_epochs + + sig { params(n_epochs: T.any(Symbol, Integer)).void } + attr_writer :n_epochs + + # The hyperparameters used for the fine-tuning job. This value will only be + # returned when running `supervised` jobs. + sig do + params( + batch_size: T.nilable(T.any(Symbol, Integer)), + learning_rate_multiplier: T.any(Symbol, Float), + n_epochs: T.any(Symbol, Integer) + ).returns(T.attached_class) + end + def self.new( + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + batch_size: nil, + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + learning_rate_multiplier: nil, + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + n_epochs: nil + ) + end + + sig do + override.returns( + { + batch_size: + T.nilable( + OpenAI::FineTuning::FineTuningJob::Hyperparameters::BatchSize::Variants + ), + learning_rate_multiplier: + OpenAI::FineTuning::FineTuningJob::Hyperparameters::LearningRateMultiplier::Variants, + n_epochs: + OpenAI::FineTuning::FineTuningJob::Hyperparameters::NEpochs::Variants + } + ) + end + def to_hash + end + + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + module BatchSize + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Integer) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::FineTuningJob::Hyperparameters::BatchSize::Variants + ] + ) + end + def self.variants + end + end + + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + module LearningRateMultiplier + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Float) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::FineTuningJob::Hyperparameters::LearningRateMultiplier::Variants + ] + ) + end + def self.variants + end + end + + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + module NEpochs + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Integer) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::FineTuningJob::Hyperparameters::NEpochs::Variants + ] + ) + end + def self.variants + end + end + end + + # The current status of the fine-tuning job, which can be either + # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. 
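+        #
+        # Members are tagged symbols, so a terminal-state check can be sketched
+        # as `%i[succeeded failed cancelled].include?(job.status)` (`job`
+        # illustrative).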
+ module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::FineTuning::FineTuningJob::Status) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + VALIDATING_FILES = + T.let( + :validating_files, + OpenAI::FineTuning::FineTuningJob::Status::TaggedSymbol + ) + QUEUED = + T.let( + :queued, + OpenAI::FineTuning::FineTuningJob::Status::TaggedSymbol + ) + RUNNING = + T.let( + :running, + OpenAI::FineTuning::FineTuningJob::Status::TaggedSymbol + ) + SUCCEEDED = + T.let( + :succeeded, + OpenAI::FineTuning::FineTuningJob::Status::TaggedSymbol + ) + FAILED = + T.let( + :failed, + OpenAI::FineTuning::FineTuningJob::Status::TaggedSymbol + ) + CANCELLED = + T.let( + :cancelled, + OpenAI::FineTuning::FineTuningJob::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::FineTuning::FineTuningJob::Status::TaggedSymbol] + ) + end + def self.values + end + end + + class Method < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::FineTuningJob::Method, + OpenAI::Internal::AnyHash + ) + end + + # The type of method. Is either `supervised`, `dpo`, or `reinforcement`. + sig do + returns( + OpenAI::FineTuning::FineTuningJob::Method::Type::TaggedSymbol + ) + end + attr_accessor :type + + # Configuration for the DPO fine-tuning method. + sig { returns(T.nilable(OpenAI::FineTuning::DpoMethod)) } + attr_reader :dpo + + sig { params(dpo: OpenAI::FineTuning::DpoMethod::OrHash).void } + attr_writer :dpo + + # Configuration for the reinforcement fine-tuning method. + sig { returns(T.nilable(OpenAI::FineTuning::ReinforcementMethod)) } + attr_reader :reinforcement + + sig do + params( + reinforcement: OpenAI::FineTuning::ReinforcementMethod::OrHash + ).void + end + attr_writer :reinforcement + + # Configuration for the supervised fine-tuning method. + sig { returns(T.nilable(OpenAI::FineTuning::SupervisedMethod)) } + attr_reader :supervised + + sig do + params( + supervised: OpenAI::FineTuning::SupervisedMethod::OrHash + ).void + end + attr_writer :supervised + + # The method used for fine-tuning. + sig do + params( + type: OpenAI::FineTuning::FineTuningJob::Method::Type::OrSymbol, + dpo: OpenAI::FineTuning::DpoMethod::OrHash, + reinforcement: OpenAI::FineTuning::ReinforcementMethod::OrHash, + supervised: OpenAI::FineTuning::SupervisedMethod::OrHash + ).returns(T.attached_class) + end + def self.new( + # The type of method. Is either `supervised`, `dpo`, or `reinforcement`. + type:, + # Configuration for the DPO fine-tuning method. + dpo: nil, + # Configuration for the reinforcement fine-tuning method. + reinforcement: nil, + # Configuration for the supervised fine-tuning method. + supervised: nil + ) + end + + sig do + override.returns( + { + type: + OpenAI::FineTuning::FineTuningJob::Method::Type::TaggedSymbol, + dpo: OpenAI::FineTuning::DpoMethod, + reinforcement: OpenAI::FineTuning::ReinforcementMethod, + supervised: OpenAI::FineTuning::SupervisedMethod + } + ) + end + def to_hash + end + + # The type of method. Is either `supervised`, `dpo`, or `reinforcement`. 
+ module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::FineTuning::FineTuningJob::Method::Type) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SUPERVISED = + T.let( + :supervised, + OpenAI::FineTuning::FineTuningJob::Method::Type::TaggedSymbol + ) + DPO = + T.let( + :dpo, + OpenAI::FineTuning::FineTuningJob::Method::Type::TaggedSymbol + ) + REINFORCEMENT = + T.let( + :reinforcement, + OpenAI::FineTuning::FineTuningJob::Method::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::FineTuningJob::Method::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/fine_tuning_job_event.rbi b/rbi/openai/models/fine_tuning/fine_tuning_job_event.rbi new file mode 100644 index 00000000..abbbe399 --- /dev/null +++ b/rbi/openai/models/fine_tuning/fine_tuning_job_event.rbi @@ -0,0 +1,181 @@ +# typed: strong + +module OpenAI + module Models + FineTuningJobEvent = FineTuning::FineTuningJobEvent + + module FineTuning + class FineTuningJobEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::FineTuningJobEvent, + OpenAI::Internal::AnyHash + ) + end + + # The object identifier. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) for when the fine-tuning job was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The log level of the event. + sig do + returns(OpenAI::FineTuning::FineTuningJobEvent::Level::TaggedSymbol) + end + attr_accessor :level + + # The message of the event. + sig { returns(String) } + attr_accessor :message + + # The object type, which is always "fine_tuning.job.event". + sig { returns(Symbol) } + attr_accessor :object + + # The data associated with the event. + sig { returns(T.nilable(T.anything)) } + attr_reader :data + + sig { params(data: T.anything).void } + attr_writer :data + + # The type of event. + sig do + returns( + T.nilable( + OpenAI::FineTuning::FineTuningJobEvent::Type::TaggedSymbol + ) + ) + end + attr_reader :type + + sig do + params( + type: OpenAI::FineTuning::FineTuningJobEvent::Type::OrSymbol + ).void + end + attr_writer :type + + # Fine-tuning job event object + sig do + params( + id: String, + created_at: Integer, + level: OpenAI::FineTuning::FineTuningJobEvent::Level::OrSymbol, + message: String, + data: T.anything, + type: OpenAI::FineTuning::FineTuningJobEvent::Type::OrSymbol, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The object identifier. + id:, + # The Unix timestamp (in seconds) for when the fine-tuning job was created. + created_at:, + # The log level of the event. + level:, + # The message of the event. + message:, + # The data associated with the event. + data: nil, + # The type of event. + type: nil, + # The object type, which is always "fine_tuning.job.event". + object: :"fine_tuning.job.event" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + level: + OpenAI::FineTuning::FineTuningJobEvent::Level::TaggedSymbol, + message: String, + object: Symbol, + data: T.anything, + type: OpenAI::FineTuning::FineTuningJobEvent::Type::TaggedSymbol + } + ) + end + def to_hash + end + + # The log level of the event. 
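+        #
+        # As a sketch, `events.select { |e| e.level != :info }` would keep only
+        # warnings and errors from a listed page of events (`events`
+        # illustrative).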
+ module Level + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::FineTuning::FineTuningJobEvent::Level) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + INFO = + T.let( + :info, + OpenAI::FineTuning::FineTuningJobEvent::Level::TaggedSymbol + ) + WARN = + T.let( + :warn, + OpenAI::FineTuning::FineTuningJobEvent::Level::TaggedSymbol + ) + ERROR = + T.let( + :error, + OpenAI::FineTuning::FineTuningJobEvent::Level::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::FineTuningJobEvent::Level::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The type of event. + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::FineTuning::FineTuningJobEvent::Type) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MESSAGE = + T.let( + :message, + OpenAI::FineTuning::FineTuningJobEvent::Type::TaggedSymbol + ) + METRICS = + T.let( + :metrics, + OpenAI::FineTuning::FineTuningJobEvent::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::FineTuningJobEvent::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/fine_tuning_job_integration.rbi b/rbi/openai/models/fine_tuning/fine_tuning_job_integration.rbi new file mode 100644 index 00000000..a2e8e306 --- /dev/null +++ b/rbi/openai/models/fine_tuning/fine_tuning_job_integration.rbi @@ -0,0 +1,12 @@ +# typed: strong + +module OpenAI + module Models + FineTuningJobIntegration = FineTuning::FineTuningJobIntegration + + module FineTuning + FineTuningJobIntegration = + OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject + end + end +end diff --git a/rbi/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi b/rbi/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi new file mode 100644 index 00000000..a728e848 --- /dev/null +++ b/rbi/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi @@ -0,0 +1,85 @@ +# typed: strong + +module OpenAI + module Models + FineTuningJobWandbIntegration = FineTuning::FineTuningJobWandbIntegration + + module FineTuning + class FineTuningJobWandbIntegration < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::FineTuningJobWandbIntegration, + OpenAI::Internal::AnyHash + ) + end + + # The name of the project that the new run will be created under. + sig { returns(String) } + attr_accessor :project + + # The entity to use for the run. This allows you to set the team or username of + # the WandB user that you would like associated with the run. If not set, the + # default entity for the registered WandB API key is used. + sig { returns(T.nilable(String)) } + attr_accessor :entity + + # A display name to set for the run. If not set, we will use the Job ID as the + # name. + sig { returns(T.nilable(String)) } + attr_accessor :name + + # A list of tags to be attached to the newly created run. These tags are passed + # through directly to WandB. Some default tags are generated by OpenAI: + # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + sig { returns(T.nilable(T::Array[String])) } + attr_reader :tags + + sig { params(tags: T::Array[String]).void } + attr_writer :tags + + # The settings for your integration with Weights and Biases. This payload + # specifies the project that metrics will be sent to. 
Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. + sig do + params( + project: String, + entity: T.nilable(String), + name: T.nilable(String), + tags: T::Array[String] + ).returns(T.attached_class) + end + def self.new( + # The name of the project that the new run will be created under. + project:, + # The entity to use for the run. This allows you to set the team or username of + # the WandB user that you would like associated with the run. If not set, the + # default entity for the registered WandB API key is used. + entity: nil, + # A display name to set for the run. If not set, we will use the Job ID as the + # name. + name: nil, + # A list of tags to be attached to the newly created run. These tags are passed + # through directly to WandB. Some default tags are generated by OpenAI: + # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + tags: nil + ) + end + + sig do + override.returns( + { + project: String, + entity: T.nilable(String), + name: T.nilable(String), + tags: T::Array[String] + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi b/rbi/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi new file mode 100644 index 00000000..b92f3743 --- /dev/null +++ b/rbi/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi @@ -0,0 +1,66 @@ +# typed: strong + +module OpenAI + module Models + FineTuningJobWandbIntegrationObject = + FineTuning::FineTuningJobWandbIntegrationObject + + module FineTuning + class FineTuningJobWandbIntegrationObject < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::FineTuningJobWandbIntegrationObject, + OpenAI::Internal::AnyHash + ) + end + + # The type of the integration being enabled for the fine-tuning job + sig { returns(Symbol) } + attr_accessor :type + + # The settings for your integration with Weights and Biases. This payload + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. + sig { returns(OpenAI::FineTuning::FineTuningJobWandbIntegration) } + attr_reader :wandb + + sig do + params( + wandb: OpenAI::FineTuning::FineTuningJobWandbIntegration::OrHash + ).void + end + attr_writer :wandb + + sig do + params( + wandb: OpenAI::FineTuning::FineTuningJobWandbIntegration::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The settings for your integration with Weights and Biases. This payload + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. 
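+          # Per the `OrHash` alias, a plain hash such as
+          # `{ project: "my-project", tags: ["openai/finetune"] }` (values
+          # illustrative) is also accepted here.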
+ wandb:, + # The type of the integration being enabled for the fine-tuning job + type: :wandb + ) + end + + sig do + override.returns( + { + type: Symbol, + wandb: OpenAI::FineTuning::FineTuningJobWandbIntegration + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/job_cancel_params.rbi b/rbi/openai/models/fine_tuning/job_cancel_params.rbi new file mode 100644 index 00000000..3ae73b64 --- /dev/null +++ b/rbi/openai/models/fine_tuning/job_cancel_params.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + class JobCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::JobCancelParams, + OpenAI::Internal::AnyHash + ) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/job_create_params.rbi b/rbi/openai/models/fine_tuning/job_create_params.rbi new file mode 100644 index 00000000..4ac7cefa --- /dev/null +++ b/rbi/openai/models/fine_tuning/job_create_params.rbi @@ -0,0 +1,669 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + class JobCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::JobCreateParams, + OpenAI::Internal::AnyHash + ) + end + + # The name of the model to fine-tune. You can select one of the + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). + sig do + returns( + T.any(String, OpenAI::FineTuning::JobCreateParams::Model::OrSymbol) + ) + end + attr_accessor :model + + # The ID of an uploaded file that contains training data. + # + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. + # + # Your dataset must be formatted as a JSONL file. Additionally, you must upload + # your file with the purpose `fine-tune`. + # + # The contents of the file should differ depending on if the model uses the + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # format, or if the fine-tuning method uses the + # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) + # format. + # + # See the + # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) + # for more details. + sig { returns(String) } + attr_accessor :training_file + + # The hyperparameters used for the fine-tuning job. This value is now deprecated + # in favor of `method`, and should be passed in under the `method` parameter. + sig do + returns( + T.nilable(OpenAI::FineTuning::JobCreateParams::Hyperparameters) + ) + end + attr_reader :hyperparameters + + sig do + params( + hyperparameters: + OpenAI::FineTuning::JobCreateParams::Hyperparameters::OrHash + ).void + end + attr_writer :hyperparameters + + # A list of integrations to enable for your fine-tuning job. 
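+        #
+        # Only the Weights and Biases integration is currently modeled; the
+        # expected shape, as a sketch with illustrative values:
+        #   integrations: [{ type: :wandb, wandb: { project: "my-project" } }]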
+ sig do + returns( + T.nilable( + T::Array[OpenAI::FineTuning::JobCreateParams::Integration] + ) + ) + end + attr_accessor :integrations + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The method used for fine-tuning. + sig { returns(T.nilable(OpenAI::FineTuning::JobCreateParams::Method)) } + attr_reader :method_ + + sig do + params( + method_: OpenAI::FineTuning::JobCreateParams::Method::OrHash + ).void + end + attr_writer :method_ + + # The seed controls the reproducibility of the job. Passing in the same seed and + # job parameters should produce the same results, but may differ in rare cases. If + # a seed is not specified, one will be generated for you. + sig { returns(T.nilable(Integer)) } + attr_accessor :seed + + # A string of up to 64 characters that will be added to your fine-tuned model + # name. + # + # For example, a `suffix` of "custom-model-name" would produce a model name like + # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + sig { returns(T.nilable(String)) } + attr_accessor :suffix + + # The ID of an uploaded file that contains validation data. + # + # If you provide this file, the data is used to generate validation metrics + # periodically during fine-tuning. These metrics can be viewed in the fine-tuning + # results file. The same data should not be present in both train and validation + # files. + # + # Your dataset must be formatted as a JSONL file. You must upload your file with + # the purpose `fine-tune`. + # + # See the + # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) + # for more details. + sig { returns(T.nilable(String)) } + attr_accessor :validation_file + + sig do + params( + model: + T.any( + String, + OpenAI::FineTuning::JobCreateParams::Model::OrSymbol + ), + training_file: String, + hyperparameters: + OpenAI::FineTuning::JobCreateParams::Hyperparameters::OrHash, + integrations: + T.nilable( + T::Array[ + OpenAI::FineTuning::JobCreateParams::Integration::OrHash + ] + ), + metadata: T.nilable(T::Hash[Symbol, String]), + method_: OpenAI::FineTuning::JobCreateParams::Method::OrHash, + seed: T.nilable(Integer), + suffix: T.nilable(String), + validation_file: T.nilable(String), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The name of the model to fine-tune. You can select one of the + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). + model:, + # The ID of an uploaded file that contains training data. + # + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. + # + # Your dataset must be formatted as a JSONL file. Additionally, you must upload + # your file with the purpose `fine-tune`. 
+ # + # The contents of the file should differ depending on if the model uses the + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # format, or if the fine-tuning method uses the + # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) + # format. + # + # See the + # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) + # for more details. + training_file:, + # The hyperparameters used for the fine-tuning job. This value is now deprecated + # in favor of `method`, and should be passed in under the `method` parameter. + hyperparameters: nil, + # A list of integrations to enable for your fine-tuning job. + integrations: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The method used for fine-tuning. + method_: nil, + # The seed controls the reproducibility of the job. Passing in the same seed and + # job parameters should produce the same results, but may differ in rare cases. If + # a seed is not specified, one will be generated for you. + seed: nil, + # A string of up to 64 characters that will be added to your fine-tuned model + # name. + # + # For example, a `suffix` of "custom-model-name" would produce a model name like + # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + suffix: nil, + # The ID of an uploaded file that contains validation data. + # + # If you provide this file, the data is used to generate validation metrics + # periodically during fine-tuning. These metrics can be viewed in the fine-tuning + # results file. The same data should not be present in both train and validation + # files. + # + # Your dataset must be formatted as a JSONL file. You must upload your file with + # the purpose `fine-tune`. + # + # See the + # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) + # for more details. + validation_file: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + model: + T.any( + String, + OpenAI::FineTuning::JobCreateParams::Model::OrSymbol + ), + training_file: String, + hyperparameters: + OpenAI::FineTuning::JobCreateParams::Hyperparameters, + integrations: + T.nilable( + T::Array[OpenAI::FineTuning::JobCreateParams::Integration] + ), + metadata: T.nilable(T::Hash[Symbol, String]), + method_: OpenAI::FineTuning::JobCreateParams::Method, + seed: T.nilable(Integer), + suffix: T.nilable(String), + validation_file: T.nilable(String), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # The name of the model to fine-tune. You can select one of the + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). 
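+        #
+        # Accepts any model string (for example a dated snapshot such as
+        # "gpt-4o-mini-2024-07-18") in addition to the tagged symbols below.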
+ module Model + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + OpenAI::FineTuning::JobCreateParams::Model::TaggedSymbol + ) + end + + sig do + override.returns( + T::Array[OpenAI::FineTuning::JobCreateParams::Model::Variants] + ) + end + def self.variants + end + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::FineTuning::JobCreateParams::Model) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + BABBAGE_002 = + T.let( + :"babbage-002", + OpenAI::FineTuning::JobCreateParams::Model::TaggedSymbol + ) + DAVINCI_002 = + T.let( + :"davinci-002", + OpenAI::FineTuning::JobCreateParams::Model::TaggedSymbol + ) + GPT_3_5_TURBO = + T.let( + :"gpt-3.5-turbo", + OpenAI::FineTuning::JobCreateParams::Model::TaggedSymbol + ) + GPT_4O_MINI = + T.let( + :"gpt-4o-mini", + OpenAI::FineTuning::JobCreateParams::Model::TaggedSymbol + ) + end + + class Hyperparameters < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::JobCreateParams::Hyperparameters, + OpenAI::Internal::AnyHash + ) + end + + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + sig { returns(T.nilable(T.any(Symbol, Integer))) } + attr_reader :batch_size + + sig { params(batch_size: T.any(Symbol, Integer)).void } + attr_writer :batch_size + + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + sig { returns(T.nilable(T.any(Symbol, Float))) } + attr_reader :learning_rate_multiplier + + sig { params(learning_rate_multiplier: T.any(Symbol, Float)).void } + attr_writer :learning_rate_multiplier + + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + sig { returns(T.nilable(T.any(Symbol, Integer))) } + attr_reader :n_epochs + + sig { params(n_epochs: T.any(Symbol, Integer)).void } + attr_writer :n_epochs + + # The hyperparameters used for the fine-tuning job. This value is now deprecated + # in favor of `method`, and should be passed in under the `method` parameter. + sig do + params( + batch_size: T.any(Symbol, Integer), + learning_rate_multiplier: T.any(Symbol, Float), + n_epochs: T.any(Symbol, Integer) + ).returns(T.attached_class) + end + def self.new( + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + batch_size: nil, + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + learning_rate_multiplier: nil, + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + n_epochs: nil + ) + end + + sig do + override.returns( + { + batch_size: T.any(Symbol, Integer), + learning_rate_multiplier: T.any(Symbol, Float), + n_epochs: T.any(Symbol, Integer) + } + ) + end + def to_hash + end + + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + module BatchSize + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Integer) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::JobCreateParams::Hyperparameters::BatchSize::Variants + ] + ) + end + def self.variants + end + end + + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. 
+ module LearningRateMultiplier + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Float) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::JobCreateParams::Hyperparameters::LearningRateMultiplier::Variants + ] + ) + end + def self.variants + end + end + + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + module NEpochs + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Integer) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::JobCreateParams::Hyperparameters::NEpochs::Variants + ] + ) + end + def self.variants + end + end + end + + class Integration < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::JobCreateParams::Integration, + OpenAI::Internal::AnyHash + ) + end + + # The type of integration to enable. Currently, only "wandb" (Weights and Biases) + # is supported. + sig { returns(Symbol) } + attr_accessor :type + + # The settings for your integration with Weights and Biases. This payload + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. + sig do + returns(OpenAI::FineTuning::JobCreateParams::Integration::Wandb) + end + attr_reader :wandb + + sig do + params( + wandb: + OpenAI::FineTuning::JobCreateParams::Integration::Wandb::OrHash + ).void + end + attr_writer :wandb + + sig do + params( + wandb: + OpenAI::FineTuning::JobCreateParams::Integration::Wandb::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The settings for your integration with Weights and Biases. This payload + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. + wandb:, + # The type of integration to enable. Currently, only "wandb" (Weights and Biases) + # is supported. + type: :wandb + ) + end + + sig do + override.returns( + { + type: Symbol, + wandb: OpenAI::FineTuning::JobCreateParams::Integration::Wandb + } + ) + end + def to_hash + end + + class Wandb < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::JobCreateParams::Integration::Wandb, + OpenAI::Internal::AnyHash + ) + end + + # The name of the project that the new run will be created under. + sig { returns(String) } + attr_accessor :project + + # The entity to use for the run. This allows you to set the team or username of + # the WandB user that you would like associated with the run. If not set, the + # default entity for the registered WandB API key is used. + sig { returns(T.nilable(String)) } + attr_accessor :entity + + # A display name to set for the run. If not set, we will use the Job ID as the + # name. + sig { returns(T.nilable(String)) } + attr_accessor :name + + # A list of tags to be attached to the newly created run. These tags are passed + # through directly to WandB. Some default tags are generated by OpenAI: + # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + sig { returns(T.nilable(T::Array[String])) } + attr_reader :tags + + sig { params(tags: T::Array[String]).void } + attr_writer :tags + + # The settings for your integration with Weights and Biases. 
This payload + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. + sig do + params( + project: String, + entity: T.nilable(String), + name: T.nilable(String), + tags: T::Array[String] + ).returns(T.attached_class) + end + def self.new( + # The name of the project that the new run will be created under. + project:, + # The entity to use for the run. This allows you to set the team or username of + # the WandB user that you would like associated with the run. If not set, the + # default entity for the registered WandB API key is used. + entity: nil, + # A display name to set for the run. If not set, we will use the Job ID as the + # name. + name: nil, + # A list of tags to be attached to the newly created run. These tags are passed + # through directly to WandB. Some default tags are generated by OpenAI: + # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + tags: nil + ) + end + + sig do + override.returns( + { + project: String, + entity: T.nilable(String), + name: T.nilable(String), + tags: T::Array[String] + } + ) + end + def to_hash + end + end + end + + class Method < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::JobCreateParams::Method, + OpenAI::Internal::AnyHash + ) + end + + # The type of method. Is either `supervised`, `dpo`, or `reinforcement`. + sig do + returns(OpenAI::FineTuning::JobCreateParams::Method::Type::OrSymbol) + end + attr_accessor :type + + # Configuration for the DPO fine-tuning method. + sig { returns(T.nilable(OpenAI::FineTuning::DpoMethod)) } + attr_reader :dpo + + sig { params(dpo: OpenAI::FineTuning::DpoMethod::OrHash).void } + attr_writer :dpo + + # Configuration for the reinforcement fine-tuning method. + sig { returns(T.nilable(OpenAI::FineTuning::ReinforcementMethod)) } + attr_reader :reinforcement + + sig do + params( + reinforcement: OpenAI::FineTuning::ReinforcementMethod::OrHash + ).void + end + attr_writer :reinforcement + + # Configuration for the supervised fine-tuning method. + sig { returns(T.nilable(OpenAI::FineTuning::SupervisedMethod)) } + attr_reader :supervised + + sig do + params( + supervised: OpenAI::FineTuning::SupervisedMethod::OrHash + ).void + end + attr_writer :supervised + + # The method used for fine-tuning. + sig do + params( + type: OpenAI::FineTuning::JobCreateParams::Method::Type::OrSymbol, + dpo: OpenAI::FineTuning::DpoMethod::OrHash, + reinforcement: OpenAI::FineTuning::ReinforcementMethod::OrHash, + supervised: OpenAI::FineTuning::SupervisedMethod::OrHash + ).returns(T.attached_class) + end + def self.new( + # The type of method. Is either `supervised`, `dpo`, or `reinforcement`. + type:, + # Configuration for the DPO fine-tuning method. + dpo: nil, + # Configuration for the reinforcement fine-tuning method. + reinforcement: nil, + # Configuration for the supervised fine-tuning method. + supervised: nil + ) + end + + sig do + override.returns( + { + type: + OpenAI::FineTuning::JobCreateParams::Method::Type::OrSymbol, + dpo: OpenAI::FineTuning::DpoMethod, + reinforcement: OpenAI::FineTuning::ReinforcementMethod, + supervised: OpenAI::FineTuning::SupervisedMethod + } + ) + end + def to_hash + end + + # The type of method. Is either `supervised`, `dpo`, or `reinforcement`. 
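+          #
+          # A minimal construction sketch (hyperparameter values illustrative):
+          #   OpenAI::FineTuning::JobCreateParams::Method.new(
+          #     type: :dpo,
+          #     dpo: { hyperparameters: { beta: 0.1 } }
+          #   )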
+ module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::FineTuning::JobCreateParams::Method::Type) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SUPERVISED = + T.let( + :supervised, + OpenAI::FineTuning::JobCreateParams::Method::Type::TaggedSymbol + ) + DPO = + T.let( + :dpo, + OpenAI::FineTuning::JobCreateParams::Method::Type::TaggedSymbol + ) + REINFORCEMENT = + T.let( + :reinforcement, + OpenAI::FineTuning::JobCreateParams::Method::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::JobCreateParams::Method::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/job_list_events_params.rbi b/rbi/openai/models/fine_tuning/job_list_events_params.rbi new file mode 100644 index 00000000..9f01086f --- /dev/null +++ b/rbi/openai/models/fine_tuning/job_list_events_params.rbi @@ -0,0 +1,62 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + class JobListEventsParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::JobListEventsParams, + OpenAI::Internal::AnyHash + ) + end + + # Identifier for the last event from the previous pagination request. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # Number of events to retrieve. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + sig do + params( + after: String, + limit: Integer, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Identifier for the last event from the previous pagination request. + after: nil, + # Number of events to retrieve. + limit: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + limit: Integer, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/job_list_params.rbi b/rbi/openai/models/fine_tuning/job_list_params.rbi new file mode 100644 index 00000000..96c0267e --- /dev/null +++ b/rbi/openai/models/fine_tuning/job_list_params.rbi @@ -0,0 +1,69 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + class JobListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::FineTuning::JobListParams, OpenAI::Internal::AnyHash) + end + + # Identifier for the last job from the previous pagination request. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # Number of fine-tuning jobs to retrieve. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # Optional metadata filter. To filter, use the syntax `metadata[k]=v`. + # Alternatively, set `metadata=null` to indicate no metadata. 
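+        #
+        # For example, `metadata: { environment: "production" }` filters with
+        # `metadata[environment]=production`, while `metadata: nil` selects jobs
+        # with no metadata via `metadata=null`.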
+ sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + sig do + params( + after: String, + limit: Integer, + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Identifier for the last job from the previous pagination request. + after: nil, + # Number of fine-tuning jobs to retrieve. + limit: nil, + # Optional metadata filter. To filter, use the syntax `metadata[k]=v`. + # Alternatively, set `metadata=null` to indicate no metadata. + metadata: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + limit: Integer, + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/job_pause_params.rbi b/rbi/openai/models/fine_tuning/job_pause_params.rbi new file mode 100644 index 00000000..cf0e05ac --- /dev/null +++ b/rbi/openai/models/fine_tuning/job_pause_params.rbi @@ -0,0 +1,29 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + class JobPauseParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::FineTuning::JobPauseParams, OpenAI::Internal::AnyHash) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/job_resume_params.rbi b/rbi/openai/models/fine_tuning/job_resume_params.rbi new file mode 100644 index 00000000..43fa43b3 --- /dev/null +++ b/rbi/openai/models/fine_tuning/job_resume_params.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + class JobResumeParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::JobResumeParams, + OpenAI::Internal::AnyHash + ) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/job_retrieve_params.rbi b/rbi/openai/models/fine_tuning/job_retrieve_params.rbi new file mode 100644 index 00000000..b43b2e48 --- /dev/null +++ b/rbi/openai/models/fine_tuning/job_retrieve_params.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + class JobRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::JobRetrieveParams, + OpenAI::Internal::AnyHash + ) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/jobs/checkpoint_list_params.rbi 
b/rbi/openai/models/fine_tuning/jobs/checkpoint_list_params.rbi new file mode 100644 index 00000000..06c2f618 --- /dev/null +++ b/rbi/openai/models/fine_tuning/jobs/checkpoint_list_params.rbi @@ -0,0 +1,64 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + module Jobs + class CheckpointListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::Jobs::CheckpointListParams, + OpenAI::Internal::AnyHash + ) + end + + # Identifier for the last checkpoint ID from the previous pagination request. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # Number of checkpoints to retrieve. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + sig do + params( + after: String, + limit: Integer, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Identifier for the last checkpoint ID from the previous pagination request. + after: nil, + # Number of checkpoints to retrieve. + limit: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + limit: Integer, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi b/rbi/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi new file mode 100644 index 00000000..ab83c2cc --- /dev/null +++ b/rbi/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi @@ -0,0 +1,197 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + module Jobs + class FineTuningJobCheckpoint < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint, + OpenAI::Internal::AnyHash + ) + end + + # The checkpoint identifier, which can be referenced in the API endpoints. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) for when the checkpoint was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The name of the fine-tuned checkpoint model that is created. + sig { returns(String) } + attr_accessor :fine_tuned_model_checkpoint + + # The name of the fine-tuning job that this checkpoint was created from. + sig { returns(String) } + attr_accessor :fine_tuning_job_id + + # Metrics at the step number during the fine-tuning job. + sig do + returns(OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics) + end + attr_reader :metrics + + sig do + params( + metrics: + OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics::OrHash + ).void + end + attr_writer :metrics + + # The object type, which is always "fine_tuning.job.checkpoint". + sig { returns(Symbol) } + attr_accessor :object + + # The step number that the checkpoint was created at. + sig { returns(Integer) } + attr_accessor :step_number + + # The `fine_tuning.job.checkpoint` object represents a model checkpoint for a + # fine-tuning job that is ready to use. 
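+          #
+          # Presumably listed through the job's checkpoints resource, paired with
+          # `CheckpointListParams` above (e.g.
+          # `client.fine_tuning.jobs.checkpoints.list(job_id)`; the exact path is
+          # assumed here).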
+ sig do + params( + id: String, + created_at: Integer, + fine_tuned_model_checkpoint: String, + fine_tuning_job_id: String, + metrics: + OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics::OrHash, + step_number: Integer, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The checkpoint identifier, which can be referenced in the API endpoints. + id:, + # The Unix timestamp (in seconds) for when the checkpoint was created. + created_at:, + # The name of the fine-tuned checkpoint model that is created. + fine_tuned_model_checkpoint:, + # The name of the fine-tuning job that this checkpoint was created from. + fine_tuning_job_id:, + # Metrics at the step number during the fine-tuning job. + metrics:, + # The step number that the checkpoint was created at. + step_number:, + # The object type, which is always "fine_tuning.job.checkpoint". + object: :"fine_tuning.job.checkpoint" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + fine_tuned_model_checkpoint: String, + fine_tuning_job_id: String, + metrics: + OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics, + object: Symbol, + step_number: Integer + } + ) + end + def to_hash + end + + class Metrics < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(T.nilable(Float)) } + attr_reader :full_valid_loss + + sig { params(full_valid_loss: Float).void } + attr_writer :full_valid_loss + + sig { returns(T.nilable(Float)) } + attr_reader :full_valid_mean_token_accuracy + + sig { params(full_valid_mean_token_accuracy: Float).void } + attr_writer :full_valid_mean_token_accuracy + + sig { returns(T.nilable(Float)) } + attr_reader :step + + sig { params(step: Float).void } + attr_writer :step + + sig { returns(T.nilable(Float)) } + attr_reader :train_loss + + sig { params(train_loss: Float).void } + attr_writer :train_loss + + sig { returns(T.nilable(Float)) } + attr_reader :train_mean_token_accuracy + + sig { params(train_mean_token_accuracy: Float).void } + attr_writer :train_mean_token_accuracy + + sig { returns(T.nilable(Float)) } + attr_reader :valid_loss + + sig { params(valid_loss: Float).void } + attr_writer :valid_loss + + sig { returns(T.nilable(Float)) } + attr_reader :valid_mean_token_accuracy + + sig { params(valid_mean_token_accuracy: Float).void } + attr_writer :valid_mean_token_accuracy + + # Metrics at the step number during the fine-tuning job. 
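+            #
+            # Reading the names: `train_*` are measured on the training split,
+            # `valid_*` on the periodic validation pass, and `full_valid_*` on
+            # the full validation file (an inference from the field names);
+            # `step` is the step at which the metrics were recorded.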
+ sig do + params( + full_valid_loss: Float, + full_valid_mean_token_accuracy: Float, + step: Float, + train_loss: Float, + train_mean_token_accuracy: Float, + valid_loss: Float, + valid_mean_token_accuracy: Float + ).returns(T.attached_class) + end + def self.new( + full_valid_loss: nil, + full_valid_mean_token_accuracy: nil, + step: nil, + train_loss: nil, + train_mean_token_accuracy: nil, + valid_loss: nil, + valid_mean_token_accuracy: nil + ) + end + + sig do + override.returns( + { + full_valid_loss: Float, + full_valid_mean_token_accuracy: Float, + step: Float, + train_loss: Float, + train_mean_token_accuracy: Float, + valid_loss: Float, + valid_mean_token_accuracy: Float + } + ) + end + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/reinforcement_hyperparameters.rbi b/rbi/openai/models/fine_tuning/reinforcement_hyperparameters.rbi new file mode 100644 index 00000000..3105e2ef --- /dev/null +++ b/rbi/openai/models/fine_tuning/reinforcement_hyperparameters.rbi @@ -0,0 +1,281 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + class ReinforcementHyperparameters < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::ReinforcementHyperparameters, + OpenAI::Internal::AnyHash + ) + end + + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + sig { returns(T.nilable(T.any(Symbol, Integer))) } + attr_reader :batch_size + + sig { params(batch_size: T.any(Symbol, Integer)).void } + attr_writer :batch_size + + # Multiplier on amount of compute used for exploring search space during training. + sig { returns(T.nilable(T.any(Symbol, Float))) } + attr_reader :compute_multiplier + + sig { params(compute_multiplier: T.any(Symbol, Float)).void } + attr_writer :compute_multiplier + + # The number of training steps between evaluation runs. + sig { returns(T.nilable(T.any(Symbol, Integer))) } + attr_reader :eval_interval + + sig { params(eval_interval: T.any(Symbol, Integer)).void } + attr_writer :eval_interval + + # Number of evaluation samples to generate per training step. + sig { returns(T.nilable(T.any(Symbol, Integer))) } + attr_reader :eval_samples + + sig { params(eval_samples: T.any(Symbol, Integer)).void } + attr_writer :eval_samples + + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + sig { returns(T.nilable(T.any(Symbol, Float))) } + attr_reader :learning_rate_multiplier + + sig { params(learning_rate_multiplier: T.any(Symbol, Float)).void } + attr_writer :learning_rate_multiplier + + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + sig { returns(T.nilable(T.any(Symbol, Integer))) } + attr_reader :n_epochs + + sig { params(n_epochs: T.any(Symbol, Integer)).void } + attr_writer :n_epochs + + # Level of reasoning effort. + sig do + returns( + T.nilable( + OpenAI::FineTuning::ReinforcementHyperparameters::ReasoningEffort::OrSymbol + ) + ) + end + attr_reader :reasoning_effort + + sig do + params( + reasoning_effort: + OpenAI::FineTuning::ReinforcementHyperparameters::ReasoningEffort::OrSymbol + ).void + end + attr_writer :reasoning_effort + + # The hyperparameters used for the reinforcement fine-tuning job. 
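+        #
+        # An explicit configuration sketch (values illustrative):
+        #   OpenAI::FineTuning::ReinforcementHyperparameters.new(
+        #     n_epochs: 3,
+        #     eval_interval: 10,
+        #     reasoning_effort: :medium
+        #   )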
+ sig do + params( + batch_size: T.any(Symbol, Integer), + compute_multiplier: T.any(Symbol, Float), + eval_interval: T.any(Symbol, Integer), + eval_samples: T.any(Symbol, Integer), + learning_rate_multiplier: T.any(Symbol, Float), + n_epochs: T.any(Symbol, Integer), + reasoning_effort: + OpenAI::FineTuning::ReinforcementHyperparameters::ReasoningEffort::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + batch_size: nil, + # Multiplier on amount of compute used for exploring search space during training. + compute_multiplier: nil, + # The number of training steps between evaluation runs. + eval_interval: nil, + # Number of evaluation samples to generate per training step. + eval_samples: nil, + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + learning_rate_multiplier: nil, + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + n_epochs: nil, + # Level of reasoning effort. + reasoning_effort: nil + ) + end + + sig do + override.returns( + { + batch_size: T.any(Symbol, Integer), + compute_multiplier: T.any(Symbol, Float), + eval_interval: T.any(Symbol, Integer), + eval_samples: T.any(Symbol, Integer), + learning_rate_multiplier: T.any(Symbol, Float), + n_epochs: T.any(Symbol, Integer), + reasoning_effort: + OpenAI::FineTuning::ReinforcementHyperparameters::ReasoningEffort::OrSymbol + } + ) + end + def to_hash + end + + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + module BatchSize + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Integer) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::ReinforcementHyperparameters::BatchSize::Variants + ] + ) + end + def self.variants + end + end + + # Multiplier on amount of compute used for exploring search space during training. + module ComputeMultiplier + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Float) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::ReinforcementHyperparameters::ComputeMultiplier::Variants + ] + ) + end + def self.variants + end + end + + # The number of training steps between evaluation runs. + module EvalInterval + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Integer) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::ReinforcementHyperparameters::EvalInterval::Variants + ] + ) + end + def self.variants + end + end + + # Number of evaluation samples to generate per training step. + module EvalSamples + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Integer) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::ReinforcementHyperparameters::EvalSamples::Variants + ] + ) + end + def self.variants + end + end + + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + module LearningRateMultiplier + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Float) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::ReinforcementHyperparameters::LearningRateMultiplier::Variants + ] + ) + end + def self.variants + end + end + + # The number of epochs to train the model for. 
An epoch refers to one full cycle + # through the training dataset. + module NEpochs + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Integer) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::ReinforcementHyperparameters::NEpochs::Variants + ] + ) + end + def self.variants + end + end + + # Level of reasoning effort. + module ReasoningEffort + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::FineTuning::ReinforcementHyperparameters::ReasoningEffort + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + DEFAULT = + T.let( + :default, + OpenAI::FineTuning::ReinforcementHyperparameters::ReasoningEffort::TaggedSymbol + ) + LOW = + T.let( + :low, + OpenAI::FineTuning::ReinforcementHyperparameters::ReasoningEffort::TaggedSymbol + ) + MEDIUM = + T.let( + :medium, + OpenAI::FineTuning::ReinforcementHyperparameters::ReasoningEffort::TaggedSymbol + ) + HIGH = + T.let( + :high, + OpenAI::FineTuning::ReinforcementHyperparameters::ReasoningEffort::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::ReinforcementHyperparameters::ReasoningEffort::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/reinforcement_method.rbi b/rbi/openai/models/fine_tuning/reinforcement_method.rbi new file mode 100644 index 00000000..f49c9b5c --- /dev/null +++ b/rbi/openai/models/fine_tuning/reinforcement_method.rbi @@ -0,0 +1,112 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + class ReinforcementMethod < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::ReinforcementMethod, + OpenAI::Internal::AnyHash + ) + end + + # The grader used for the fine-tuning job. + sig do + returns( + T.any( + OpenAI::Graders::StringCheckGrader, + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Graders::PythonGrader, + OpenAI::Graders::ScoreModelGrader, + OpenAI::Graders::MultiGrader + ) + ) + end + attr_accessor :grader + + # The hyperparameters used for the reinforcement fine-tuning job. + sig do + returns(T.nilable(OpenAI::FineTuning::ReinforcementHyperparameters)) + end + attr_reader :hyperparameters + + sig do + params( + hyperparameters: + OpenAI::FineTuning::ReinforcementHyperparameters::OrHash + ).void + end + attr_writer :hyperparameters + + # Configuration for the reinforcement fine-tuning method. + sig do + params( + grader: + T.any( + OpenAI::Graders::StringCheckGrader::OrHash, + OpenAI::Graders::TextSimilarityGrader::OrHash, + OpenAI::Graders::PythonGrader::OrHash, + OpenAI::Graders::ScoreModelGrader::OrHash, + OpenAI::Graders::MultiGrader::OrHash + ), + hyperparameters: + OpenAI::FineTuning::ReinforcementHyperparameters::OrHash + ).returns(T.attached_class) + end + def self.new( + # The grader used for the fine-tuning job. + grader:, + # The hyperparameters used for the reinforcement fine-tuning job. + hyperparameters: nil + ) + end + + sig do + override.returns( + { + grader: + T.any( + OpenAI::Graders::StringCheckGrader, + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Graders::PythonGrader, + OpenAI::Graders::ScoreModelGrader, + OpenAI::Graders::MultiGrader + ), + hyperparameters: OpenAI::FineTuning::ReinforcementHyperparameters + } + ) + end + def to_hash + end + + # The grader used for the fine-tuning job. 
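+        #
+        # Illustrative sketch: the permitted grader types can be inspected at
+        # runtime via the union's `variants` accessor defined below, e.g.
+        #
+        #   OpenAI::FineTuning::ReinforcementMethod::Grader.variants
+        #   # => [OpenAI::Graders::StringCheckGrader, ..., OpenAI::Graders::MultiGrader]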
+ module Grader + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Graders::StringCheckGrader, + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Graders::PythonGrader, + OpenAI::Graders::ScoreModelGrader, + OpenAI::Graders::MultiGrader + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::ReinforcementMethod::Grader::Variants + ] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/fine_tuning/supervised_hyperparameters.rbi b/rbi/openai/models/fine_tuning/supervised_hyperparameters.rbi new file mode 100644 index 00000000..dc8167ab --- /dev/null +++ b/rbi/openai/models/fine_tuning/supervised_hyperparameters.rbi @@ -0,0 +1,128 @@ +# typed: strong + +module OpenAI + module Models + module FineTuning + class SupervisedHyperparameters < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::FineTuning::SupervisedHyperparameters, + OpenAI::Internal::AnyHash + ) + end + + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + sig { returns(T.nilable(T.any(Symbol, Integer))) } + attr_reader :batch_size + + sig { params(batch_size: T.any(Symbol, Integer)).void } + attr_writer :batch_size + + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + sig { returns(T.nilable(T.any(Symbol, Float))) } + attr_reader :learning_rate_multiplier + + sig { params(learning_rate_multiplier: T.any(Symbol, Float)).void } + attr_writer :learning_rate_multiplier + + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + sig { returns(T.nilable(T.any(Symbol, Integer))) } + attr_reader :n_epochs + + sig { params(n_epochs: T.any(Symbol, Integer)).void } + attr_writer :n_epochs + + # The hyperparameters used for the fine-tuning job. + sig do + params( + batch_size: T.any(Symbol, Integer), + learning_rate_multiplier: T.any(Symbol, Float), + n_epochs: T.any(Symbol, Integer) + ).returns(T.attached_class) + end + def self.new( + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + batch_size: nil, + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. + learning_rate_multiplier: nil, + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. + n_epochs: nil + ) + end + + sig do + override.returns( + { + batch_size: T.any(Symbol, Integer), + learning_rate_multiplier: T.any(Symbol, Float), + n_epochs: T.any(Symbol, Integer) + } + ) + end + def to_hash + end + + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. + module BatchSize + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(Symbol, Integer) } + + sig do + override.returns( + T::Array[ + OpenAI::FineTuning::SupervisedHyperparameters::BatchSize::Variants + ] + ) + end + def self.variants + end + end + + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. 
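+        #
+        # Illustrative values for this union (assumption: the `Symbol` arm is the
+        # `:auto` sentinel): either `:auto` or a plain multiplier such as `1.8`.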
+        module LearningRateMultiplier
+          extend OpenAI::Internal::Type::Union
+
+          Variants = T.type_alias { T.any(Symbol, Float) }
+
+          sig do
+            override.returns(
+              T::Array[
+                OpenAI::FineTuning::SupervisedHyperparameters::LearningRateMultiplier::Variants
+              ]
+            )
+          end
+          def self.variants
+          end
+        end
+
+        # The number of epochs to train the model for. An epoch refers to one full cycle
+        # through the training dataset.
+        module NEpochs
+          extend OpenAI::Internal::Type::Union
+
+          Variants = T.type_alias { T.any(Symbol, Integer) }
+
+          sig do
+            override.returns(
+              T::Array[
+                OpenAI::FineTuning::SupervisedHyperparameters::NEpochs::Variants
+              ]
+            )
+          end
+          def self.variants
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/fine_tuning/supervised_method.rbi b/rbi/openai/models/fine_tuning/supervised_method.rbi
new file mode 100644
index 00000000..8b4b000e
--- /dev/null
+++ b/rbi/openai/models/fine_tuning/supervised_method.rbi
@@ -0,0 +1,52 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module FineTuning
+      class SupervisedMethod < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::FineTuning::SupervisedMethod,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The hyperparameters used for the fine-tuning job.
+        sig do
+          returns(T.nilable(OpenAI::FineTuning::SupervisedHyperparameters))
+        end
+        attr_reader :hyperparameters
+
+        sig do
+          params(
+            hyperparameters:
+              OpenAI::FineTuning::SupervisedHyperparameters::OrHash
+          ).void
+        end
+        attr_writer :hyperparameters
+
+        # Configuration for the supervised fine-tuning method.
+        sig do
+          params(
+            hyperparameters:
+              OpenAI::FineTuning::SupervisedHyperparameters::OrHash
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # The hyperparameters used for the fine-tuning job.
+          hyperparameters: nil
+        )
+        end
+
+        sig do
+          override.returns(
+            { hyperparameters: OpenAI::FineTuning::SupervisedHyperparameters }
+          )
+        end
+        def to_hash
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/function_definition.rbi b/rbi/openai/models/function_definition.rbi
new file mode 100644
index 00000000..413025fd
--- /dev/null
+++ b/rbi/openai/models/function_definition.rbi
@@ -0,0 +1,91 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    class FunctionDefinition < OpenAI::Internal::Type::BaseModel
+      OrHash =
+        T.type_alias do
+          T.any(OpenAI::FunctionDefinition, OpenAI::Internal::AnyHash)
+        end
+
+      # The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
+      # underscores and dashes, with a maximum length of 64.
+      sig { returns(String) }
+      attr_accessor :name
+
+      # A description of what the function does, used by the model to choose when and
+      # how to call the function.
+      sig { returns(T.nilable(String)) }
+      attr_reader :description
+
+      sig { params(description: String).void }
+      attr_writer :description
+
+      # The parameters the function accepts, described as a JSON Schema object. See the
+      # [guide](https://platform.openai.com/docs/guides/function-calling) for examples,
+      # and the
+      # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+      # documentation about the format.
+      #
+      # Omitting `parameters` defines a function with an empty parameter list.
+      sig { returns(T.nilable(T::Hash[Symbol, T.anything])) }
+      attr_reader :parameters
+
+      sig { params(parameters: T::Hash[Symbol, T.anything]).void }
+      attr_writer :parameters
+
+      # Whether to enable strict schema adherence when generating the function call. If
+      # set to true, the model will follow the exact schema defined in the `parameters`
+      # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
+      # more about Structured Outputs in the
+      # [function calling guide](https://platform.openai.com/docs/guides/function-calling).
+      sig { returns(T.nilable(T::Boolean)) }
+      attr_accessor :strict
+
+      sig do
+        params(
+          name: String,
+          description: String,
+          parameters: T::Hash[Symbol, T.anything],
+          strict: T.nilable(T::Boolean)
+        ).returns(T.attached_class)
+      end
+      def self.new(
+        # The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
+        # underscores and dashes, with a maximum length of 64.
+        name:,
+        # A description of what the function does, used by the model to choose when and
+        # how to call the function.
+        description: nil,
+        # The parameters the function accepts, described as a JSON Schema object. See the
+        # [guide](https://platform.openai.com/docs/guides/function-calling) for examples,
+        # and the
+        # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+        # documentation about the format.
+        #
+        # Omitting `parameters` defines a function with an empty parameter list.
+        parameters: nil,
+        # Whether to enable strict schema adherence when generating the function call. If
+        # set to true, the model will follow the exact schema defined in the `parameters`
+        # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
+        # more about Structured Outputs in the
+        # [function calling guide](https://platform.openai.com/docs/guides/function-calling).
+        strict: nil
+      )
+      end
+
+      sig do
+        override.returns(
+          {
+            name: String,
+            description: String,
+            parameters: T::Hash[Symbol, T.anything],
+            strict: T.nilable(T::Boolean)
+          }
+        )
+      end
+      def to_hash
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/function_parameters.rbi b/rbi/openai/models/function_parameters.rbi
new file mode 100644
index 00000000..f7b78f39
--- /dev/null
+++ b/rbi/openai/models/function_parameters.rbi
@@ -0,0 +1,11 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    FunctionParameters =
+      T.let(
+        OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown],
+        OpenAI::Internal::Type::Converter
+      )
+  end
+end
diff --git a/rbi/openai/models/graders/label_model_grader.rbi b/rbi/openai/models/graders/label_model_grader.rbi
new file mode 100644
index 00000000..22a3f239
--- /dev/null
+++ b/rbi/openai/models/graders/label_model_grader.rbi
@@ -0,0 +1,367 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    LabelModelGrader = Graders::LabelModelGrader
+
+    module Graders
+      class LabelModelGrader < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(OpenAI::Graders::LabelModelGrader, OpenAI::Internal::AnyHash)
+          end
+
+        sig { returns(T::Array[OpenAI::Graders::LabelModelGrader::Input]) }
+        attr_accessor :input
+
+        # The labels to assign to each item in the evaluation.
+        sig { returns(T::Array[String]) }
+        attr_accessor :labels
+
+        # The model to use for the evaluation. Must support structured outputs.
+        sig { returns(String) }
+        attr_accessor :model
+
+        # The name of the grader.
+        sig { returns(String) }
+        attr_accessor :name
+
+        # The labels that indicate a passing result. Must be a subset of labels.
+        sig { returns(T::Array[String]) }
+        attr_accessor :passing_labels
+
+        # The object type, which is always `label_model`.
+        sig { returns(Symbol) }
+        attr_accessor :type
+
+        # A LabelModelGrader object which uses a model to assign labels to each item in
+        # the evaluation.
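+        #
+        # Illustrative construction sketch (the model name and template
+        # placeholders are made-up examples; `input` items may be given as hashes
+        # via `Input::OrHash`):
+        #
+        #   grader = OpenAI::Graders::LabelModelGrader.new(
+        #     input: [{ role: :user, content: "Is this correct? {{item.question}}" }],
+        #     labels: ["correct", "incorrect"],
+        #     model: "gpt-4o-2024-08-06",
+        #     name: "label-grader",
+        #     passing_labels: ["correct"]
+        #   )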
+ sig do + params( + input: T::Array[OpenAI::Graders::LabelModelGrader::Input::OrHash], + labels: T::Array[String], + model: String, + name: String, + passing_labels: T::Array[String], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + input:, + # The labels to assign to each item in the evaluation. + labels:, + # The model to use for the evaluation. Must support structured outputs. + model:, + # The name of the grader. + name:, + # The labels that indicate a passing result. Must be a subset of labels. + passing_labels:, + # The object type, which is always `label_model`. + type: :label_model + ) + end + + sig do + override.returns( + { + input: T::Array[OpenAI::Graders::LabelModelGrader::Input], + labels: T::Array[String], + model: String, + name: String, + passing_labels: T::Array[String], + type: Symbol + } + ) + end + def to_hash + end + + class Input < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Graders::LabelModelGrader::Input, + OpenAI::Internal::AnyHash + ) + end + + # Inputs to the model - can contain template strings. + sig do + returns( + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Graders::LabelModelGrader::Input::Content::OutputText, + OpenAI::Graders::LabelModelGrader::Input::Content::InputImage, + T::Array[T.anything] + ) + ) + end + attr_accessor :content + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + sig do + returns(OpenAI::Graders::LabelModelGrader::Input::Role::OrSymbol) + end + attr_accessor :role + + # The type of the message input. Always `message`. + sig do + returns( + T.nilable( + OpenAI::Graders::LabelModelGrader::Input::Type::OrSymbol + ) + ) + end + attr_reader :type + + sig do + params( + type: OpenAI::Graders::LabelModelGrader::Input::Type::OrSymbol + ).void + end + attr_writer :type + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + sig do + params( + content: + T.any( + String, + OpenAI::Responses::ResponseInputText::OrHash, + OpenAI::Graders::LabelModelGrader::Input::Content::OutputText::OrHash, + OpenAI::Graders::LabelModelGrader::Input::Content::InputImage::OrHash, + T::Array[T.anything] + ), + role: OpenAI::Graders::LabelModelGrader::Input::Role::OrSymbol, + type: OpenAI::Graders::LabelModelGrader::Input::Type::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # Inputs to the model - can contain template strings. + content:, + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + role:, + # The type of the message input. Always `message`. + type: nil + ) + end + + sig do + override.returns( + { + content: + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Graders::LabelModelGrader::Input::Content::OutputText, + OpenAI::Graders::LabelModelGrader::Input::Content::InputImage, + T::Array[T.anything] + ), + role: OpenAI::Graders::LabelModelGrader::Input::Role::OrSymbol, + type: OpenAI::Graders::LabelModelGrader::Input::Type::OrSymbol + } + ) + end + def to_hash + end + + # Inputs to the model - can contain template strings. 
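+          #
+          # Illustrative content values (made-up examples): a bare string such as
+          # "Grade: {{item.question}}", or an `OutputText` given as a hash, e.g.
+          # `{ text: "model answer", type: :output_text }`.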
+ module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Graders::LabelModelGrader::Input::Content::OutputText, + OpenAI::Graders::LabelModelGrader::Input::Content::InputImage, + T::Array[T.anything] + ) + end + + class OutputText < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Graders::LabelModelGrader::Input::Content::OutputText, + OpenAI::Internal::AnyHash + ) + end + + # The text output from the model. + sig { returns(String) } + attr_accessor :text + + # The type of the output text. Always `output_text`. + sig { returns(Symbol) } + attr_accessor :type + + # A text output from the model. + sig do + params(text: String, type: Symbol).returns(T.attached_class) + end + def self.new( + # The text output from the model. + text:, + # The type of the output text. Always `output_text`. + type: :output_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + + class InputImage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Graders::LabelModelGrader::Input::Content::InputImage, + OpenAI::Internal::AnyHash + ) + end + + # The URL of the image input. + sig { returns(String) } + attr_accessor :image_url + + # The type of the image input. Always `input_image`. + sig { returns(Symbol) } + attr_accessor :type + + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + sig { returns(T.nilable(String)) } + attr_reader :detail + + sig { params(detail: String).void } + attr_writer :detail + + # An image input to the model. + sig do + params(image_url: String, detail: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The URL of the image input. + image_url:, + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + detail: nil, + # The type of the image input. Always `input_image`. + type: :input_image + ) + end + + sig do + override.returns( + { image_url: String, type: Symbol, detail: String } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Graders::LabelModelGrader::Input::Content::Variants + ] + ) + end + def self.variants + end + + AnArrayOfInputTextAndInputImageArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + OpenAI::Internal::Type::Unknown + ], + OpenAI::Internal::Type::Converter + ) + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Graders::LabelModelGrader::Input::Role) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::Graders::LabelModelGrader::Input::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::Graders::LabelModelGrader::Input::Role::TaggedSymbol + ) + SYSTEM = + T.let( + :system, + OpenAI::Graders::LabelModelGrader::Input::Role::TaggedSymbol + ) + DEVELOPER = + T.let( + :developer, + OpenAI::Graders::LabelModelGrader::Input::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Graders::LabelModelGrader::Input::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The type of the message input. Always `message`. 
+ module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Graders::LabelModelGrader::Input::Type) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MESSAGE = + T.let( + :message, + OpenAI::Graders::LabelModelGrader::Input::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Graders::LabelModelGrader::Input::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/graders/multi_grader.rbi b/rbi/openai/models/graders/multi_grader.rbi new file mode 100644 index 00000000..63f598ad --- /dev/null +++ b/rbi/openai/models/graders/multi_grader.rbi @@ -0,0 +1,118 @@ +# typed: strong + +module OpenAI + module Models + MultiGrader = Graders::MultiGrader + + module Graders + class MultiGrader < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Graders::MultiGrader, OpenAI::Internal::AnyHash) + end + + # A formula to calculate the output based on grader results. + sig { returns(String) } + attr_accessor :calculate_output + + # A StringCheckGrader object that performs a string comparison between input and + # reference using a specified operation. + sig do + returns( + T.any( + OpenAI::Graders::StringCheckGrader, + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Graders::PythonGrader, + OpenAI::Graders::ScoreModelGrader, + OpenAI::Graders::LabelModelGrader + ) + ) + end + attr_accessor :graders + + # The name of the grader. + sig { returns(String) } + attr_accessor :name + + # The object type, which is always `multi`. + sig { returns(Symbol) } + attr_accessor :type + + # A MultiGrader object combines the output of multiple graders to produce a single + # score. + sig do + params( + calculate_output: String, + graders: + T.any( + OpenAI::Graders::StringCheckGrader::OrHash, + OpenAI::Graders::TextSimilarityGrader::OrHash, + OpenAI::Graders::PythonGrader::OrHash, + OpenAI::Graders::ScoreModelGrader::OrHash, + OpenAI::Graders::LabelModelGrader::OrHash + ), + name: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # A formula to calculate the output based on grader results. + calculate_output:, + # A StringCheckGrader object that performs a string comparison between input and + # reference using a specified operation. + graders:, + # The name of the grader. + name:, + # The object type, which is always `multi`. + type: :multi + ) + end + + sig do + override.returns( + { + calculate_output: String, + graders: + T.any( + OpenAI::Graders::StringCheckGrader, + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Graders::PythonGrader, + OpenAI::Graders::ScoreModelGrader, + OpenAI::Graders::LabelModelGrader + ), + name: String, + type: Symbol + } + ) + end + def to_hash + end + + # A StringCheckGrader object that performs a string comparison between input and + # reference using a specified operation. 
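+        #
+        # Illustrative sketch (made-up values; per the signature above, `graders`
+        # takes a single member of this union, here a StringCheckGrader hash, and
+        # the `calculate_output` formula syntax is an assumption):
+        #
+        #   multi = OpenAI::Graders::MultiGrader.new(
+        #     calculate_output: "exact.score",
+        #     graders: { input: "{{sample.output_text}}", name: "exact",
+        #                operation: :eq, reference: "{{item.answer}}",
+        #                type: :string_check },
+        #     name: "combined"
+        #   )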
+ module Graders + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Graders::StringCheckGrader, + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Graders::PythonGrader, + OpenAI::Graders::ScoreModelGrader, + OpenAI::Graders::LabelModelGrader + ) + end + + sig do + override.returns( + T::Array[OpenAI::Graders::MultiGrader::Graders::Variants] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/graders/python_grader.rbi b/rbi/openai/models/graders/python_grader.rbi new file mode 100644 index 00000000..ed504598 --- /dev/null +++ b/rbi/openai/models/graders/python_grader.rbi @@ -0,0 +1,64 @@ +# typed: strong + +module OpenAI + module Models + PythonGrader = Graders::PythonGrader + + module Graders + class PythonGrader < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Graders::PythonGrader, OpenAI::Internal::AnyHash) + end + + # The name of the grader. + sig { returns(String) } + attr_accessor :name + + # The source code of the python script. + sig { returns(String) } + attr_accessor :source + + # The object type, which is always `python`. + sig { returns(Symbol) } + attr_accessor :type + + # The image tag to use for the python script. + sig { returns(T.nilable(String)) } + attr_reader :image_tag + + sig { params(image_tag: String).void } + attr_writer :image_tag + + # A PythonGrader object that runs a python script on the input. + sig do + params( + name: String, + source: String, + image_tag: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The name of the grader. + name:, + # The source code of the python script. + source:, + # The image tag to use for the python script. + image_tag: nil, + # The object type, which is always `python`. + type: :python + ) + end + + sig do + override.returns( + { name: String, source: String, type: Symbol, image_tag: String } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/graders/score_model_grader.rbi b/rbi/openai/models/graders/score_model_grader.rbi new file mode 100644 index 00000000..7baa4347 --- /dev/null +++ b/rbi/openai/models/graders/score_model_grader.rbi @@ -0,0 +1,374 @@ +# typed: strong + +module OpenAI + module Models + ScoreModelGrader = Graders::ScoreModelGrader + + module Graders + class ScoreModelGrader < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Graders::ScoreModelGrader, OpenAI::Internal::AnyHash) + end + + # The input text. This may include template strings. + sig { returns(T::Array[OpenAI::Graders::ScoreModelGrader::Input]) } + attr_accessor :input + + # The model to use for the evaluation. + sig { returns(String) } + attr_accessor :model + + # The name of the grader. + sig { returns(String) } + attr_accessor :name + + # The object type, which is always `score_model`. + sig { returns(Symbol) } + attr_accessor :type + + # The range of the score. Defaults to `[0, 1]`. + sig { returns(T.nilable(T::Array[Float])) } + attr_reader :range + + sig { params(range: T::Array[Float]).void } + attr_writer :range + + # The sampling parameters for the model. + sig { returns(T.nilable(T.anything)) } + attr_reader :sampling_params + + sig { params(sampling_params: T.anything).void } + attr_writer :sampling_params + + # A ScoreModelGrader object that uses a model to assign a score to the input. 
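+        #
+        # Illustrative construction sketch (the model name, template placeholders,
+        # and range below are made-up examples):
+        #
+        #   grader = OpenAI::Graders::ScoreModelGrader.new(
+        #     input: [{ role: :user, content: "Rate: {{sample.output_text}}" }],
+        #     model: "o3-mini",
+        #     name: "score-grader",
+        #     range: [0.0, 1.0]
+        #   )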
+ sig do + params( + input: T::Array[OpenAI::Graders::ScoreModelGrader::Input::OrHash], + model: String, + name: String, + range: T::Array[Float], + sampling_params: T.anything, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The input text. This may include template strings. + input:, + # The model to use for the evaluation. + model:, + # The name of the grader. + name:, + # The range of the score. Defaults to `[0, 1]`. + range: nil, + # The sampling parameters for the model. + sampling_params: nil, + # The object type, which is always `score_model`. + type: :score_model + ) + end + + sig do + override.returns( + { + input: T::Array[OpenAI::Graders::ScoreModelGrader::Input], + model: String, + name: String, + type: Symbol, + range: T::Array[Float], + sampling_params: T.anything + } + ) + end + def to_hash + end + + class Input < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Graders::ScoreModelGrader::Input, + OpenAI::Internal::AnyHash + ) + end + + # Inputs to the model - can contain template strings. + sig do + returns( + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText, + OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage, + T::Array[T.anything] + ) + ) + end + attr_accessor :content + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + sig do + returns(OpenAI::Graders::ScoreModelGrader::Input::Role::OrSymbol) + end + attr_accessor :role + + # The type of the message input. Always `message`. + sig do + returns( + T.nilable( + OpenAI::Graders::ScoreModelGrader::Input::Type::OrSymbol + ) + ) + end + attr_reader :type + + sig do + params( + type: OpenAI::Graders::ScoreModelGrader::Input::Type::OrSymbol + ).void + end + attr_writer :type + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + sig do + params( + content: + T.any( + String, + OpenAI::Responses::ResponseInputText::OrHash, + OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText::OrHash, + OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage::OrHash, + T::Array[T.anything] + ), + role: OpenAI::Graders::ScoreModelGrader::Input::Role::OrSymbol, + type: OpenAI::Graders::ScoreModelGrader::Input::Type::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # Inputs to the model - can contain template strings. + content:, + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + role:, + # The type of the message input. Always `message`. + type: nil + ) + end + + sig do + override.returns( + { + content: + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText, + OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage, + T::Array[T.anything] + ), + role: OpenAI::Graders::ScoreModelGrader::Input::Role::OrSymbol, + type: OpenAI::Graders::ScoreModelGrader::Input::Type::OrSymbol + } + ) + end + def to_hash + end + + # Inputs to the model - can contain template strings. 
+ module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText, + OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage, + T::Array[T.anything] + ) + end + + class OutputText < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText, + OpenAI::Internal::AnyHash + ) + end + + # The text output from the model. + sig { returns(String) } + attr_accessor :text + + # The type of the output text. Always `output_text`. + sig { returns(Symbol) } + attr_accessor :type + + # A text output from the model. + sig do + params(text: String, type: Symbol).returns(T.attached_class) + end + def self.new( + # The text output from the model. + text:, + # The type of the output text. Always `output_text`. + type: :output_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + + class InputImage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage, + OpenAI::Internal::AnyHash + ) + end + + # The URL of the image input. + sig { returns(String) } + attr_accessor :image_url + + # The type of the image input. Always `input_image`. + sig { returns(Symbol) } + attr_accessor :type + + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + sig { returns(T.nilable(String)) } + attr_reader :detail + + sig { params(detail: String).void } + attr_writer :detail + + # An image input to the model. + sig do + params(image_url: String, detail: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The URL of the image input. + image_url:, + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + detail: nil, + # The type of the image input. Always `input_image`. + type: :input_image + ) + end + + sig do + override.returns( + { image_url: String, type: Symbol, detail: String } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Graders::ScoreModelGrader::Input::Content::Variants + ] + ) + end + def self.variants + end + + AnArrayOfInputTextAndInputImageArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + OpenAI::Internal::Type::Unknown + ], + OpenAI::Internal::Type::Converter + ) + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Graders::ScoreModelGrader::Input::Role) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::Graders::ScoreModelGrader::Input::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::Graders::ScoreModelGrader::Input::Role::TaggedSymbol + ) + SYSTEM = + T.let( + :system, + OpenAI::Graders::ScoreModelGrader::Input::Role::TaggedSymbol + ) + DEVELOPER = + T.let( + :developer, + OpenAI::Graders::ScoreModelGrader::Input::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Graders::ScoreModelGrader::Input::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The type of the message input. Always `message`. 
+ module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Graders::ScoreModelGrader::Input::Type) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MESSAGE = + T.let( + :message, + OpenAI::Graders::ScoreModelGrader::Input::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Graders::ScoreModelGrader::Input::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/graders/string_check_grader.rbi b/rbi/openai/models/graders/string_check_grader.rbi new file mode 100644 index 00000000..f46aca63 --- /dev/null +++ b/rbi/openai/models/graders/string_check_grader.rbi @@ -0,0 +1,118 @@ +# typed: strong + +module OpenAI + module Models + StringCheckGrader = Graders::StringCheckGrader + + module Graders + class StringCheckGrader < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Graders::StringCheckGrader, OpenAI::Internal::AnyHash) + end + + # The input text. This may include template strings. + sig { returns(String) } + attr_accessor :input + + # The name of the grader. + sig { returns(String) } + attr_accessor :name + + # The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`. + sig { returns(OpenAI::Graders::StringCheckGrader::Operation::OrSymbol) } + attr_accessor :operation + + # The reference text. This may include template strings. + sig { returns(String) } + attr_accessor :reference + + # The object type, which is always `string_check`. + sig { returns(Symbol) } + attr_accessor :type + + # A StringCheckGrader object that performs a string comparison between input and + # reference using a specified operation. + sig do + params( + input: String, + name: String, + operation: OpenAI::Graders::StringCheckGrader::Operation::OrSymbol, + reference: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The input text. This may include template strings. + input:, + # The name of the grader. + name:, + # The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`. + operation:, + # The reference text. This may include template strings. + reference:, + # The object type, which is always `string_check`. + type: :string_check + ) + end + + sig do + override.returns( + { + input: String, + name: String, + operation: + OpenAI::Graders::StringCheckGrader::Operation::OrSymbol, + reference: String, + type: Symbol + } + ) + end + def to_hash + end + + # The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`. 
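+        #
+        # Illustrative usage (made-up values; `eq`/`ne` are assumed to be exact and
+        # negated equality, `like`/`ilike` case-sensitive and case-insensitive
+        # pattern matches):
+        #
+        #   OpenAI::Graders::StringCheckGrader.new(
+        #     input: "{{sample.output_text}}",
+        #     name: "exact-match",
+        #     operation: :eq,
+        #     reference: "{{item.answer}}"
+        #   )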
+ module Operation + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Graders::StringCheckGrader::Operation) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EQ = + T.let( + :eq, + OpenAI::Graders::StringCheckGrader::Operation::TaggedSymbol + ) + NE = + T.let( + :ne, + OpenAI::Graders::StringCheckGrader::Operation::TaggedSymbol + ) + LIKE = + T.let( + :like, + OpenAI::Graders::StringCheckGrader::Operation::TaggedSymbol + ) + ILIKE = + T.let( + :ilike, + OpenAI::Graders::StringCheckGrader::Operation::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Graders::StringCheckGrader::Operation::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/graders/text_similarity_grader.rbi b/rbi/openai/models/graders/text_similarity_grader.rbi new file mode 100644 index 00000000..389c8b3b --- /dev/null +++ b/rbi/openai/models/graders/text_similarity_grader.rbi @@ -0,0 +1,166 @@ +# typed: strong + +module OpenAI + module Models + TextSimilarityGrader = Graders::TextSimilarityGrader + + module Graders + class TextSimilarityGrader < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Graders::TextSimilarityGrader, + OpenAI::Internal::AnyHash + ) + end + + # The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, + # `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + sig do + returns( + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::OrSymbol + ) + end + attr_accessor :evaluation_metric + + # The text being graded. + sig { returns(String) } + attr_accessor :input + + # The name of the grader. + sig { returns(String) } + attr_accessor :name + + # The text being graded against. + sig { returns(String) } + attr_accessor :reference + + # The type of grader. + sig { returns(Symbol) } + attr_accessor :type + + # A TextSimilarityGrader object which grades text based on similarity metrics. + sig do + params( + evaluation_metric: + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::OrSymbol, + input: String, + name: String, + reference: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, + # `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + evaluation_metric:, + # The text being graded. + input:, + # The name of the grader. + name:, + # The text being graded against. + reference:, + # The type of grader. + type: :text_similarity + ) + end + + sig do + override.returns( + { + evaluation_metric: + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::OrSymbol, + input: String, + name: String, + reference: String, + type: Symbol + } + ) + end + def to_hash + end + + # The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, + # `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. 
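+        #
+        # Illustrative usage (made-up values): any enum member may be passed as a
+        # bare symbol, e.g.
+        #
+        #   OpenAI::Graders::TextSimilarityGrader.new(
+        #     evaluation_metric: :fuzzy_match,
+        #     input: "{{sample.output_text}}",
+        #     name: "similarity",
+        #     reference: "{{item.answer}}"
+        #   )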
+ module EvaluationMetric + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + COSINE = + T.let( + :cosine, + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::TaggedSymbol + ) + FUZZY_MATCH = + T.let( + :fuzzy_match, + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::TaggedSymbol + ) + BLEU = + T.let( + :bleu, + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::TaggedSymbol + ) + GLEU = + T.let( + :gleu, + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::TaggedSymbol + ) + METEOR = + T.let( + :meteor, + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::TaggedSymbol + ) + ROUGE_1 = + T.let( + :rouge_1, + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::TaggedSymbol + ) + ROUGE_2 = + T.let( + :rouge_2, + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::TaggedSymbol + ) + ROUGE_3 = + T.let( + :rouge_3, + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::TaggedSymbol + ) + ROUGE_4 = + T.let( + :rouge_4, + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::TaggedSymbol + ) + ROUGE_5 = + T.let( + :rouge_5, + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::TaggedSymbol + ) + ROUGE_L = + T.let( + :rouge_l, + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Graders::TextSimilarityGrader::EvaluationMetric::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/image.rbi b/rbi/openai/models/image.rbi new file mode 100644 index 00000000..6c01cf82 --- /dev/null +++ b/rbi/openai/models/image.rbi @@ -0,0 +1,62 @@ +# typed: strong + +module OpenAI + module Models + class Image < OpenAI::Internal::Type::BaseModel + OrHash = T.type_alias { T.any(OpenAI::Image, OpenAI::Internal::AnyHash) } + + # The base64-encoded JSON of the generated image. Default value for `gpt-image-1`, + # and only present if `response_format` is set to `b64_json` for `dall-e-2` and + # `dall-e-3`. + sig { returns(T.nilable(String)) } + attr_reader :b64_json + + sig { params(b64_json: String).void } + attr_writer :b64_json + + # For `dall-e-3` only, the revised prompt that was used to generate the image. + sig { returns(T.nilable(String)) } + attr_reader :revised_prompt + + sig { params(revised_prompt: String).void } + attr_writer :revised_prompt + + # When using `dall-e-2` or `dall-e-3`, the URL of the generated image if + # `response_format` is set to `url` (default value). Unsupported for + # `gpt-image-1`. + sig { returns(T.nilable(String)) } + attr_reader :url + + sig { params(url: String).void } + attr_writer :url + + # Represents the content or the URL of an image generated by the OpenAI API. + sig do + params(b64_json: String, revised_prompt: String, url: String).returns( + T.attached_class + ) + end + def self.new( + # The base64-encoded JSON of the generated image. Default value for `gpt-image-1`, + # and only present if `response_format` is set to `b64_json` for `dall-e-2` and + # `dall-e-3`. + b64_json: nil, + # For `dall-e-3` only, the revised prompt that was used to generate the image. + revised_prompt: nil, + # When using `dall-e-2` or `dall-e-3`, the URL of the generated image if + # `response_format` is set to `url` (default value). Unsupported for + # `gpt-image-1`. 
+ url: nil + ) + end + + sig do + override.returns( + { b64_json: String, revised_prompt: String, url: String } + ) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/image_create_variation_params.rbi b/rbi/openai/models/image_create_variation_params.rbi new file mode 100644 index 00000000..1c53b22b --- /dev/null +++ b/rbi/openai/models/image_create_variation_params.rbi @@ -0,0 +1,201 @@ +# typed: strong + +module OpenAI + module Models + class ImageCreateVariationParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::ImageCreateVariationParams, OpenAI::Internal::AnyHash) + end + + # The image to use as the basis for the variation(s). Must be a valid PNG file, + # less than 4MB, and square. + sig { returns(OpenAI::Internal::FileInput) } + attr_accessor :image + + # The model to use for image generation. Only `dall-e-2` is supported at this + # time. + sig { returns(T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol))) } + attr_accessor :model + + # The number of images to generate. Must be between 1 and 10. + sig { returns(T.nilable(Integer)) } + attr_accessor :n + + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. + sig do + returns( + T.nilable( + OpenAI::ImageCreateVariationParams::ResponseFormat::OrSymbol + ) + ) + end + attr_accessor :response_format + + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024`. + sig do + returns(T.nilable(OpenAI::ImageCreateVariationParams::Size::OrSymbol)) + end + attr_accessor :size + + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + sig { returns(T.nilable(String)) } + attr_reader :user + + sig { params(user: String).void } + attr_writer :user + + sig do + params( + image: OpenAI::Internal::FileInput, + model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), + n: T.nilable(Integer), + response_format: + T.nilable( + OpenAI::ImageCreateVariationParams::ResponseFormat::OrSymbol + ), + size: T.nilable(OpenAI::ImageCreateVariationParams::Size::OrSymbol), + user: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The image to use as the basis for the variation(s). Must be a valid PNG file, + # less than 4MB, and square. + image:, + # The model to use for image generation. Only `dall-e-2` is supported at this + # time. + model: nil, + # The number of images to generate. Must be between 1 and 10. + n: nil, + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. + response_format: nil, + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024`. + size: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
+ user: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + image: OpenAI::Internal::FileInput, + model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), + n: T.nilable(Integer), + response_format: + T.nilable( + OpenAI::ImageCreateVariationParams::ResponseFormat::OrSymbol + ), + size: T.nilable(OpenAI::ImageCreateVariationParams::Size::OrSymbol), + user: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # The model to use for image generation. Only `dall-e-2` is supported at this + # time. + module Model + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias { T.any(String, OpenAI::ImageModel::TaggedSymbol) } + + sig do + override.returns( + T::Array[OpenAI::ImageCreateVariationParams::Model::Variants] + ) + end + def self.variants + end + end + + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. + module ResponseFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageCreateVariationParams::ResponseFormat) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + URL = + T.let( + :url, + OpenAI::ImageCreateVariationParams::ResponseFormat::TaggedSymbol + ) + B64_JSON = + T.let( + :b64_json, + OpenAI::ImageCreateVariationParams::ResponseFormat::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::ImageCreateVariationParams::ResponseFormat::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024`. + module Size + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageCreateVariationParams::Size) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SIZE_256X256 = + T.let( + :"256x256", + OpenAI::ImageCreateVariationParams::Size::TaggedSymbol + ) + SIZE_512X512 = + T.let( + :"512x512", + OpenAI::ImageCreateVariationParams::Size::TaggedSymbol + ) + SIZE_1024X1024 = + T.let( + :"1024x1024", + OpenAI::ImageCreateVariationParams::Size::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::ImageCreateVariationParams::Size::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/image_edit_completed_event.rbi b/rbi/openai/models/image_edit_completed_event.rbi new file mode 100644 index 00000000..07cfa0ae --- /dev/null +++ b/rbi/openai/models/image_edit_completed_event.rbi @@ -0,0 +1,346 @@ +# typed: strong + +module OpenAI + module Models + class ImageEditCompletedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ImageEditCompletedEvent, OpenAI::Internal::AnyHash) + end + + # Base64-encoded final edited image data, suitable for rendering as an image. + sig { returns(String) } + attr_accessor :b64_json + + # The background setting for the edited image. + sig { returns(OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol) } + attr_accessor :background + + # The Unix timestamp when the event was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The output format for the edited image. + sig do + returns(OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol) + end + attr_accessor :output_format + + # The quality setting for the edited image. 
+ sig { returns(OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol) } + attr_accessor :quality + + # The size of the edited image. + sig { returns(OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol) } + attr_accessor :size + + # The type of the event. Always `image_edit.completed`. + sig { returns(Symbol) } + attr_accessor :type + + # For `gpt-image-1` only, the token usage information for the image generation. + sig { returns(OpenAI::ImageEditCompletedEvent::Usage) } + attr_reader :usage + + sig { params(usage: OpenAI::ImageEditCompletedEvent::Usage::OrHash).void } + attr_writer :usage + + # Emitted when image editing has completed and the final image is available. + sig do + params( + b64_json: String, + background: OpenAI::ImageEditCompletedEvent::Background::OrSymbol, + created_at: Integer, + output_format: + OpenAI::ImageEditCompletedEvent::OutputFormat::OrSymbol, + quality: OpenAI::ImageEditCompletedEvent::Quality::OrSymbol, + size: OpenAI::ImageEditCompletedEvent::Size::OrSymbol, + usage: OpenAI::ImageEditCompletedEvent::Usage::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Base64-encoded final edited image data, suitable for rendering as an image. + b64_json:, + # The background setting for the edited image. + background:, + # The Unix timestamp when the event was created. + created_at:, + # The output format for the edited image. + output_format:, + # The quality setting for the edited image. + quality:, + # The size of the edited image. + size:, + # For `gpt-image-1` only, the token usage information for the image generation. + usage:, + # The type of the event. Always `image_edit.completed`. + type: :"image_edit.completed" + ) + end + + sig do + override.returns( + { + b64_json: String, + background: + OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol, + created_at: Integer, + output_format: + OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol, + quality: OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol, + size: OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol, + type: Symbol, + usage: OpenAI::ImageEditCompletedEvent::Usage + } + ) + end + def to_hash + end + + # The background setting for the edited image. + module Background + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageEditCompletedEvent::Background) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TRANSPARENT = + T.let( + :transparent, + OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol + ) + OPAQUE = + T.let( + :opaque, + OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol] + ) + end + def self.values + end + end + + # The output format for the edited image. 
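+      #
+      # Illustrative sketch (assuming `event` is a received
+      # `OpenAI::ImageEditCompletedEvent`): tagged symbols compare as plain
+      # symbols, so both forms below are equivalent:
+      #
+      #   event.output_format == :png
+      #   event.output_format ==
+      #     OpenAI::ImageEditCompletedEvent::OutputFormat::PNG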
+ module OutputFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageEditCompletedEvent::OutputFormat) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + PNG = + T.let( + :png, + OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol + ) + WEBP = + T.let( + :webp, + OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol + ) + JPEG = + T.let( + :jpeg, + OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The quality setting for the edited image. + module Quality + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageEditCompletedEvent::Quality) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = + T.let(:low, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol) + MEDIUM = + T.let(:medium, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol) + HIGH = + T.let(:high, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol) + AUTO = + T.let(:auto, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol] + ) + end + def self.values + end + end + + # The size of the edited image. + module Size + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImageEditCompletedEvent::Size) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SIZE_1024X1024 = + T.let( + :"1024x1024", + OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol + ) + SIZE_1024X1536 = + T.let( + :"1024x1536", + OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol + ) + SIZE_1536X1024 = + T.let( + :"1536x1024", + OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol + ) + AUTO = T.let(:auto, OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol] + ) + end + def self.values + end + end + + class Usage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::ImageEditCompletedEvent::Usage, + OpenAI::Internal::AnyHash + ) + end + + # The number of tokens (images and text) in the input prompt. + sig { returns(Integer) } + attr_accessor :input_tokens + + # The input tokens detailed information for the image generation. + sig do + returns(OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails) + end + attr_reader :input_tokens_details + + sig do + params( + input_tokens_details: + OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails::OrHash + ).void + end + attr_writer :input_tokens_details + + # The number of image tokens in the output image. + sig { returns(Integer) } + attr_accessor :output_tokens + + # The total number of tokens (images and text) used for the image generation. + sig { returns(Integer) } + attr_accessor :total_tokens + + # For `gpt-image-1` only, the token usage information for the image generation. + sig do + params( + input_tokens: Integer, + input_tokens_details: + OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails::OrHash, + output_tokens: Integer, + total_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # The number of tokens (images and text) in the input prompt. + input_tokens:, + # The input tokens detailed information for the image generation. 
+ input_tokens_details:, + # The number of image tokens in the output image. + output_tokens:, + # The total number of tokens (images and text) used for the image generation. + total_tokens: + ) + end + + sig do + override.returns( + { + input_tokens: Integer, + input_tokens_details: + OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + } + ) + end + def to_hash + end + + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails, + OpenAI::Internal::AnyHash + ) + end + + # The number of image tokens in the input prompt. + sig { returns(Integer) } + attr_accessor :image_tokens + + # The number of text tokens in the input prompt. + sig { returns(Integer) } + attr_accessor :text_tokens + + # The input tokens detailed information for the image generation. + sig do + params(image_tokens: Integer, text_tokens: Integer).returns( + T.attached_class + ) + end + def self.new( + # The number of image tokens in the input prompt. + image_tokens:, + # The number of text tokens in the input prompt. + text_tokens: + ) + end + + sig do + override.returns({ image_tokens: Integer, text_tokens: Integer }) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/image_edit_params.rbi b/rbi/openai/models/image_edit_params.rbi new file mode 100644 index 00000000..742bab15 --- /dev/null +++ b/rbi/openai/models/image_edit_params.rbi @@ -0,0 +1,444 @@ +# typed: strong + +module OpenAI + module Models + class ImageEditParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::ImageEditParams, OpenAI::Internal::AnyHash) + end + + # The image(s) to edit. Must be a supported image file or an array of images. + # + # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + # 50MB. You can provide up to 16 images. + # + # For `dall-e-2`, you can only provide one image, and it should be a square `png` + # file less than 4MB. + sig { returns(OpenAI::ImageEditParams::Image::Variants) } + attr_accessor :image + + # A text description of the desired image(s). The maximum length is 1000 + # characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + sig { returns(String) } + attr_accessor :prompt + + # Allows you to set transparency for the background of the generated image(s). This + # parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + # `opaque` or `auto` (default value). When `auto` is used, the model will + # automatically determine the best background for the image. + # + # If `transparent`, the output format needs to support transparency, so it should + # be set to either `png` (default value) or `webp`. + sig { returns(T.nilable(OpenAI::ImageEditParams::Background::OrSymbol)) } + attr_accessor :background + + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + sig do + returns(T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol)) + end + attr_accessor :input_fidelity + + # An additional image whose fully transparent areas (e.g. where alpha is zero) + # indicate where `image` should be edited.
If there are multiple images provided, + # the mask will be applied on the first image. Must be a valid PNG file, less than + # 4MB, and have the same dimensions as `image`. + sig { returns(T.nilable(OpenAI::Internal::FileInput)) } + attr_reader :mask + + sig { params(mask: OpenAI::Internal::FileInput).void } + attr_writer :mask + + # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + # is used. + sig { returns(T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol))) } + attr_accessor :model + + # The number of images to generate. Must be between 1 and 10. + sig { returns(T.nilable(Integer)) } + attr_accessor :n + + # The compression level (0-100%) for the generated images. This parameter is only + # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + # defaults to 100. + sig { returns(T.nilable(Integer)) } + attr_accessor :output_compression + + # The format in which the generated images are returned. This parameter is only + # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + # default value is `png`. + sig do + returns(T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol)) + end + attr_accessor :output_format + + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + # + # Note that the final image may be sent before the full number of partial images + # are generated if the full image is generated more quickly. + sig { returns(T.nilable(Integer)) } + attr_accessor :partial_images + + # The quality of the image that will be generated. `high`, `medium` and `low` are + # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + # Defaults to `auto`. + sig { returns(T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol)) } + attr_accessor :quality + + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + # will always return base64-encoded images. + sig do + returns(T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol)) + end + attr_accessor :response_format + + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + sig { returns(T.nilable(OpenAI::ImageEditParams::Size::OrSymbol)) } + attr_accessor :size + + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
+ sig { returns(T.nilable(String)) } + attr_reader :user + + sig { params(user: String).void } + attr_writer :user + + sig do + params( + image: OpenAI::ImageEditParams::Image::Variants, + prompt: String, + background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol), + input_fidelity: + T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol), + mask: OpenAI::Internal::FileInput, + model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), + n: T.nilable(Integer), + output_compression: T.nilable(Integer), + output_format: + T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), + quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol), + response_format: + T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol), + size: T.nilable(OpenAI::ImageEditParams::Size::OrSymbol), + user: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The image(s) to edit. Must be a supported image file or an array of images. + # + # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + # 50MB. You can provide up to 16 images. + # + # For `dall-e-2`, you can only provide one image, and it should be a square `png` + # file less than 4MB. + image:, + # A text description of the desired image(s). The maximum length is 1000 + # characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + prompt:, + # Allows you to set transparency for the background of the generated image(s). This + # parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + # `opaque` or `auto` (default value). When `auto` is used, the model will + # automatically determine the best background for the image. + # + # If `transparent`, the output format needs to support transparency, so it should + # be set to either `png` (default value) or `webp`. + background: nil, + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + input_fidelity: nil, + # An additional image whose fully transparent areas (e.g. where alpha is zero) + # indicate where `image` should be edited. If there are multiple images provided, + # the mask will be applied on the first image. Must be a valid PNG file, less than + # 4MB, and have the same dimensions as `image`. + mask: nil, + # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + # is used. + model: nil, + # The number of images to generate. Must be between 1 and 10. + n: nil, + # The compression level (0-100%) for the generated images. This parameter is only + # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + # defaults to 100. + output_compression: nil, + # The format in which the generated images are returned. This parameter is only + # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + # default value is `png`. + output_format: nil, + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event.
+ # + # Note that the final image may be sent before the full number of partial images + # are generated if the full image is generated more quickly. + partial_images: nil, + # The quality of the image that will be generated. `high`, `medium` and `low` are + # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + # Defaults to `auto`. + quality: nil, + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + # will always return base64-encoded images. + response_format: nil, + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + size: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + image: OpenAI::ImageEditParams::Image::Variants, + prompt: String, + background: + T.nilable(OpenAI::ImageEditParams::Background::OrSymbol), + input_fidelity: + T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol), + mask: OpenAI::Internal::FileInput, + model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), + n: T.nilable(Integer), + output_compression: T.nilable(Integer), + output_format: + T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), + quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol), + response_format: + T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol), + size: T.nilable(OpenAI::ImageEditParams::Size::OrSymbol), + user: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # The image(s) to edit. Must be a supported image file or an array of images. + # + # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + # 50MB. You can provide up to 16 images. + # + # For `dall-e-2`, you can only provide one image, and it should be a square `png` + # file less than 4MB. + module Image + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(StringIO, T::Array[StringIO]) } + + sig do + override.returns(T::Array[OpenAI::ImageEditParams::Image::Variants]) + end + def self.variants + end + + StringArray = + T.let( + OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::FileInput], + OpenAI::Internal::Type::Converter + ) + end + + # Allows you to set transparency for the background of the generated image(s). This + # parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + # `opaque` or `auto` (default value). When `auto` is used, the model will + # automatically determine the best background for the image. + # + # If `transparent`, the output format needs to support transparency, so it should + # be set to either `png` (default value) or `webp`.
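+      #
+      # A hypothetical request sketch; `client.images.edit` follows this SDK's
+      # resource conventions, and the file names are illustrative only:
+      #
+      #   client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
+      #   response = client.images.edit(
+      #     image: Pathname("logo.png"),
+      #     prompt: "Remove the backdrop behind the logo",
+      #     background: :transparent,
+      #     output_format: :png
+      #   )
+      #   File.binwrite("logo-transparent.png", response.data.first.b64_json.unpack1("m"))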
+ module Background + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImageEditParams::Background) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TRANSPARENT = + T.let(:transparent, OpenAI::ImageEditParams::Background::TaggedSymbol) + OPAQUE = + T.let(:opaque, OpenAI::ImageEditParams::Background::TaggedSymbol) + AUTO = T.let(:auto, OpenAI::ImageEditParams::Background::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageEditParams::Background::TaggedSymbol] + ) + end + def self.values + end + end + + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + module InputFidelity + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImageEditParams::InputFidelity) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + HIGH = + T.let(:high, OpenAI::ImageEditParams::InputFidelity::TaggedSymbol) + LOW = T.let(:low, OpenAI::ImageEditParams::InputFidelity::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageEditParams::InputFidelity::TaggedSymbol] + ) + end + def self.values + end + end + + # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + # is used. + module Model + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias { T.any(String, OpenAI::ImageModel::TaggedSymbol) } + + sig do + override.returns(T::Array[OpenAI::ImageEditParams::Model::Variants]) + end + def self.variants + end + end + + # The format in which the generated images are returned. This parameter is only + # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + # default value is `png`. + module OutputFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImageEditParams::OutputFormat) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + PNG = T.let(:png, OpenAI::ImageEditParams::OutputFormat::TaggedSymbol) + JPEG = T.let(:jpeg, OpenAI::ImageEditParams::OutputFormat::TaggedSymbol) + WEBP = T.let(:webp, OpenAI::ImageEditParams::OutputFormat::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageEditParams::OutputFormat::TaggedSymbol] + ) + end + def self.values + end + end + + # The quality of the image that will be generated. `high`, `medium` and `low` are + # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + # Defaults to `auto`. + module Quality + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImageEditParams::Quality) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + STANDARD = + T.let(:standard, OpenAI::ImageEditParams::Quality::TaggedSymbol) + LOW = T.let(:low, OpenAI::ImageEditParams::Quality::TaggedSymbol) + MEDIUM = T.let(:medium, OpenAI::ImageEditParams::Quality::TaggedSymbol) + HIGH = T.let(:high, OpenAI::ImageEditParams::Quality::TaggedSymbol) + AUTO = T.let(:auto, OpenAI::ImageEditParams::Quality::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageEditParams::Quality::TaggedSymbol] + ) + end + def self.values + end + end + + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. 
This parameter is only supported for `dall-e-2`, as `gpt-image-1` + # will always return base64-encoded images. + module ResponseFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageEditParams::ResponseFormat) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + URL = T.let(:url, OpenAI::ImageEditParams::ResponseFormat::TaggedSymbol) + B64_JSON = + T.let( + :b64_json, + OpenAI::ImageEditParams::ResponseFormat::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::ImageEditParams::ResponseFormat::TaggedSymbol] + ) + end + def self.values + end + end + + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + module Size + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImageEditParams::Size) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SIZE_256X256 = + T.let(:"256x256", OpenAI::ImageEditParams::Size::TaggedSymbol) + SIZE_512X512 = + T.let(:"512x512", OpenAI::ImageEditParams::Size::TaggedSymbol) + SIZE_1024X1024 = + T.let(:"1024x1024", OpenAI::ImageEditParams::Size::TaggedSymbol) + SIZE_1536X1024 = + T.let(:"1536x1024", OpenAI::ImageEditParams::Size::TaggedSymbol) + SIZE_1024X1536 = + T.let(:"1024x1536", OpenAI::ImageEditParams::Size::TaggedSymbol) + AUTO = T.let(:auto, OpenAI::ImageEditParams::Size::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageEditParams::Size::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/image_edit_partial_image_event.rbi b/rbi/openai/models/image_edit_partial_image_event.rbi new file mode 100644 index 00000000..f4ae7d8b --- /dev/null +++ b/rbi/openai/models/image_edit_partial_image_event.rbi @@ -0,0 +1,249 @@ +# typed: strong + +module OpenAI + module Models + class ImageEditPartialImageEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ImageEditPartialImageEvent, OpenAI::Internal::AnyHash) + end + + # Base64-encoded partial image data, suitable for rendering as an image. + sig { returns(String) } + attr_accessor :b64_json + + # The background setting for the requested edited image. + sig do + returns(OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol) + end + attr_accessor :background + + # The Unix timestamp when the event was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The output format for the requested edited image. + sig do + returns(OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol) + end + attr_accessor :output_format + + # 0-based index for the partial image (streaming). + sig { returns(Integer) } + attr_accessor :partial_image_index + + # The quality setting for the requested edited image. + sig { returns(OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol) } + attr_accessor :quality + + # The size of the requested edited image. + sig { returns(OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol) } + attr_accessor :size + + # The type of the event. Always `image_edit.partial_image`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a partial image is available during image editing streaming. 
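+      #
+      # A hypothetical consumption sketch; `stream`, `preview`, and `finish` are
+      # placeholders rather than part of the generated API:
+      #
+      #   stream.each do |event|
+      #     case event
+      #     when OpenAI::ImageEditPartialImageEvent
+      #       preview(event.b64_json, event.partial_image_index)
+      #     when OpenAI::ImageEditCompletedEvent
+      #       finish(event.b64_json)
+      #     end
+      #   end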
+ sig do + params( + b64_json: String, + background: OpenAI::ImageEditPartialImageEvent::Background::OrSymbol, + created_at: Integer, + output_format: + OpenAI::ImageEditPartialImageEvent::OutputFormat::OrSymbol, + partial_image_index: Integer, + quality: OpenAI::ImageEditPartialImageEvent::Quality::OrSymbol, + size: OpenAI::ImageEditPartialImageEvent::Size::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Base64-encoded partial image data, suitable for rendering as an image. + b64_json:, + # The background setting for the requested edited image. + background:, + # The Unix timestamp when the event was created. + created_at:, + # The output format for the requested edited image. + output_format:, + # 0-based index for the partial image (streaming). + partial_image_index:, + # The quality setting for the requested edited image. + quality:, + # The size of the requested edited image. + size:, + # The type of the event. Always `image_edit.partial_image`. + type: :"image_edit.partial_image" + ) + end + + sig do + override.returns( + { + b64_json: String, + background: + OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol, + created_at: Integer, + output_format: + OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol, + partial_image_index: Integer, + quality: OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol, + size: OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol, + type: Symbol + } + ) + end + def to_hash + end + + # The background setting for the requested edited image. + module Background + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageEditPartialImageEvent::Background) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TRANSPARENT = + T.let( + :transparent, + OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol + ) + OPAQUE = + T.let( + :opaque, + OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::ImageEditPartialImageEvent::Background::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The output format for the requested edited image. + module OutputFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageEditPartialImageEvent::OutputFormat) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + PNG = + T.let( + :png, + OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol + ) + WEBP = + T.let( + :webp, + OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol + ) + JPEG = + T.let( + :jpeg, + OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::ImageEditPartialImageEvent::OutputFormat::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The quality setting for the requested edited image. 
+ module Quality + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageEditPartialImageEvent::Quality) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = + T.let(:low, OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol) + MEDIUM = + T.let( + :medium, + OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol + ) + HIGH = + T.let( + :high, + OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::ImageEditPartialImageEvent::Quality::TaggedSymbol] + ) + end + def self.values + end + end + + # The size of the requested edited image. + module Size + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageEditPartialImageEvent::Size) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SIZE_1024X1024 = + T.let( + :"1024x1024", + OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol + ) + SIZE_1024X1536 = + T.let( + :"1024x1536", + OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol + ) + SIZE_1536X1024 = + T.let( + :"1536x1024", + OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol + ) + AUTO = + T.let(:auto, OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageEditPartialImageEvent::Size::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/image_edit_stream_event.rbi b/rbi/openai/models/image_edit_stream_event.rbi new file mode 100644 index 00000000..5bfaed0a --- /dev/null +++ b/rbi/openai/models/image_edit_stream_event.rbi @@ -0,0 +1,22 @@ +# typed: strong + +module OpenAI + module Models + # Emitted when a partial image is available during image editing streaming. + module ImageEditStreamEvent + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::ImageEditPartialImageEvent, + OpenAI::ImageEditCompletedEvent + ) + end + + sig { override.returns(T::Array[OpenAI::ImageEditStreamEvent::Variants]) } + def self.variants + end + end + end +end diff --git a/rbi/openai/models/image_gen_completed_event.rbi b/rbi/openai/models/image_gen_completed_event.rbi new file mode 100644 index 00000000..922b39b7 --- /dev/null +++ b/rbi/openai/models/image_gen_completed_event.rbi @@ -0,0 +1,339 @@ +# typed: strong + +module OpenAI + module Models + class ImageGenCompletedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ImageGenCompletedEvent, OpenAI::Internal::AnyHash) + end + + # Base64-encoded image data, suitable for rendering as an image. + sig { returns(String) } + attr_accessor :b64_json + + # The background setting for the generated image. + sig { returns(OpenAI::ImageGenCompletedEvent::Background::TaggedSymbol) } + attr_accessor :background + + # The Unix timestamp when the event was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The output format for the generated image. + sig do + returns(OpenAI::ImageGenCompletedEvent::OutputFormat::TaggedSymbol) + end + attr_accessor :output_format + + # The quality setting for the generated image. + sig { returns(OpenAI::ImageGenCompletedEvent::Quality::TaggedSymbol) } + attr_accessor :quality + + # The size of the generated image. + sig { returns(OpenAI::ImageGenCompletedEvent::Size::TaggedSymbol) } + attr_accessor :size + + # The type of the event. Always `image_generation.completed`. 
+ sig { returns(Symbol) } + attr_accessor :type + + # For `gpt-image-1` only, the token usage information for the image generation. + sig { returns(OpenAI::ImageGenCompletedEvent::Usage) } + attr_reader :usage + + sig { params(usage: OpenAI::ImageGenCompletedEvent::Usage::OrHash).void } + attr_writer :usage + + # Emitted when image generation has completed and the final image is available. + sig do + params( + b64_json: String, + background: OpenAI::ImageGenCompletedEvent::Background::OrSymbol, + created_at: Integer, + output_format: OpenAI::ImageGenCompletedEvent::OutputFormat::OrSymbol, + quality: OpenAI::ImageGenCompletedEvent::Quality::OrSymbol, + size: OpenAI::ImageGenCompletedEvent::Size::OrSymbol, + usage: OpenAI::ImageGenCompletedEvent::Usage::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Base64-encoded image data, suitable for rendering as an image. + b64_json:, + # The background setting for the generated image. + background:, + # The Unix timestamp when the event was created. + created_at:, + # The output format for the generated image. + output_format:, + # The quality setting for the generated image. + quality:, + # The size of the generated image. + size:, + # For `gpt-image-1` only, the token usage information for the image generation. + usage:, + # The type of the event. Always `image_generation.completed`. + type: :"image_generation.completed" + ) + end + + sig do + override.returns( + { + b64_json: String, + background: + OpenAI::ImageGenCompletedEvent::Background::TaggedSymbol, + created_at: Integer, + output_format: + OpenAI::ImageGenCompletedEvent::OutputFormat::TaggedSymbol, + quality: OpenAI::ImageGenCompletedEvent::Quality::TaggedSymbol, + size: OpenAI::ImageGenCompletedEvent::Size::TaggedSymbol, + type: Symbol, + usage: OpenAI::ImageGenCompletedEvent::Usage + } + ) + end + def to_hash + end + + # The background setting for the generated image. + module Background + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenCompletedEvent::Background) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TRANSPARENT = + T.let( + :transparent, + OpenAI::ImageGenCompletedEvent::Background::TaggedSymbol + ) + OPAQUE = + T.let( + :opaque, + OpenAI::ImageGenCompletedEvent::Background::TaggedSymbol + ) + AUTO = + T.let(:auto, OpenAI::ImageGenCompletedEvent::Background::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenCompletedEvent::Background::TaggedSymbol] + ) + end + def self.values + end + end + + # The output format for the generated image. + module OutputFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenCompletedEvent::OutputFormat) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + PNG = + T.let( + :png, + OpenAI::ImageGenCompletedEvent::OutputFormat::TaggedSymbol + ) + WEBP = + T.let( + :webp, + OpenAI::ImageGenCompletedEvent::OutputFormat::TaggedSymbol + ) + JPEG = + T.let( + :jpeg, + OpenAI::ImageGenCompletedEvent::OutputFormat::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::ImageGenCompletedEvent::OutputFormat::TaggedSymbol] + ) + end + def self.values + end + end + + # The quality setting for the generated image. 
+ module Quality + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenCompletedEvent::Quality) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = T.let(:low, OpenAI::ImageGenCompletedEvent::Quality::TaggedSymbol) + MEDIUM = + T.let(:medium, OpenAI::ImageGenCompletedEvent::Quality::TaggedSymbol) + HIGH = + T.let(:high, OpenAI::ImageGenCompletedEvent::Quality::TaggedSymbol) + AUTO = + T.let(:auto, OpenAI::ImageGenCompletedEvent::Quality::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenCompletedEvent::Quality::TaggedSymbol] + ) + end + def self.values + end + end + + # The size of the generated image. + module Size + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImageGenCompletedEvent::Size) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SIZE_1024X1024 = + T.let( + :"1024x1024", + OpenAI::ImageGenCompletedEvent::Size::TaggedSymbol + ) + SIZE_1024X1536 = + T.let( + :"1024x1536", + OpenAI::ImageGenCompletedEvent::Size::TaggedSymbol + ) + SIZE_1536X1024 = + T.let( + :"1536x1024", + OpenAI::ImageGenCompletedEvent::Size::TaggedSymbol + ) + AUTO = T.let(:auto, OpenAI::ImageGenCompletedEvent::Size::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenCompletedEvent::Size::TaggedSymbol] + ) + end + def self.values + end + end + + class Usage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::ImageGenCompletedEvent::Usage, + OpenAI::Internal::AnyHash + ) + end + + # The number of tokens (images and text) in the input prompt. + sig { returns(Integer) } + attr_accessor :input_tokens + + # The input tokens detailed information for the image generation. + sig do + returns(OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails) + end + attr_reader :input_tokens_details + + sig do + params( + input_tokens_details: + OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails::OrHash + ).void + end + attr_writer :input_tokens_details + + # The number of image tokens in the output image. + sig { returns(Integer) } + attr_accessor :output_tokens + + # The total number of tokens (images and text) used for the image generation. + sig { returns(Integer) } + attr_accessor :total_tokens + + # For `gpt-image-1` only, the token usage information for the image generation. + sig do + params( + input_tokens: Integer, + input_tokens_details: + OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails::OrHash, + output_tokens: Integer, + total_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # The number of tokens (images and text) in the input prompt. + input_tokens:, + # The input tokens detailed information for the image generation. + input_tokens_details:, + # The number of image tokens in the output image. + output_tokens:, + # The total number of tokens (images and text) used for the image generation. + total_tokens: + ) + end + + sig do + override.returns( + { + input_tokens: Integer, + input_tokens_details: + OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + } + ) + end + def to_hash + end + + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails, + OpenAI::Internal::AnyHash + ) + end + + # The number of image tokens in the input prompt. 
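+          # As a rule of thumb, this and `text_tokens` sum to the parent
+          # `Usage#input_tokens`.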
+ sig { returns(Integer) } + attr_accessor :image_tokens + + # The number of text tokens in the input prompt. + sig { returns(Integer) } + attr_accessor :text_tokens + + # The input tokens detailed information for the image generation. + sig do + params(image_tokens: Integer, text_tokens: Integer).returns( + T.attached_class + ) + end + def self.new( + # The number of image tokens in the input prompt. + image_tokens:, + # The number of text tokens in the input prompt. + text_tokens: + ) + end + + sig do + override.returns({ image_tokens: Integer, text_tokens: Integer }) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/image_gen_partial_image_event.rbi b/rbi/openai/models/image_gen_partial_image_event.rbi new file mode 100644 index 00000000..c582e9c9 --- /dev/null +++ b/rbi/openai/models/image_gen_partial_image_event.rbi @@ -0,0 +1,243 @@ +# typed: strong + +module OpenAI + module Models + class ImageGenPartialImageEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ImageGenPartialImageEvent, OpenAI::Internal::AnyHash) + end + + # Base64-encoded partial image data, suitable for rendering as an image. + sig { returns(String) } + attr_accessor :b64_json + + # The background setting for the requested image. + sig do + returns(OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol) + end + attr_accessor :background + + # The Unix timestamp when the event was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The output format for the requested image. + sig do + returns(OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol) + end + attr_accessor :output_format + + # 0-based index for the partial image (streaming). + sig { returns(Integer) } + attr_accessor :partial_image_index + + # The quality setting for the requested image. + sig { returns(OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol) } + attr_accessor :quality + + # The size of the requested image. + sig { returns(OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol) } + attr_accessor :size + + # The type of the event. Always `image_generation.partial_image`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a partial image is available during image generation streaming. + sig do + params( + b64_json: String, + background: OpenAI::ImageGenPartialImageEvent::Background::OrSymbol, + created_at: Integer, + output_format: + OpenAI::ImageGenPartialImageEvent::OutputFormat::OrSymbol, + partial_image_index: Integer, + quality: OpenAI::ImageGenPartialImageEvent::Quality::OrSymbol, + size: OpenAI::ImageGenPartialImageEvent::Size::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Base64-encoded partial image data, suitable for rendering as an image. + b64_json:, + # The background setting for the requested image. + background:, + # The Unix timestamp when the event was created. + created_at:, + # The output format for the requested image. + output_format:, + # 0-based index for the partial image (streaming). + partial_image_index:, + # The quality setting for the requested image. + quality:, + # The size of the requested image. + size:, + # The type of the event. Always `image_generation.partial_image`. 
+ type: :"image_generation.partial_image" + ) + end + + sig do + override.returns( + { + b64_json: String, + background: + OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol, + created_at: Integer, + output_format: + OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol, + partial_image_index: Integer, + quality: OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol, + size: OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol, + type: Symbol + } + ) + end + def to_hash + end + + # The background setting for the requested image. + module Background + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenPartialImageEvent::Background) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TRANSPARENT = + T.let( + :transparent, + OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol + ) + OPAQUE = + T.let( + :opaque, + OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::ImageGenPartialImageEvent::Background::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The output format for the requested image. + module OutputFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenPartialImageEvent::OutputFormat) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + PNG = + T.let( + :png, + OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol + ) + WEBP = + T.let( + :webp, + OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol + ) + JPEG = + T.let( + :jpeg, + OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::ImageGenPartialImageEvent::OutputFormat::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The quality setting for the requested image. + module Quality + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenPartialImageEvent::Quality) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = + T.let(:low, OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol) + MEDIUM = + T.let( + :medium, + OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol + ) + HIGH = + T.let(:high, OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol) + AUTO = + T.let(:auto, OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenPartialImageEvent::Quality::TaggedSymbol] + ) + end + def self.values + end + end + + # The size of the requested image. 
+ module Size + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenPartialImageEvent::Size) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SIZE_1024X1024 = + T.let( + :"1024x1024", + OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol + ) + SIZE_1024X1536 = + T.let( + :"1024x1536", + OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol + ) + SIZE_1536X1024 = + T.let( + :"1536x1024", + OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol + ) + AUTO = + T.let(:auto, OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenPartialImageEvent::Size::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/image_gen_stream_event.rbi b/rbi/openai/models/image_gen_stream_event.rbi new file mode 100644 index 00000000..2309b993 --- /dev/null +++ b/rbi/openai/models/image_gen_stream_event.rbi @@ -0,0 +1,22 @@ +# typed: strong + +module OpenAI + module Models + # Emitted when a partial image is available during image generation streaming. + module ImageGenStreamEvent + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::ImageGenPartialImageEvent, + OpenAI::ImageGenCompletedEvent + ) + end + + sig { override.returns(T::Array[OpenAI::ImageGenStreamEvent::Variants]) } + def self.variants + end + end + end +end diff --git a/rbi/openai/models/image_generate_params.rbi b/rbi/openai/models/image_generate_params.rbi new file mode 100644 index 00000000..c4111420 --- /dev/null +++ b/rbi/openai/models/image_generate_params.rbi @@ -0,0 +1,460 @@ +# typed: strong + +module OpenAI + module Models + class ImageGenerateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::ImageGenerateParams, OpenAI::Internal::AnyHash) + end + + # A text description of the desired image(s). The maximum length is 32000 + # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + # for `dall-e-3`. + sig { returns(String) } + attr_accessor :prompt + + # Allows you to set transparency for the background of the generated image(s). This + # parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + # `opaque` or `auto` (default value). When `auto` is used, the model will + # automatically determine the best background for the image. + # + # If `transparent`, the output format needs to support transparency, so it should + # be set to either `png` (default value) or `webp`. + sig do + returns(T.nilable(OpenAI::ImageGenerateParams::Background::OrSymbol)) + end + attr_accessor :background + + # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + # `gpt-image-1` is used. + sig { returns(T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol))) } + attr_accessor :model + + # Control the content-moderation level for images generated by `gpt-image-1`. Must + # be either `low` for less restrictive filtering or `auto` (default value). + sig do + returns(T.nilable(OpenAI::ImageGenerateParams::Moderation::OrSymbol)) + end + attr_accessor :moderation + + # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + # `n=1` is supported.
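+      # For example, `n: 4` should yield four entries in the response's `data`
+      # array.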
+ sig { returns(T.nilable(Integer)) } + attr_accessor :n + + # The compression level (0-100%) for the generated images. This parameter is only + # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + # defaults to 100. + sig { returns(T.nilable(Integer)) } + attr_accessor :output_compression + + # The format in which the generated images are returned. This parameter is only + # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + sig do + returns(T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol)) + end + attr_accessor :output_format + + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + # + # Note that the final image may be sent before the full number of partial images + # are generated if the full image is generated more quickly. + sig { returns(T.nilable(Integer)) } + attr_accessor :partial_images + + # The quality of the image that will be generated. + # + # - `auto` (default value) will automatically select the best quality for the + # given model. + # - `high`, `medium` and `low` are supported for `gpt-image-1`. + # - `hd` and `standard` are supported for `dall-e-3`. + # - `standard` is the only option for `dall-e-2`. + sig { returns(T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol)) } + attr_accessor :quality + + # The format in which generated images with `dall-e-2` and `dall-e-3` are + # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + # after the image has been generated. This parameter isn't supported for + # `gpt-image-1` which will always return base64-encoded images. + sig do + returns( + T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol) + ) + end + attr_accessor :response_format + + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + sig { returns(T.nilable(OpenAI::ImageGenerateParams::Size::OrSymbol)) } + attr_accessor :size + + # The style of the generated images. This parameter is only supported for + # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + # towards generating hyper-real and dramatic images. Natural causes the model to + # produce more natural, less hyper-real looking images. + sig { returns(T.nilable(OpenAI::ImageGenerateParams::Style::OrSymbol)) } + attr_accessor :style + + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
+ sig { returns(T.nilable(String)) } + attr_reader :user + + sig { params(user: String).void } + attr_writer :user + + sig do + params( + prompt: String, + background: + T.nilable(OpenAI::ImageGenerateParams::Background::OrSymbol), + model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), + moderation: + T.nilable(OpenAI::ImageGenerateParams::Moderation::OrSymbol), + n: T.nilable(Integer), + output_compression: T.nilable(Integer), + output_format: + T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), + quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol), + response_format: + T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol), + size: T.nilable(OpenAI::ImageGenerateParams::Size::OrSymbol), + style: T.nilable(OpenAI::ImageGenerateParams::Style::OrSymbol), + user: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # A text description of the desired image(s). The maximum length is 32000 + # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + # for `dall-e-3`. + prompt:, + # Allows you to set transparency for the background of the generated image(s). This + # parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + # `opaque` or `auto` (default value). When `auto` is used, the model will + # automatically determine the best background for the image. + # + # If `transparent`, the output format needs to support transparency, so it should + # be set to either `png` (default value) or `webp`. + background: nil, + # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + # `gpt-image-1` is used. + model: nil, + # Control the content-moderation level for images generated by `gpt-image-1`. Must + # be either `low` for less restrictive filtering or `auto` (default value). + moderation: nil, + # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + # `n=1` is supported. + n: nil, + # The compression level (0-100%) for the generated images. This parameter is only + # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + # defaults to 100. + output_compression: nil, + # The format in which the generated images are returned. This parameter is only + # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + output_format: nil, + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + # + # Note that the final image may be sent before the full number of partial images + # are generated if the full image is generated more quickly. + partial_images: nil, + # The quality of the image that will be generated. + # + # - `auto` (default value) will automatically select the best quality for the + # given model. + # - `high`, `medium` and `low` are supported for `gpt-image-1`. + # - `hd` and `standard` are supported for `dall-e-3`. + # - `standard` is the only option for `dall-e-2`. + quality: nil, + # The format in which generated images with `dall-e-2` and `dall-e-3` are + # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + # after the image has been generated.
This parameter isn't supported for + # `gpt-image-1` which will always return base64-encoded images. + response_format: nil, + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + size: nil, + # The style of the generated images. This parameter is only supported for + # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + # towards generating hyper-real and dramatic images. Natural causes the model to + # produce more natural, less hyper-real looking images. + style: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + prompt: String, + background: + T.nilable(OpenAI::ImageGenerateParams::Background::OrSymbol), + model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), + moderation: + T.nilable(OpenAI::ImageGenerateParams::Moderation::OrSymbol), + n: T.nilable(Integer), + output_compression: T.nilable(Integer), + output_format: + T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), + quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol), + response_format: + T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol), + size: T.nilable(OpenAI::ImageGenerateParams::Size::OrSymbol), + style: T.nilable(OpenAI::ImageGenerateParams::Style::OrSymbol), + user: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Allows you to set transparency for the background of the generated image(s). This + # parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + # `opaque` or `auto` (default value). When `auto` is used, the model will + # automatically determine the best background for the image. + # + # If `transparent`, the output format needs to support transparency, so it should + # be set to either `png` (default value) or `webp`. + module Background + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenerateParams::Background) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TRANSPARENT = + T.let( + :transparent, + OpenAI::ImageGenerateParams::Background::TaggedSymbol + ) + OPAQUE = + T.let(:opaque, OpenAI::ImageGenerateParams::Background::TaggedSymbol) + AUTO = + T.let(:auto, OpenAI::ImageGenerateParams::Background::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenerateParams::Background::TaggedSymbol] + ) + end + def self.values + end + end + + # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + # `gpt-image-1` is used. + module Model + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias { T.any(String, OpenAI::ImageModel::TaggedSymbol) } + + sig do + override.returns( + T::Array[OpenAI::ImageGenerateParams::Model::Variants] + ) + end + def self.variants + end + end + + # Control the content-moderation level for images generated by `gpt-image-1`. Must + # be either `low` for less restrictive filtering or `auto` (default value).
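+      #
+      # A hypothetical request sketch; `client.images.generate` follows this
+      # SDK's resource conventions, and the prompt is illustrative:
+      #
+      #   client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
+      #   response = client.images.generate(
+      #     prompt: "A watercolor lighthouse at dawn",
+      #     model: :"gpt-image-1",
+      #     moderation: :low
+      #   )
+      #   puts response.usage&.total_tokens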
+ module Moderation + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenerateParams::Moderation) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = T.let(:low, OpenAI::ImageGenerateParams::Moderation::TaggedSymbol) + AUTO = + T.let(:auto, OpenAI::ImageGenerateParams::Moderation::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenerateParams::Moderation::TaggedSymbol] + ) + end + def self.values + end + end + + # The format in which the generated images are returned. This parameter is only + # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + module OutputFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenerateParams::OutputFormat) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + PNG = + T.let(:png, OpenAI::ImageGenerateParams::OutputFormat::TaggedSymbol) + JPEG = + T.let(:jpeg, OpenAI::ImageGenerateParams::OutputFormat::TaggedSymbol) + WEBP = + T.let(:webp, OpenAI::ImageGenerateParams::OutputFormat::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenerateParams::OutputFormat::TaggedSymbol] + ) + end + def self.values + end + end + + # The quality of the image that will be generated. + # + # - `auto` (default value) will automatically select the best quality for the + # given model. + # - `high`, `medium` and `low` are supported for `gpt-image-1`. + # - `hd` and `standard` are supported for `dall-e-3`. + # - `standard` is the only option for `dall-e-2`. + module Quality + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImageGenerateParams::Quality) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + STANDARD = + T.let(:standard, OpenAI::ImageGenerateParams::Quality::TaggedSymbol) + HD = T.let(:hd, OpenAI::ImageGenerateParams::Quality::TaggedSymbol) + LOW = T.let(:low, OpenAI::ImageGenerateParams::Quality::TaggedSymbol) + MEDIUM = + T.let(:medium, OpenAI::ImageGenerateParams::Quality::TaggedSymbol) + HIGH = T.let(:high, OpenAI::ImageGenerateParams::Quality::TaggedSymbol) + AUTO = T.let(:auto, OpenAI::ImageGenerateParams::Quality::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenerateParams::Quality::TaggedSymbol] + ) + end + def self.values + end + end + + # The format in which generated images with `dall-e-2` and `dall-e-3` are + # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + # after the image has been generated. This parameter isn't supported for + # `gpt-image-1` which will always return base64-encoded images. + module ResponseFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ImageGenerateParams::ResponseFormat) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + URL = + T.let(:url, OpenAI::ImageGenerateParams::ResponseFormat::TaggedSymbol) + B64_JSON = + T.let( + :b64_json, + OpenAI::ImageGenerateParams::ResponseFormat::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::ImageGenerateParams::ResponseFormat::TaggedSymbol] + ) + end + def self.values + end + end + + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. 
+ module Size + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImageGenerateParams::Size) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = T.let(:auto, OpenAI::ImageGenerateParams::Size::TaggedSymbol) + SIZE_1024X1024 = + T.let(:"1024x1024", OpenAI::ImageGenerateParams::Size::TaggedSymbol) + SIZE_1536X1024 = + T.let(:"1536x1024", OpenAI::ImageGenerateParams::Size::TaggedSymbol) + SIZE_1024X1536 = + T.let(:"1024x1536", OpenAI::ImageGenerateParams::Size::TaggedSymbol) + SIZE_256X256 = + T.let(:"256x256", OpenAI::ImageGenerateParams::Size::TaggedSymbol) + SIZE_512X512 = + T.let(:"512x512", OpenAI::ImageGenerateParams::Size::TaggedSymbol) + SIZE_1792X1024 = + T.let(:"1792x1024", OpenAI::ImageGenerateParams::Size::TaggedSymbol) + SIZE_1024X1792 = + T.let(:"1024x1792", OpenAI::ImageGenerateParams::Size::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenerateParams::Size::TaggedSymbol] + ) + end + def self.values + end + end + + # The style of the generated images. This parameter is only supported for + # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + # towards generating hyper-real and dramatic images. Natural causes the model to + # produce more natural, less hyper-real looking images. + module Style + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImageGenerateParams::Style) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + VIVID = T.let(:vivid, OpenAI::ImageGenerateParams::Style::TaggedSymbol) + NATURAL = + T.let(:natural, OpenAI::ImageGenerateParams::Style::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImageGenerateParams::Style::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/image_model.rbi b/rbi/openai/models/image_model.rbi new file mode 100644 index 00000000..dabb10a7 --- /dev/null +++ b/rbi/openai/models/image_model.rbi @@ -0,0 +1,20 @@ +# typed: strong + +module OpenAI + module Models + module ImageModel + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::ImageModel) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + DALL_E_2 = T.let(:"dall-e-2", OpenAI::ImageModel::TaggedSymbol) + DALL_E_3 = T.let(:"dall-e-3", OpenAI::ImageModel::TaggedSymbol) + GPT_IMAGE_1 = T.let(:"gpt-image-1", OpenAI::ImageModel::TaggedSymbol) + + sig { override.returns(T::Array[OpenAI::ImageModel::TaggedSymbol]) } + def self.values + end + end + end +end diff --git a/rbi/openai/models/images_response.rbi b/rbi/openai/models/images_response.rbi new file mode 100644 index 00000000..68ca057a --- /dev/null +++ b/rbi/openai/models/images_response.rbi @@ -0,0 +1,311 @@ +# typed: strong + +module OpenAI + module Models + class ImagesResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ImagesResponse, OpenAI::Internal::AnyHash) + end + + # The Unix timestamp (in seconds) of when the image was created. + sig { returns(Integer) } + attr_accessor :created + + # The background parameter used for the image generation. Either `transparent` or + # `opaque`. + sig do + returns(T.nilable(OpenAI::ImagesResponse::Background::TaggedSymbol)) + end + attr_reader :background + + sig do + params(background: OpenAI::ImagesResponse::Background::OrSymbol).void + end + attr_writer :background + + # The list of generated images. 
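+      # With `gpt-image-1`, each entry carries `b64_json`; with `dall-e-2` or
+      # `dall-e-3` and `response_format: :url`, it carries `url` instead. A
+      # hypothetical read, assuming `response` came from the images endpoint:
+      #
+      #   response.data&.each { |image| puts(image.url || "inline base64 image") }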
+ sig { returns(T.nilable(T::Array[OpenAI::Image])) } + attr_reader :data + + sig { params(data: T::Array[OpenAI::Image::OrHash]).void } + attr_writer :data + + # The output format of the image generation. Either `png`, `webp`, or `jpeg`. + sig do + returns(T.nilable(OpenAI::ImagesResponse::OutputFormat::TaggedSymbol)) + end + attr_reader :output_format + + sig do + params( + output_format: OpenAI::ImagesResponse::OutputFormat::OrSymbol + ).void + end + attr_writer :output_format + + # The quality of the image generated. Either `low`, `medium`, or `high`. + sig { returns(T.nilable(OpenAI::ImagesResponse::Quality::TaggedSymbol)) } + attr_reader :quality + + sig { params(quality: OpenAI::ImagesResponse::Quality::OrSymbol).void } + attr_writer :quality + + # The size of the image generated. Either `1024x1024`, `1024x1536`, or + # `1536x1024`. + sig { returns(T.nilable(OpenAI::ImagesResponse::Size::TaggedSymbol)) } + attr_reader :size + + sig { params(size: OpenAI::ImagesResponse::Size::OrSymbol).void } + attr_writer :size + + # For `gpt-image-1` only, the token usage information for the image generation. + sig { returns(T.nilable(OpenAI::ImagesResponse::Usage)) } + attr_reader :usage + + sig { params(usage: OpenAI::ImagesResponse::Usage::OrHash).void } + attr_writer :usage + + # The response from the image generation endpoint. + sig do + params( + created: Integer, + background: OpenAI::ImagesResponse::Background::OrSymbol, + data: T::Array[OpenAI::Image::OrHash], + output_format: OpenAI::ImagesResponse::OutputFormat::OrSymbol, + quality: OpenAI::ImagesResponse::Quality::OrSymbol, + size: OpenAI::ImagesResponse::Size::OrSymbol, + usage: OpenAI::ImagesResponse::Usage::OrHash + ).returns(T.attached_class) + end + def self.new( + # The Unix timestamp (in seconds) of when the image was created. + created:, + # The background parameter used for the image generation. Either `transparent` or + # `opaque`. + background: nil, + # The list of generated images. + data: nil, + # The output format of the image generation. Either `png`, `webp`, or `jpeg`. + output_format: nil, + # The quality of the image generated. Either `low`, `medium`, or `high`. + quality: nil, + # The size of the image generated. Either `1024x1024`, `1024x1536`, or + # `1536x1024`. + size: nil, + # For `gpt-image-1` only, the token usage information for the image generation. + usage: nil + ) + end + + sig do + override.returns( + { + created: Integer, + background: OpenAI::ImagesResponse::Background::TaggedSymbol, + data: T::Array[OpenAI::Image], + output_format: OpenAI::ImagesResponse::OutputFormat::TaggedSymbol, + quality: OpenAI::ImagesResponse::Quality::TaggedSymbol, + size: OpenAI::ImagesResponse::Size::TaggedSymbol, + usage: OpenAI::ImagesResponse::Usage + } + ) + end + def to_hash + end + + # The background parameter used for the image generation. Either `transparent` or + # `opaque`. + module Background + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::Background) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TRANSPARENT = + T.let(:transparent, OpenAI::ImagesResponse::Background::TaggedSymbol) + OPAQUE = + T.let(:opaque, OpenAI::ImagesResponse::Background::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImagesResponse::Background::TaggedSymbol] + ) + end + def self.values + end + end + + # The output format of the image generation. Either `png`, `webp`, or `jpeg`. 
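# --- Illustrative sketch (not part of the diff): reading the nilable fields
# above. `data`, `background`, `output_format`, and friends are attr_readers
# with T.nilable types, so guard against nil; fields such as `usage` are only
# populated for `gpt-image-1`. Assumes OPENAI_API_KEY is set.
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
response = client.images.generate(prompt: "a paper crane")
response.data&.each do |image|
  puts(image.b64_json ? "inline base64 payload" : image.url)
end
puts response.background # => :transparent, :opaque, or nil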
+ module OutputFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::OutputFormat) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + PNG = T.let(:png, OpenAI::ImagesResponse::OutputFormat::TaggedSymbol) + WEBP = T.let(:webp, OpenAI::ImagesResponse::OutputFormat::TaggedSymbol) + JPEG = T.let(:jpeg, OpenAI::ImagesResponse::OutputFormat::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImagesResponse::OutputFormat::TaggedSymbol] + ) + end + def self.values + end + end + + # The quality of the image generated. Either `low`, `medium`, or `high`. + module Quality + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::Quality) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = T.let(:low, OpenAI::ImagesResponse::Quality::TaggedSymbol) + MEDIUM = T.let(:medium, OpenAI::ImagesResponse::Quality::TaggedSymbol) + HIGH = T.let(:high, OpenAI::ImagesResponse::Quality::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::ImagesResponse::Quality::TaggedSymbol] + ) + end + def self.values + end + end + + # The size of the image generated. Either `1024x1024`, `1024x1536`, or + # `1536x1024`. + module Size + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::Size) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SIZE_1024X1024 = + T.let(:"1024x1024", OpenAI::ImagesResponse::Size::TaggedSymbol) + SIZE_1024X1536 = + T.let(:"1024x1536", OpenAI::ImagesResponse::Size::TaggedSymbol) + SIZE_1536X1024 = + T.let(:"1536x1024", OpenAI::ImagesResponse::Size::TaggedSymbol) + + sig do + override.returns(T::Array[OpenAI::ImagesResponse::Size::TaggedSymbol]) + end + def self.values + end + end + + class Usage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ImagesResponse::Usage, OpenAI::Internal::AnyHash) + end + + # The number of tokens (images and text) in the input prompt. + sig { returns(Integer) } + attr_accessor :input_tokens + + # The input tokens detailed information for the image generation. + sig { returns(OpenAI::ImagesResponse::Usage::InputTokensDetails) } + attr_reader :input_tokens_details + + sig do + params( + input_tokens_details: + OpenAI::ImagesResponse::Usage::InputTokensDetails::OrHash + ).void + end + attr_writer :input_tokens_details + + # The number of output tokens generated by the model. + sig { returns(Integer) } + attr_accessor :output_tokens + + # The total number of tokens (images and text) used for the image generation. + sig { returns(Integer) } + attr_accessor :total_tokens + + # For `gpt-image-1` only, the token usage information for the image generation. + sig do + params( + input_tokens: Integer, + input_tokens_details: + OpenAI::ImagesResponse::Usage::InputTokensDetails::OrHash, + output_tokens: Integer, + total_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # The number of tokens (images and text) in the input prompt. + input_tokens:, + # The input tokens detailed information for the image generation. + input_tokens_details:, + # The number of output tokens generated by the model. + output_tokens:, + # The total number of tokens (images and text) used for the image generation. 
+ total_tokens: + ) + end + + sig do + override.returns( + { + input_tokens: Integer, + input_tokens_details: + OpenAI::ImagesResponse::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + } + ) + end + def to_hash + end + + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::ImagesResponse::Usage::InputTokensDetails, + OpenAI::Internal::AnyHash + ) + end + + # The number of image tokens in the input prompt. + sig { returns(Integer) } + attr_accessor :image_tokens + + # The number of text tokens in the input prompt. + sig { returns(Integer) } + attr_accessor :text_tokens + + # The input tokens detailed information for the image generation. + sig do + params(image_tokens: Integer, text_tokens: Integer).returns( + T.attached_class + ) + end + def self.new( + # The number of image tokens in the input prompt. + image_tokens:, + # The number of text tokens in the input prompt. + text_tokens: + ) + end + + sig do + override.returns({ image_tokens: Integer, text_tokens: Integer }) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/metadata.rbi b/rbi/openai/models/metadata.rbi new file mode 100644 index 00000000..82208718 --- /dev/null +++ b/rbi/openai/models/metadata.rbi @@ -0,0 +1,11 @@ +# typed: strong + +module OpenAI + module Models + Metadata = + T.let( + OpenAI::Internal::Type::HashOf[String], + OpenAI::Internal::Type::Converter + ) + end +end diff --git a/rbi/openai/models/model.rbi b/rbi/openai/models/model.rbi new file mode 100644 index 00000000..e84f4d72 --- /dev/null +++ b/rbi/openai/models/model.rbi @@ -0,0 +1,54 @@ +# typed: strong + +module OpenAI + module Models + class Model < OpenAI::Internal::Type::BaseModel + OrHash = T.type_alias { T.any(OpenAI::Model, OpenAI::Internal::AnyHash) } + + # The model identifier, which can be referenced in the API endpoints. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) when the model was created. + sig { returns(Integer) } + attr_accessor :created + + # The object type, which is always "model". + sig { returns(Symbol) } + attr_accessor :object + + # The organization that owns the model. + sig { returns(String) } + attr_accessor :owned_by + + # Describes an OpenAI model offering that can be used with the API. + sig do + params( + id: String, + created: Integer, + owned_by: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The model identifier, which can be referenced in the API endpoints. + id:, + # The Unix timestamp (in seconds) when the model was created. + created:, + # The organization that owns the model. + owned_by:, + # The object type, which is always "model". 
+ object: :model + ) + end + + sig do + override.returns( + { id: String, created: Integer, object: Symbol, owned_by: String } + ) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/model_delete_params.rbi b/rbi/openai/models/model_delete_params.rbi new file mode 100644 index 00000000..50bdedfa --- /dev/null +++ b/rbi/openai/models/model_delete_params.rbi @@ -0,0 +1,27 @@ +# typed: strong + +module OpenAI + module Models + class ModelDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::ModelDeleteParams, OpenAI::Internal::AnyHash) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/model_deleted.rbi b/rbi/openai/models/model_deleted.rbi new file mode 100644 index 00000000..611d54d9 --- /dev/null +++ b/rbi/openai/models/model_deleted.rbi @@ -0,0 +1,33 @@ +# typed: strong + +module OpenAI + module Models + class ModelDeleted < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias { T.any(OpenAI::ModelDeleted, OpenAI::Internal::AnyHash) } + + sig { returns(String) } + attr_accessor :id + + sig { returns(T::Boolean) } + attr_accessor :deleted + + sig { returns(String) } + attr_accessor :object + + sig do + params(id: String, deleted: T::Boolean, object: String).returns( + T.attached_class + ) + end + def self.new(id:, deleted:, object:) + end + + sig do + override.returns({ id: String, deleted: T::Boolean, object: String }) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/model_list_params.rbi b/rbi/openai/models/model_list_params.rbi new file mode 100644 index 00000000..c07911ee --- /dev/null +++ b/rbi/openai/models/model_list_params.rbi @@ -0,0 +1,27 @@ +# typed: strong + +module OpenAI + module Models + class ModelListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::ModelListParams, OpenAI::Internal::AnyHash) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/model_retrieve_params.rbi b/rbi/openai/models/model_retrieve_params.rbi new file mode 100644 index 00000000..9d8d4c05 --- /dev/null +++ b/rbi/openai/models/model_retrieve_params.rbi @@ -0,0 +1,27 @@ +# typed: strong + +module OpenAI + module Models + class ModelRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::ModelRetrieveParams, OpenAI::Internal::AnyHash) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/moderation.rbi b/rbi/openai/models/moderation.rbi new file mode 100644 index 
00000000..b6e45d8b --- /dev/null +++ b/rbi/openai/models/moderation.rbi @@ -0,0 +1,1061 @@ +# typed: strong + +module OpenAI + module Models + class Moderation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias { T.any(OpenAI::Moderation, OpenAI::Internal::AnyHash) } + + # A list of the categories, and whether they are flagged or not. + sig { returns(OpenAI::Moderation::Categories) } + attr_reader :categories + + sig { params(categories: OpenAI::Moderation::Categories::OrHash).void } + attr_writer :categories + + # A list of the categories along with the input type(s) that the score applies to. + sig { returns(OpenAI::Moderation::CategoryAppliedInputTypes) } + attr_reader :category_applied_input_types + + sig do + params( + category_applied_input_types: + OpenAI::Moderation::CategoryAppliedInputTypes::OrHash + ).void + end + attr_writer :category_applied_input_types + + # A list of the categories along with their scores as predicted by the model. + sig { returns(OpenAI::Moderation::CategoryScores) } + attr_reader :category_scores + + sig do + params(category_scores: OpenAI::Moderation::CategoryScores::OrHash).void + end + attr_writer :category_scores + + # Whether any of the categories below are flagged. + sig { returns(T::Boolean) } + attr_accessor :flagged + + sig do + params( + categories: OpenAI::Moderation::Categories::OrHash, + category_applied_input_types: + OpenAI::Moderation::CategoryAppliedInputTypes::OrHash, + category_scores: OpenAI::Moderation::CategoryScores::OrHash, + flagged: T::Boolean + ).returns(T.attached_class) + end + def self.new( + # A list of the categories, and whether they are flagged or not. + categories:, + # A list of the categories along with the input type(s) that the score applies to. + category_applied_input_types:, + # A list of the categories along with their scores as predicted by the model. + category_scores:, + # Whether any of the categories below are flagged. + flagged: + ) + end + + sig do + override.returns( + { + categories: OpenAI::Moderation::Categories, + category_applied_input_types: + OpenAI::Moderation::CategoryAppliedInputTypes, + category_scores: OpenAI::Moderation::CategoryScores, + flagged: T::Boolean + } + ) + end + def to_hash + end + + class Categories < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Moderation::Categories, OpenAI::Internal::AnyHash) + end + + # Content that expresses, incites, or promotes harassing language towards any + # target. + sig { returns(T::Boolean) } + attr_accessor :harassment + + # Harassment content that also includes violence or serious harm towards any + # target. + sig { returns(T::Boolean) } + attr_accessor :harassment_threatening + + # Content that expresses, incites, or promotes hate based on race, gender, + # ethnicity, religion, nationality, sexual orientation, disability status, or + # caste. Hateful content aimed at non-protected groups (e.g., chess players) is + # harassment. + sig { returns(T::Boolean) } + attr_accessor :hate + + # Hateful content that also includes violence or serious harm towards the targeted + # group based on race, gender, ethnicity, religion, nationality, sexual + # orientation, disability status, or caste. + sig { returns(T::Boolean) } + attr_accessor :hate_threatening + + # Content that includes instructions or advice that facilitate the planning or + # execution of wrongdoing, or that gives advice or instruction on how to commit + # illicit acts. For example, "how to shoplift" would fit this category.
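# --- Illustrative sketch (not part of the diff): checking the boolean flags
# modeled by Categories. Note that `illicit` and `illicit_violent` are
# T.nilable, since only the omni-moderation models score them. Assumes
# OPENAI_API_KEY and the SDK's `client.moderations.create` resource method.
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
result = client.moderations.create(input: "text to screen").results.first

if result.flagged
  puts "harassment? #{result.categories.harassment}"
  puts "illicit?    #{result.categories.illicit.inspect}" # may be nil
end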
+ sig { returns(T.nilable(T::Boolean)) } + attr_accessor :illicit + + # Content that includes instructions or advice that facilitate the planning or + # execution of wrongdoing that also includes violence, or that gives advice or + # instruction on the procurement of any weapon. + sig { returns(T.nilable(T::Boolean)) } + attr_accessor :illicit_violent + + # Content that promotes, encourages, or depicts acts of self-harm, such as + # suicide, cutting, and eating disorders. + sig { returns(T::Boolean) } + attr_accessor :self_harm + + # Content that encourages performing acts of self-harm, such as suicide, cutting, + # and eating disorders, or that gives instructions or advice on how to commit such + # acts. + sig { returns(T::Boolean) } + attr_accessor :self_harm_instructions + + # Content where the speaker expresses that they are engaging or intend to engage + # in acts of self-harm, such as suicide, cutting, and eating disorders. + sig { returns(T::Boolean) } + attr_accessor :self_harm_intent + + # Content meant to arouse sexual excitement, such as the description of sexual + # activity, or that promotes sexual services (excluding sex education and + # wellness). + sig { returns(T::Boolean) } + attr_accessor :sexual + + # Sexual content that includes an individual who is under 18 years old. + sig { returns(T::Boolean) } + attr_accessor :sexual_minors + + # Content that depicts death, violence, or physical injury. + sig { returns(T::Boolean) } + attr_accessor :violence + + # Content that depicts death, violence, or physical injury in graphic detail. + sig { returns(T::Boolean) } + attr_accessor :violence_graphic + + # A list of the categories, and whether they are flagged or not. + sig do + params( + harassment: T::Boolean, + harassment_threatening: T::Boolean, + hate: T::Boolean, + hate_threatening: T::Boolean, + illicit: T.nilable(T::Boolean), + illicit_violent: T.nilable(T::Boolean), + self_harm: T::Boolean, + self_harm_instructions: T::Boolean, + self_harm_intent: T::Boolean, + sexual: T::Boolean, + sexual_minors: T::Boolean, + violence: T::Boolean, + violence_graphic: T::Boolean + ).returns(T.attached_class) + end + def self.new( + # Content that expresses, incites, or promotes harassing language towards any + # target. + harassment:, + # Harassment content that also includes violence or serious harm towards any + # target. + harassment_threatening:, + # Content that expresses, incites, or promotes hate based on race, gender, + # ethnicity, religion, nationality, sexual orientation, disability status, or + # caste. Hateful content aimed at non-protected groups (e.g., chess players) is + # harassment. + hate:, + # Hateful content that also includes violence or serious harm towards the targeted + # group based on race, gender, ethnicity, religion, nationality, sexual + # orientation, disability status, or caste. + hate_threatening:, + # Content that includes instructions or advice that facilitate the planning or + # execution of wrongdoing, or that gives advice or instruction on how to commit + # illicit acts. For example, "how to shoplift" would fit this category. + illicit:, + # Content that includes instructions or advice that facilitate the planning or + # execution of wrongdoing that also includes violence, or that gives advice or + # instruction on the procurement of any weapon. + illicit_violent:, + # Content that promotes, encourages, or depicts acts of self-harm, such as + # suicide, cutting, and eating disorders. 
+ self_harm:, + # Content that encourages performing acts of self-harm, such as suicide, cutting, + # and eating disorders, or that gives instructions or advice on how to commit such + # acts. + self_harm_instructions:, + # Content where the speaker expresses that they are engaging or intend to engage + # in acts of self-harm, such as suicide, cutting, and eating disorders. + self_harm_intent:, + # Content meant to arouse sexual excitement, such as the description of sexual + # activity, or that promotes sexual services (excluding sex education and + # wellness). + sexual:, + # Sexual content that includes an individual who is under 18 years old. + sexual_minors:, + # Content that depicts death, violence, or physical injury. + violence:, + # Content that depicts death, violence, or physical injury in graphic detail. + violence_graphic: + ) + end + + sig do + override.returns( + { + harassment: T::Boolean, + harassment_threatening: T::Boolean, + hate: T::Boolean, + hate_threatening: T::Boolean, + illicit: T.nilable(T::Boolean), + illicit_violent: T.nilable(T::Boolean), + self_harm: T::Boolean, + self_harm_instructions: T::Boolean, + self_harm_intent: T::Boolean, + sexual: T::Boolean, + sexual_minors: T::Boolean, + violence: T::Boolean, + violence_graphic: T::Boolean + } + ) + end + def to_hash + end + end + + class CategoryAppliedInputTypes < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Moderation::CategoryAppliedInputTypes, + OpenAI::Internal::AnyHash + ) + end + + # The applied input type(s) for the category 'harassment'. + sig do + returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Harassment::TaggedSymbol + ] + ) + end + attr_accessor :harassment + + # The applied input type(s) for the category 'harassment/threatening'. + sig do + returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::HarassmentThreatening::TaggedSymbol + ] + ) + end + attr_accessor :harassment_threatening + + # The applied input type(s) for the category 'hate'. + sig do + returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Hate::TaggedSymbol + ] + ) + end + attr_accessor :hate + + # The applied input type(s) for the category 'hate/threatening'. + sig do + returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::HateThreatening::TaggedSymbol + ] + ) + end + attr_accessor :hate_threatening + + # The applied input type(s) for the category 'illicit'. + sig do + returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Illicit::TaggedSymbol + ] + ) + end + attr_accessor :illicit + + # The applied input type(s) for the category 'illicit/violent'. + sig do + returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::IllicitViolent::TaggedSymbol + ] + ) + end + attr_accessor :illicit_violent + + # The applied input type(s) for the category 'self-harm'. + sig do + returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarm::TaggedSymbol + ] + ) + end + attr_accessor :self_harm + + # The applied input type(s) for the category 'self-harm/instructions'. + sig do + returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmInstruction::TaggedSymbol + ] + ) + end + attr_accessor :self_harm_instructions + + # The applied input type(s) for the category 'self-harm/intent'. 
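# --- Illustrative sketch (not part of the diff): the per-category modules
# defined below are tiny enums of input types. Text-only categories expose
# just :text, while categories that also run on images (self-harm, sexual,
# violence) expose :text and :image. Runtime introspection, assuming `values`
# comes from OpenAI::Internal::Type::Enum:
OpenAI::Moderation::CategoryAppliedInputTypes::Harassment.values # => [:text]
OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarm.values   # => [:text, :image]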
+ sig do + returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmIntent::TaggedSymbol + ] + ) + end + attr_accessor :self_harm_intent + + # The applied input type(s) for the category 'sexual'. + sig do + returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Sexual::TaggedSymbol + ] + ) + end + attr_accessor :sexual + + # The applied input type(s) for the category 'sexual/minors'. + sig do + returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SexualMinor::TaggedSymbol + ] + ) + end + attr_accessor :sexual_minors + + # The applied input type(s) for the category 'violence'. + sig do + returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Violence::TaggedSymbol + ] + ) + end + attr_accessor :violence + + # The applied input type(s) for the category 'violence/graphic'. + sig do + returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::ViolenceGraphic::TaggedSymbol + ] + ) + end + attr_accessor :violence_graphic + + # A list of the categories along with the input type(s) that the score applies to. + sig do + params( + harassment: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Harassment::OrSymbol + ], + harassment_threatening: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::HarassmentThreatening::OrSymbol + ], + hate: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Hate::OrSymbol + ], + hate_threatening: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::HateThreatening::OrSymbol + ], + illicit: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Illicit::OrSymbol + ], + illicit_violent: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::IllicitViolent::OrSymbol + ], + self_harm: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarm::OrSymbol + ], + self_harm_instructions: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmInstruction::OrSymbol + ], + self_harm_intent: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmIntent::OrSymbol + ], + sexual: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Sexual::OrSymbol + ], + sexual_minors: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SexualMinor::OrSymbol + ], + violence: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Violence::OrSymbol + ], + violence_graphic: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::ViolenceGraphic::OrSymbol + ] + ).returns(T.attached_class) + end + def self.new( + # The applied input type(s) for the category 'harassment'. + harassment:, + # The applied input type(s) for the category 'harassment/threatening'. + harassment_threatening:, + # The applied input type(s) for the category 'hate'. + hate:, + # The applied input type(s) for the category 'hate/threatening'. + hate_threatening:, + # The applied input type(s) for the category 'illicit'. + illicit:, + # The applied input type(s) for the category 'illicit/violent'. + illicit_violent:, + # The applied input type(s) for the category 'self-harm'. + self_harm:, + # The applied input type(s) for the category 'self-harm/instructions'. + self_harm_instructions:, + # The applied input type(s) for the category 'self-harm/intent'. + self_harm_intent:, + # The applied input type(s) for the category 'sexual'. + sexual:, + # The applied input type(s) for the category 'sexual/minors'. + sexual_minors:, + # The applied input type(s) for the category 'violence'. 
+ violence:, + # The applied input type(s) for the category 'violence/graphic'. + violence_graphic: + ) + end + + sig do + override.returns( + { + harassment: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Harassment::TaggedSymbol + ], + harassment_threatening: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::HarassmentThreatening::TaggedSymbol + ], + hate: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Hate::TaggedSymbol + ], + hate_threatening: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::HateThreatening::TaggedSymbol + ], + illicit: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Illicit::TaggedSymbol + ], + illicit_violent: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::IllicitViolent::TaggedSymbol + ], + self_harm: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarm::TaggedSymbol + ], + self_harm_instructions: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmInstruction::TaggedSymbol + ], + self_harm_intent: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmIntent::TaggedSymbol + ], + sexual: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Sexual::TaggedSymbol + ], + sexual_minors: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SexualMinor::TaggedSymbol + ], + violence: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Violence::TaggedSymbol + ], + violence_graphic: + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::ViolenceGraphic::TaggedSymbol + ] + } + ) + end + def to_hash + end + + module Harassment + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Moderation::CategoryAppliedInputTypes::Harassment + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Moderation::CategoryAppliedInputTypes::Harassment::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Harassment::TaggedSymbol + ] + ) + end + def self.values + end + end + + module HarassmentThreatening + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Moderation::CategoryAppliedInputTypes::HarassmentThreatening + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Moderation::CategoryAppliedInputTypes::HarassmentThreatening::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::HarassmentThreatening::TaggedSymbol + ] + ) + end + def self.values + end + end + + module Hate + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Moderation::CategoryAppliedInputTypes::Hate) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Moderation::CategoryAppliedInputTypes::Hate::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Hate::TaggedSymbol + ] + ) + end + def self.values + end + end + + module HateThreatening + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Moderation::CategoryAppliedInputTypes::HateThreatening + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Moderation::CategoryAppliedInputTypes::HateThreatening::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + 
OpenAI::Moderation::CategoryAppliedInputTypes::HateThreatening::TaggedSymbol + ] + ) + end + def self.values + end + end + + module Illicit + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Moderation::CategoryAppliedInputTypes::Illicit + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Moderation::CategoryAppliedInputTypes::Illicit::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Illicit::TaggedSymbol + ] + ) + end + def self.values + end + end + + module IllicitViolent + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Moderation::CategoryAppliedInputTypes::IllicitViolent + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Moderation::CategoryAppliedInputTypes::IllicitViolent::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::IllicitViolent::TaggedSymbol + ] + ) + end + def self.values + end + end + + module SelfHarm + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarm + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarm::TaggedSymbol + ) + IMAGE = + T.let( + :image, + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarm::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarm::TaggedSymbol + ] + ) + end + def self.values + end + end + + module SelfHarmInstruction + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmInstruction + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmInstruction::TaggedSymbol + ) + IMAGE = + T.let( + :image, + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmInstruction::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmInstruction::TaggedSymbol + ] + ) + end + def self.values + end + end + + module SelfHarmIntent + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmIntent + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmIntent::TaggedSymbol + ) + IMAGE = + T.let( + :image, + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmIntent::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmIntent::TaggedSymbol + ] + ) + end + def self.values + end + end + + module Sexual + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Moderation::CategoryAppliedInputTypes::Sexual + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Moderation::CategoryAppliedInputTypes::Sexual::TaggedSymbol + ) + IMAGE = + T.let( + :image, + OpenAI::Moderation::CategoryAppliedInputTypes::Sexual::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + 
OpenAI::Moderation::CategoryAppliedInputTypes::Sexual::TaggedSymbol + ] + ) + end + def self.values + end + end + + module SexualMinor + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Moderation::CategoryAppliedInputTypes::SexualMinor + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Moderation::CategoryAppliedInputTypes::SexualMinor::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::SexualMinor::TaggedSymbol + ] + ) + end + def self.values + end + end + + module Violence + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Moderation::CategoryAppliedInputTypes::Violence + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Moderation::CategoryAppliedInputTypes::Violence::TaggedSymbol + ) + IMAGE = + T.let( + :image, + OpenAI::Moderation::CategoryAppliedInputTypes::Violence::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::Violence::TaggedSymbol + ] + ) + end + def self.values + end + end + + module ViolenceGraphic + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Moderation::CategoryAppliedInputTypes::ViolenceGraphic + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Moderation::CategoryAppliedInputTypes::ViolenceGraphic::TaggedSymbol + ) + IMAGE = + T.let( + :image, + OpenAI::Moderation::CategoryAppliedInputTypes::ViolenceGraphic::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Moderation::CategoryAppliedInputTypes::ViolenceGraphic::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class CategoryScores < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Moderation::CategoryScores, OpenAI::Internal::AnyHash) + end + + # The score for the category 'harassment'. + sig { returns(Float) } + attr_accessor :harassment + + # The score for the category 'harassment/threatening'. + sig { returns(Float) } + attr_accessor :harassment_threatening + + # The score for the category 'hate'. + sig { returns(Float) } + attr_accessor :hate + + # The score for the category 'hate/threatening'. + sig { returns(Float) } + attr_accessor :hate_threatening + + # The score for the category 'illicit'. + sig { returns(Float) } + attr_accessor :illicit + + # The score for the category 'illicit/violent'. + sig { returns(Float) } + attr_accessor :illicit_violent + + # The score for the category 'self-harm'. + sig { returns(Float) } + attr_accessor :self_harm + + # The score for the category 'self-harm/instructions'. + sig { returns(Float) } + attr_accessor :self_harm_instructions + + # The score for the category 'self-harm/intent'. + sig { returns(Float) } + attr_accessor :self_harm_intent + + # The score for the category 'sexual'. + sig { returns(Float) } + attr_accessor :sexual + + # The score for the category 'sexual/minors'. + sig { returns(Float) } + attr_accessor :sexual_minors + + # The score for the category 'violence'. + sig { returns(Float) } + attr_accessor :violence + + # The score for the category 'violence/graphic'. + sig { returns(Float) } + attr_accessor :violence_graphic + + # A list of the categories along with their scores as predicted by the model.
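# --- Illustrative sketch (not part of the diff): the Float scores declared
# here line up one-to-one with the boolean flags and the applied-input-type
# arrays, keyed by the same category names. Assumes the same client setup as
# the earlier moderation sketch (OPENAI_API_KEY in the environment).
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
result = client.moderations.create(input: "text to screen").results.first
result.category_scores.violence              # => e.g. 0.0013
result.category_applied_input_types.violence # => e.g. [:text]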
+ sig do + params( + harassment: Float, + harassment_threatening: Float, + hate: Float, + hate_threatening: Float, + illicit: Float, + illicit_violent: Float, + self_harm: Float, + self_harm_instructions: Float, + self_harm_intent: Float, + sexual: Float, + sexual_minors: Float, + violence: Float, + violence_graphic: Float + ).returns(T.attached_class) + end + def self.new( + # The score for the category 'harassment'. + harassment:, + # The score for the category 'harassment/threatening'. + harassment_threatening:, + # The score for the category 'hate'. + hate:, + # The score for the category 'hate/threatening'. + hate_threatening:, + # The score for the category 'illicit'. + illicit:, + # The score for the category 'illicit/violent'. + illicit_violent:, + # The score for the category 'self-harm'. + self_harm:, + # The score for the category 'self-harm/instructions'. + self_harm_instructions:, + # The score for the category 'self-harm/intent'. + self_harm_intent:, + # The score for the category 'sexual'. + sexual:, + # The score for the category 'sexual/minors'. + sexual_minors:, + # The score for the category 'violence'. + violence:, + # The score for the category 'violence/graphic'. + violence_graphic: + ) + end + + sig do + override.returns( + { + harassment: Float, + harassment_threatening: Float, + hate: Float, + hate_threatening: Float, + illicit: Float, + illicit_violent: Float, + self_harm: Float, + self_harm_instructions: Float, + self_harm_intent: Float, + sexual: Float, + sexual_minors: Float, + violence: Float, + violence_graphic: Float + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/moderation_create_params.rbi b/rbi/openai/models/moderation_create_params.rbi new file mode 100644 index 00000000..0d5c60fa --- /dev/null +++ b/rbi/openai/models/moderation_create_params.rbi @@ -0,0 +1,122 @@ +# typed: strong + +module OpenAI + module Models + class ModerationCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::ModerationCreateParams, OpenAI::Internal::AnyHash) + end + + # Input (or inputs) to classify. Can be a single string, an array of strings, or + # an array of multi-modal input objects similar to other models. + sig { returns(OpenAI::ModerationCreateParams::Input::Variants) } + attr_accessor :input + + # The content moderation model you would like to use. Learn more in + # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + # learn about available models + # [here](https://platform.openai.com/docs/models#moderation). + sig do + returns(T.nilable(T.any(String, OpenAI::ModerationModel::OrSymbol))) + end + attr_reader :model + + sig do + params(model: T.any(String, OpenAI::ModerationModel::OrSymbol)).void + end + attr_writer :model + + sig do + params( + input: OpenAI::ModerationCreateParams::Input::Variants, + model: T.any(String, OpenAI::ModerationModel::OrSymbol), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Input (or inputs) to classify. Can be a single string, an array of strings, or + # an array of multi-modal input objects similar to other models. + input:, + # The content moderation model you would like to use. 
Learn more in + # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + # learn about available models + # [here](https://platform.openai.com/docs/models#moderation). + model: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + input: OpenAI::ModerationCreateParams::Input::Variants, + model: T.any(String, OpenAI::ModerationModel::OrSymbol), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Input (or inputs) to classify. Can be a single string, an array of strings, or + # an array of multi-modal input objects similar to other models. + module Input + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[String], + T::Array[OpenAI::ModerationMultiModalInput::Variants] + ) + end + + sig do + override.returns( + T::Array[OpenAI::ModerationCreateParams::Input::Variants] + ) + end + def self.variants + end + + StringArray = + T.let( + OpenAI::Internal::Type::ArrayOf[String], + OpenAI::Internal::Type::Converter + ) + + ModerationMultiModalInputArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + union: OpenAI::ModerationMultiModalInput + ], + OpenAI::Internal::Type::Converter + ) + end + + # The content moderation model you would like to use. Learn more in + # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + # learn about available models + # [here](https://platform.openai.com/docs/models#moderation). + module Model + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias { T.any(String, OpenAI::ModerationModel::TaggedSymbol) } + + sig do + override.returns( + T::Array[OpenAI::ModerationCreateParams::Model::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/moderation_create_response.rbi b/rbi/openai/models/moderation_create_response.rbi new file mode 100644 index 00000000..bdf51d1c --- /dev/null +++ b/rbi/openai/models/moderation_create_response.rbi @@ -0,0 +1,53 @@ +# typed: strong + +module OpenAI + module Models + class ModerationCreateResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::ModerationCreateResponse, + OpenAI::Internal::AnyHash + ) + end + + # The unique identifier for the moderation request. + sig { returns(String) } + attr_accessor :id + + # The model used to generate the moderation results. + sig { returns(String) } + attr_accessor :model + + # A list of moderation objects. + sig { returns(T::Array[OpenAI::Moderation]) } + attr_accessor :results + + # Represents whether a given text input is potentially harmful. + sig do + params( + id: String, + model: String, + results: T::Array[OpenAI::Moderation::OrHash] + ).returns(T.attached_class) + end + def self.new( + # The unique identifier for the moderation request. + id:, + # The model used to generate the moderation results. + model:, + # A list of moderation objects.
+ results: + ) + end + + sig do + override.returns( + { id: String, model: String, results: T::Array[OpenAI::Moderation] } + ) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/moderation_image_url_input.rbi b/rbi/openai/models/moderation_image_url_input.rbi new file mode 100644 index 00000000..4867a348 --- /dev/null +++ b/rbi/openai/models/moderation_image_url_input.rbi @@ -0,0 +1,76 @@ +# typed: strong + +module OpenAI + module Models + class ModerationImageURLInput < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ModerationImageURLInput, OpenAI::Internal::AnyHash) + end + + # Contains either an image URL or a data URL for a base64 encoded image. + sig { returns(OpenAI::ModerationImageURLInput::ImageURL) } + attr_reader :image_url + + sig do + params( + image_url: OpenAI::ModerationImageURLInput::ImageURL::OrHash + ).void + end + attr_writer :image_url + + # Always `image_url`. + sig { returns(Symbol) } + attr_accessor :type + + # An object describing an image to classify. + sig do + params( + image_url: OpenAI::ModerationImageURLInput::ImageURL::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Contains either an image URL or a data URL for a base64 encoded image. + image_url:, + # Always `image_url`. + type: :image_url + ) + end + + sig do + override.returns( + { image_url: OpenAI::ModerationImageURLInput::ImageURL, type: Symbol } + ) + end + def to_hash + end + + class ImageURL < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::ModerationImageURLInput::ImageURL, + OpenAI::Internal::AnyHash + ) + end + + # Either a URL of the image or the base64 encoded image data. + sig { returns(String) } + attr_accessor :url + + # Contains either an image URL or a data URL for a base64 encoded image. + sig { params(url: String).returns(T.attached_class) } + def self.new( + # Either a URL of the image or the base64 encoded image data. + url: + ) + end + + sig { override.returns({ url: String }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/moderation_model.rbi b/rbi/openai/models/moderation_model.rbi new file mode 100644 index 00000000..2e122275 --- /dev/null +++ b/rbi/openai/models/moderation_model.rbi @@ -0,0 +1,28 @@ +# typed: strong + +module OpenAI + module Models + module ModerationModel + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::ModerationModel) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + OMNI_MODERATION_LATEST = + T.let(:"omni-moderation-latest", OpenAI::ModerationModel::TaggedSymbol) + OMNI_MODERATION_2024_09_26 = + T.let( + :"omni-moderation-2024-09-26", + OpenAI::ModerationModel::TaggedSymbol + ) + TEXT_MODERATION_LATEST = + T.let(:"text-moderation-latest", OpenAI::ModerationModel::TaggedSymbol) + TEXT_MODERATION_STABLE = + T.let(:"text-moderation-stable", OpenAI::ModerationModel::TaggedSymbol) + + sig { override.returns(T::Array[OpenAI::ModerationModel::TaggedSymbol]) } + def self.values + end + end + end +end diff --git a/rbi/openai/models/moderation_multi_modal_input.rbi b/rbi/openai/models/moderation_multi_modal_input.rbi new file mode 100644 index 00000000..9c3ad7a6 --- /dev/null +++ b/rbi/openai/models/moderation_multi_modal_input.rbi @@ -0,0 +1,21 @@ +# typed: strong + +module OpenAI + module Models + # An object describing an image to classify. 
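# --- Illustrative sketch (not part of the diff): the union defined below
# accepts image-URL and text inputs. Request parameters are OrHash-typed, so
# plain hashes coerce to the model classes shown above. Assumes
# OPENAI_API_KEY is set in the environment.
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
client.moderations.create(
  model: :"omni-moderation-latest",
  input: [
    { type: :text, text: "caption to screen" },
    { type: :image_url, image_url: { url: "https://example.com/photo.png" } }
  ]
)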
+ module ModerationMultiModalInput + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any(OpenAI::ModerationImageURLInput, OpenAI::ModerationTextInput) + end + + sig do + override.returns(T::Array[OpenAI::ModerationMultiModalInput::Variants]) + end + def self.variants + end + end + end +end diff --git a/rbi/openai/models/moderation_text_input.rbi b/rbi/openai/models/moderation_text_input.rbi new file mode 100644 index 00000000..7c08a607 --- /dev/null +++ b/rbi/openai/models/moderation_text_input.rbi @@ -0,0 +1,34 @@ +# typed: strong + +module OpenAI + module Models + class ModerationTextInput < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ModerationTextInput, OpenAI::Internal::AnyHash) + end + + # A string of text to classify. + sig { returns(String) } + attr_accessor :text + + # Always `text`. + sig { returns(Symbol) } + attr_accessor :type + + # An object describing text to classify. + sig { params(text: String, type: Symbol).returns(T.attached_class) } + def self.new( + # A string of text to classify. + text:, + # Always `text`. + type: :text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/other_file_chunking_strategy_object.rbi b/rbi/openai/models/other_file_chunking_strategy_object.rbi new file mode 100644 index 00000000..744dd2d8 --- /dev/null +++ b/rbi/openai/models/other_file_chunking_strategy_object.rbi @@ -0,0 +1,33 @@ +# typed: strong + +module OpenAI + module Models + class OtherFileChunkingStrategyObject < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::OtherFileChunkingStrategyObject, + OpenAI::Internal::AnyHash + ) + end + + # Always `other`. + sig { returns(Symbol) } + attr_accessor :type + + # This is returned when the chunking strategy is unknown. Typically, this is + # because the file was indexed before the `chunking_strategy` concept was + # introduced in the API. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # Always `other`. + type: :other + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/reasoning.rbi b/rbi/openai/models/reasoning.rbi new file mode 100644 index 00000000..f6364147 --- /dev/null +++ b/rbi/openai/models/reasoning.rbi @@ -0,0 +1,125 @@ +# typed: strong + +module OpenAI + module Models + class Reasoning < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias { T.any(OpenAI::Reasoning, OpenAI::Internal::AnyHash) } + + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } + attr_accessor :effort + + # **Deprecated:** use `summary` instead. + # + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `auto`, + # `concise`, or `detailed`. + sig { returns(T.nilable(OpenAI::Reasoning::GenerateSummary::OrSymbol)) } + attr_accessor :generate_summary + + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `auto`, + # `concise`, or `detailed`. 
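# --- Illustrative sketch (not part of the diff): passing a Reasoning config
# to the Responses API. `client.responses.create` and the o-series model
# name are assumptions here, not established by this diff; the reasoning
# hash coerces via Reasoning::OrHash.
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
client.responses.create(
  model: :"o4-mini",
  input: "Summarize the tradeoffs of B-trees versus LSM-trees.",
  reasoning: { effort: :medium, summary: :auto }
)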
+ sig { returns(T.nilable(OpenAI::Reasoning::Summary::OrSymbol)) } + attr_accessor :summary + + # **gpt-5 and o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + sig do + params( + effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + generate_summary: + T.nilable(OpenAI::Reasoning::GenerateSummary::OrSymbol), + summary: T.nilable(OpenAI::Reasoning::Summary::OrSymbol) + ).returns(T.attached_class) + end + def self.new( + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + effort: nil, + # **Deprecated:** use `summary` instead. + # + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `auto`, + # `concise`, or `detailed`. + generate_summary: nil, + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `auto`, + # `concise`, or `detailed`. + summary: nil + ) + end + + sig do + override.returns( + { + effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + generate_summary: + T.nilable(OpenAI::Reasoning::GenerateSummary::OrSymbol), + summary: T.nilable(OpenAI::Reasoning::Summary::OrSymbol) + } + ) + end + def to_hash + end + + # **Deprecated:** use `summary` instead. + # + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `auto`, + # `concise`, or `detailed`. + module GenerateSummary + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::Reasoning::GenerateSummary) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = T.let(:auto, OpenAI::Reasoning::GenerateSummary::TaggedSymbol) + CONCISE = + T.let(:concise, OpenAI::Reasoning::GenerateSummary::TaggedSymbol) + DETAILED = + T.let(:detailed, OpenAI::Reasoning::GenerateSummary::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::Reasoning::GenerateSummary::TaggedSymbol] + ) + end + def self.values + end + end + + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `auto`, + # `concise`, or `detailed`. + module Summary + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::Reasoning::Summary) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = T.let(:auto, OpenAI::Reasoning::Summary::TaggedSymbol) + CONCISE = T.let(:concise, OpenAI::Reasoning::Summary::TaggedSymbol) + DETAILED = T.let(:detailed, OpenAI::Reasoning::Summary::TaggedSymbol) + + sig do + override.returns(T::Array[OpenAI::Reasoning::Summary::TaggedSymbol]) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/reasoning_effort.rbi b/rbi/openai/models/reasoning_effort.rbi new file mode 100644 index 00000000..fb0629b1 --- /dev/null +++ b/rbi/openai/models/reasoning_effort.rbi @@ -0,0 +1,26 @@ +# typed: strong + +module OpenAI + module Models + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. 
Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + module ReasoningEffort + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::ReasoningEffort) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MINIMAL = T.let(:minimal, OpenAI::ReasoningEffort::TaggedSymbol) + LOW = T.let(:low, OpenAI::ReasoningEffort::TaggedSymbol) + MEDIUM = T.let(:medium, OpenAI::ReasoningEffort::TaggedSymbol) + HIGH = T.let(:high, OpenAI::ReasoningEffort::TaggedSymbol) + + sig { override.returns(T::Array[OpenAI::ReasoningEffort::TaggedSymbol]) } + def self.values + end + end + end +end diff --git a/rbi/openai/models/response_format_json_object.rbi b/rbi/openai/models/response_format_json_object.rbi new file mode 100644 index 00000000..6aebd1ed --- /dev/null +++ b/rbi/openai/models/response_format_json_object.rbi @@ -0,0 +1,30 @@ +# typed: strong + +module OpenAI + module Models + class ResponseFormatJSONObject < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ResponseFormatJSONObject, OpenAI::Internal::AnyHash) + end + + # The type of response format being defined. Always `json_object`. + sig { returns(Symbol) } + attr_accessor :type + + # JSON object response format. An older method of generating JSON responses. Using + # `json_schema` is recommended for models that support it. Note that the model + # will not generate JSON without a system or user message instructing it to do so. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The type of response format being defined. Always `json_object`. + type: :json_object + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/response_format_json_schema.rbi b/rbi/openai/models/response_format_json_schema.rbi new file mode 100644 index 00000000..8155e9c9 --- /dev/null +++ b/rbi/openai/models/response_format_json_schema.rbi @@ -0,0 +1,135 @@ +# typed: strong + +module OpenAI + module Models + class ResponseFormatJSONSchema < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ResponseFormatJSONSchema, OpenAI::Internal::AnyHash) + end + + # Structured Outputs configuration options, including a JSON Schema. + sig { returns(OpenAI::ResponseFormatJSONSchema::JSONSchema) } + attr_reader :json_schema + + sig do + params( + json_schema: OpenAI::ResponseFormatJSONSchema::JSONSchema::OrHash + ).void + end + attr_writer :json_schema + + # The type of response format being defined. Always `json_schema`. + sig { returns(Symbol) } + attr_accessor :type + + # JSON Schema response format. Used to generate structured JSON responses. Learn + # more about + # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + sig do + params( + json_schema: OpenAI::ResponseFormatJSONSchema::JSONSchema::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Structured Outputs configuration options, including a JSON Schema. + json_schema:, + # The type of response format being defined. Always `json_schema`. 
+ type: :json_schema + ) + end + + sig do + override.returns( + { + json_schema: OpenAI::ResponseFormatJSONSchema::JSONSchema, + type: Symbol + } + ) + end + def to_hash + end + + class JSONSchema < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::ResponseFormatJSONSchema::JSONSchema, + OpenAI::Internal::AnyHash + ) + end + + # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + # and dashes, with a maximum length of 64. + sig { returns(String) } + attr_accessor :name + + # A description of what the response format is for, used by the model to determine + # how to respond in the format. + sig { returns(T.nilable(String)) } + attr_reader :description + + sig { params(description: String).void } + attr_writer :description + + # The schema for the response format, described as a JSON Schema object. Learn how + # to build JSON schemas [here](https://json-schema.org/). + sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } + attr_reader :schema + + sig { params(schema: T::Hash[Symbol, T.anything]).void } + attr_writer :schema + + # Whether to enable strict schema adherence when generating the output. If set to + # true, the model will always follow the exact schema defined in the `schema` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. To + # learn more, read the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + sig { returns(T.nilable(T::Boolean)) } + attr_accessor :strict + + # Structured Outputs configuration options, including a JSON Schema. + sig do + params( + name: String, + description: String, + schema: T::Hash[Symbol, T.anything], + strict: T.nilable(T::Boolean) + ).returns(T.attached_class) + end + def self.new( + # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + # and dashes, with a maximum length of 64. + name:, + # A description of what the response format is for, used by the model to determine + # how to respond in the format. + description: nil, + # The schema for the response format, described as a JSON Schema object. Learn how + # to build JSON schemas [here](https://json-schema.org/). + schema: nil, + # Whether to enable strict schema adherence when generating the output. If set to + # true, the model will always follow the exact schema defined in the `schema` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. To + # learn more, read the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + strict: nil + ) + end + + sig do + override.returns( + { + name: String, + description: String, + schema: T::Hash[Symbol, T.anything], + strict: T.nilable(T::Boolean) + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/response_format_text.rbi b/rbi/openai/models/response_format_text.rbi new file mode 100644 index 00000000..24fa0549 --- /dev/null +++ b/rbi/openai/models/response_format_text.rbi @@ -0,0 +1,28 @@ +# typed: strong + +module OpenAI + module Models + class ResponseFormatText < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ResponseFormatText, OpenAI::Internal::AnyHash) + end + + # The type of response format being defined. Always `text`. + sig { returns(Symbol) } + attr_accessor :type + + # Default response format. Used to generate text responses. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The type of response format being defined. 
Always `text`. + type: :text + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/response_format_text_grammar.rbi b/rbi/openai/models/response_format_text_grammar.rbi new file mode 100644 index 00000000..771688ea --- /dev/null +++ b/rbi/openai/models/response_format_text_grammar.rbi @@ -0,0 +1,35 @@ +# typed: strong + +module OpenAI + module Models + class ResponseFormatTextGrammar < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ResponseFormatTextGrammar, OpenAI::Internal::AnyHash) + end + + # The custom grammar for the model to follow. + sig { returns(String) } + attr_accessor :grammar + + # The type of response format being defined. Always `grammar`. + sig { returns(Symbol) } + attr_accessor :type + + # A custom grammar for the model to follow when generating text. Learn more in the + # [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars). + sig { params(grammar: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The custom grammar for the model to follow. + grammar:, + # The type of response format being defined. Always `grammar`. + type: :grammar + ) + end + + sig { override.returns({ grammar: String, type: Symbol }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/response_format_text_python.rbi b/rbi/openai/models/response_format_text_python.rbi new file mode 100644 index 00000000..cc36114b --- /dev/null +++ b/rbi/openai/models/response_format_text_python.rbi @@ -0,0 +1,30 @@ +# typed: strong + +module OpenAI + module Models + class ResponseFormatTextPython < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ResponseFormatTextPython, OpenAI::Internal::AnyHash) + end + + # The type of response format being defined. Always `python`. + sig { returns(Symbol) } + attr_accessor :type + + # Configure the model to generate valid Python code. See the + # [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars) + # for more details. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The type of response format being defined. Always `python`. + type: :python + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/responses/computer_tool.rbi b/rbi/openai/models/responses/computer_tool.rbi new file mode 100644 index 00000000..af8eff7a --- /dev/null +++ b/rbi/openai/models/responses/computer_tool.rbi @@ -0,0 +1,113 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ComputerTool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Responses::ComputerTool, OpenAI::Internal::AnyHash) + end + + # The height of the computer display. + sig { returns(Integer) } + attr_accessor :display_height + + # The width of the computer display. + sig { returns(Integer) } + attr_accessor :display_width + + # The type of computer environment to control. + sig { returns(OpenAI::Responses::ComputerTool::Environment::OrSymbol) } + attr_accessor :environment + + # The type of the computer use tool. Always `computer_use_preview`. + sig { returns(Symbol) } + attr_accessor :type + + # A tool that controls a virtual computer. Learn more about the + # [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). 
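+        #
+        # A minimal construction sketch (the dimensions and environment below
+        # are arbitrary example values, not defaults):
+        #
+        #   tool = OpenAI::Responses::ComputerTool.new(
+        #     display_width: 1024,
+        #     display_height: 768,
+        #     environment: :browser
+        #   )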
+ sig do + params( + display_height: Integer, + display_width: Integer, + environment: OpenAI::Responses::ComputerTool::Environment::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The height of the computer display. + display_height:, + # The width of the computer display. + display_width:, + # The type of computer environment to control. + environment:, + # The type of the computer use tool. Always `computer_use_preview`. + type: :computer_use_preview + ) + end + + sig do + override.returns( + { + display_height: Integer, + display_width: Integer, + environment: + OpenAI::Responses::ComputerTool::Environment::OrSymbol, + type: Symbol + } + ) + end + def to_hash + end + + # The type of computer environment to control. + module Environment + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ComputerTool::Environment) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + WINDOWS = + T.let( + :windows, + OpenAI::Responses::ComputerTool::Environment::TaggedSymbol + ) + MAC = + T.let( + :mac, + OpenAI::Responses::ComputerTool::Environment::TaggedSymbol + ) + LINUX = + T.let( + :linux, + OpenAI::Responses::ComputerTool::Environment::TaggedSymbol + ) + UBUNTU = + T.let( + :ubuntu, + OpenAI::Responses::ComputerTool::Environment::TaggedSymbol + ) + BROWSER = + T.let( + :browser, + OpenAI::Responses::ComputerTool::Environment::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ComputerTool::Environment::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/custom_tool.rbi b/rbi/openai/models/responses/custom_tool.rbi new file mode 100644 index 00000000..066ca268 --- /dev/null +++ b/rbi/openai/models/responses/custom_tool.rbi @@ -0,0 +1,96 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class CustomTool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Responses::CustomTool, OpenAI::Internal::AnyHash) + end + + # The name of the custom tool, used to identify it in tool calls. + sig { returns(String) } + attr_accessor :name + + # The type of the custom tool. Always `custom`. + sig { returns(Symbol) } + attr_accessor :type + + # Optional description of the custom tool, used to provide more context. + sig { returns(T.nilable(String)) } + attr_reader :description + + sig { params(description: String).void } + attr_writer :description + + # The input format for the custom tool. Default is unconstrained text. + sig do + returns( + T.nilable( + T.any( + OpenAI::CustomToolInputFormat::Text, + OpenAI::CustomToolInputFormat::Grammar + ) + ) + ) + end + attr_reader :format_ + + sig do + params( + format_: + T.any( + OpenAI::CustomToolInputFormat::Text::OrHash, + OpenAI::CustomToolInputFormat::Grammar::OrHash + ) + ).void + end + attr_writer :format_ + + # A custom tool that processes input using a specified format. Learn more about + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools). + sig do + params( + name: String, + description: String, + format_: + T.any( + OpenAI::CustomToolInputFormat::Text::OrHash, + OpenAI::CustomToolInputFormat::Grammar::OrHash + ), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The name of the custom tool, used to identify it in tool calls. + name:, + # Optional description of the custom tool, used to provide more context. 
+ description: nil, + # The input format for the custom tool. Default is unconstrained text. + format_: nil, + # The type of the custom tool. Always `custom`. + type: :custom + ) + end + + sig do + override.returns( + { + name: String, + type: Symbol, + description: String, + format_: + T.any( + OpenAI::CustomToolInputFormat::Text, + OpenAI::CustomToolInputFormat::Grammar + ) + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/easy_input_message.rbi b/rbi/openai/models/responses/easy_input_message.rbi new file mode 100644 index 00000000..10935954 --- /dev/null +++ b/rbi/openai/models/responses/easy_input_message.rbi @@ -0,0 +1,164 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class EasyInputMessage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::EasyInputMessage, + OpenAI::Internal::AnyHash + ) + end + + # Text, image, or audio input to the model, used to generate a response. Can also + # contain previous assistant responses. + sig { returns(OpenAI::Responses::EasyInputMessage::Content::Variants) } + attr_accessor :content + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + sig { returns(OpenAI::Responses::EasyInputMessage::Role::OrSymbol) } + attr_accessor :role + + # The type of the message input. Always `message`. + sig do + returns( + T.nilable(OpenAI::Responses::EasyInputMessage::Type::OrSymbol) + ) + end + attr_reader :type + + sig do + params(type: OpenAI::Responses::EasyInputMessage::Type::OrSymbol).void + end + attr_writer :type + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. + sig do + params( + content: OpenAI::Responses::EasyInputMessage::Content::Variants, + role: OpenAI::Responses::EasyInputMessage::Role::OrSymbol, + type: OpenAI::Responses::EasyInputMessage::Type::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # Text, image, or audio input to the model, used to generate a response. Can also + # contain previous assistant responses. + content:, + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. + role:, + # The type of the message input. Always `message`. + type: nil + ) + end + + sig do + override.returns( + { + content: OpenAI::Responses::EasyInputMessage::Content::Variants, + role: OpenAI::Responses::EasyInputMessage::Role::OrSymbol, + type: OpenAI::Responses::EasyInputMessage::Type::OrSymbol + } + ) + end + def to_hash + end + + # Text, image, or audio input to the model, used to generate a response. Can also + # contain previous assistant responses. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[OpenAI::Responses::ResponseInputContent::Variants] + ) + end + + sig do + override.returns( + T::Array[OpenAI::Responses::EasyInputMessage::Content::Variants] + ) + end + def self.variants + end + end + + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. 
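+        #
+        # For example (a hypothetical two-message input), a `:developer`
+        # instruction can be paired with a `:user` question:
+        #
+        #   input = [
+        #     OpenAI::Responses::EasyInputMessage.new(role: :developer, content: "Answer tersely."),
+        #     OpenAI::Responses::EasyInputMessage.new(role: :user, content: "What is an RBI file?")
+        #   ]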
+ module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::EasyInputMessage::Role) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::Responses::EasyInputMessage::Role::TaggedSymbol + ) + ASSISTANT = + T.let( + :assistant, + OpenAI::Responses::EasyInputMessage::Role::TaggedSymbol + ) + SYSTEM = + T.let( + :system, + OpenAI::Responses::EasyInputMessage::Role::TaggedSymbol + ) + DEVELOPER = + T.let( + :developer, + OpenAI::Responses::EasyInputMessage::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Responses::EasyInputMessage::Role::TaggedSymbol] + ) + end + def self.values + end + end + + # The type of the message input. Always `message`. + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::EasyInputMessage::Type) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MESSAGE = + T.let( + :message, + OpenAI::Responses::EasyInputMessage::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Responses::EasyInputMessage::Type::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/file_search_tool.rbi b/rbi/openai/models/responses/file_search_tool.rbi new file mode 100644 index 00000000..29ff2004 --- /dev/null +++ b/rbi/openai/models/responses/file_search_tool.rbi @@ -0,0 +1,223 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class FileSearchTool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Responses::FileSearchTool, OpenAI::Internal::AnyHash) + end + + # The type of the file search tool. Always `file_search`. + sig { returns(Symbol) } + attr_accessor :type + + # The IDs of the vector stores to search. + sig { returns(T::Array[String]) } + attr_accessor :vector_store_ids + + # A filter to apply. + sig do + returns( + T.nilable(T.any(OpenAI::ComparisonFilter, OpenAI::CompoundFilter)) + ) + end + attr_accessor :filters + + # The maximum number of results to return. This number should be between 1 and 50 + # inclusive. + sig { returns(T.nilable(Integer)) } + attr_reader :max_num_results + + sig { params(max_num_results: Integer).void } + attr_writer :max_num_results + + # Ranking options for search. + sig do + returns(T.nilable(OpenAI::Responses::FileSearchTool::RankingOptions)) + end + attr_reader :ranking_options + + sig do + params( + ranking_options: + OpenAI::Responses::FileSearchTool::RankingOptions::OrHash + ).void + end + attr_writer :ranking_options + + # A tool that searches for relevant content from uploaded files. Learn more about + # the + # [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + sig do + params( + vector_store_ids: T::Array[String], + filters: + T.nilable( + T.any( + OpenAI::ComparisonFilter::OrHash, + OpenAI::CompoundFilter::OrHash + ) + ), + max_num_results: Integer, + ranking_options: + OpenAI::Responses::FileSearchTool::RankingOptions::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The IDs of the vector stores to search. + vector_store_ids:, + # A filter to apply. + filters: nil, + # The maximum number of results to return. This number should be between 1 and 50 + # inclusive. + max_num_results: nil, + # Ranking options for search. + ranking_options: nil, + # The type of the file search tool. Always `file_search`. 
+ type: :file_search + ) + end + + sig do + override.returns( + { + type: Symbol, + vector_store_ids: T::Array[String], + filters: + T.nilable( + T.any(OpenAI::ComparisonFilter, OpenAI::CompoundFilter) + ), + max_num_results: Integer, + ranking_options: OpenAI::Responses::FileSearchTool::RankingOptions + } + ) + end + def to_hash + end + + # A filter to apply. + module Filters + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any(OpenAI::ComparisonFilter, OpenAI::CompoundFilter) + end + + sig do + override.returns( + T::Array[OpenAI::Responses::FileSearchTool::Filters::Variants] + ) + end + def self.variants + end + end + + class RankingOptions < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::FileSearchTool::RankingOptions, + OpenAI::Internal::AnyHash + ) + end + + # The ranker to use for the file search. + sig do + returns( + T.nilable( + OpenAI::Responses::FileSearchTool::RankingOptions::Ranker::OrSymbol + ) + ) + end + attr_reader :ranker + + sig do + params( + ranker: + OpenAI::Responses::FileSearchTool::RankingOptions::Ranker::OrSymbol + ).void + end + attr_writer :ranker + + # The score threshold for the file search, a number between 0 and 1. Numbers + # closer to 1 will attempt to return only the most relevant results, but may + # return fewer results. + sig { returns(T.nilable(Float)) } + attr_reader :score_threshold + + sig { params(score_threshold: Float).void } + attr_writer :score_threshold + + # Ranking options for search. + sig do + params( + ranker: + OpenAI::Responses::FileSearchTool::RankingOptions::Ranker::OrSymbol, + score_threshold: Float + ).returns(T.attached_class) + end + def self.new( + # The ranker to use for the file search. + ranker: nil, + # The score threshold for the file search, a number between 0 and 1. Numbers + # closer to 1 will attempt to return only the most relevant results, but may + # return fewer results. + score_threshold: nil + ) + end + + sig do + override.returns( + { + ranker: + OpenAI::Responses::FileSearchTool::RankingOptions::Ranker::OrSymbol, + score_threshold: Float + } + ) + end + def to_hash + end + + # The ranker to use for the file search. + module Ranker + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::FileSearchTool::RankingOptions::Ranker + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Responses::FileSearchTool::RankingOptions::Ranker::TaggedSymbol + ) + DEFAULT_2024_11_15 = + T.let( + :"default-2024-11-15", + OpenAI::Responses::FileSearchTool::RankingOptions::Ranker::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::FileSearchTool::RankingOptions::Ranker::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/function_tool.rbi b/rbi/openai/models/responses/function_tool.rbi new file mode 100644 index 00000000..16f8f2d7 --- /dev/null +++ b/rbi/openai/models/responses/function_tool.rbi @@ -0,0 +1,76 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class FunctionTool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Responses::FunctionTool, OpenAI::Internal::AnyHash) + end + + # The name of the function to call. + sig { returns(String) } + attr_accessor :name + + # A JSON schema object describing the parameters of the function. 
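+        #
+        # For instance, a hypothetical weather function could describe its
+        # arguments with a plain Ruby hash following JSON Schema:
+        #
+        #   {
+        #     type: "object",
+        #     properties: {location: {type: "string"}},
+        #     required: ["location"]
+        #   }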
+ sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } + attr_accessor :parameters + + # Whether to enforce strict parameter validation. Default `true`. + sig { returns(T.nilable(T::Boolean)) } + attr_accessor :strict + + # The type of the function tool. Always `function`. + sig { returns(Symbol) } + attr_accessor :type + + # A description of the function. Used by the model to determine whether or not to + # call the function. + sig { returns(T.nilable(String)) } + attr_accessor :description + + # Defines a function in your own code the model can choose to call. Learn more + # about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + sig do + params( + name: String, + parameters: T.nilable(T::Hash[Symbol, T.anything]), + strict: T.nilable(T::Boolean), + description: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The name of the function to call. + name:, + # A JSON schema object describing the parameters of the function. + parameters:, + # Whether to enforce strict parameter validation. Default `true`. + strict:, + # A description of the function. Used by the model to determine whether or not to + # call the function. + description: nil, + # The type of the function tool. Always `function`. + type: :function + ) + end + + sig do + override.returns( + { + name: String, + parameters: T.nilable(T::Hash[Symbol, T.anything]), + strict: T.nilable(T::Boolean), + type: Symbol, + description: T.nilable(String) + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/input_item_list_params.rbi b/rbi/openai/models/responses/input_item_list_params.rbi new file mode 100644 index 00000000..02e79111 --- /dev/null +++ b/rbi/openai/models/responses/input_item_list_params.rbi @@ -0,0 +1,146 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class InputItemListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::InputItemListParams, + OpenAI::Internal::AnyHash + ) + end + + # An item ID to list items after, used in pagination. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # Additional fields to include in the response. See the `include` parameter for + # Response creation above for more information. + sig do + returns( + T.nilable(T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]) + ) + end + attr_reader :include + + sig do + params( + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol] + ).void + end + attr_writer :include + + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # The order to return the input items in. Default is `desc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. 
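+        #
+        # A call sketch, assuming the generated `input_items` resource method
+        # and a hypothetical response ID:
+        #
+        #   client.responses.input_items.list("resp_123", order: :asc, limit: 50)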
+ sig do + returns( + T.nilable(OpenAI::Responses::InputItemListParams::Order::OrSymbol) + ) + end + attr_reader :order + + sig do + params( + order: OpenAI::Responses::InputItemListParams::Order::OrSymbol + ).void + end + attr_writer :order + + sig do + params( + after: String, + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + limit: Integer, + order: OpenAI::Responses::InputItemListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # An item ID to list items after, used in pagination. + after: nil, + # Additional fields to include in the response. See the `include` parameter for + # Response creation above for more information. + include: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # The order to return the input items in. Default is `desc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. + order: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + include: + T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + limit: Integer, + order: OpenAI::Responses::InputItemListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # The order to return the input items in. Default is `desc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. + module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::InputItemListParams::Order) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = + T.let( + :asc, + OpenAI::Responses::InputItemListParams::Order::TaggedSymbol + ) + DESC = + T.let( + :desc, + OpenAI::Responses::InputItemListParams::Order::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::InputItemListParams::Order::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response.rbi b/rbi/openai/models/responses/response.rbi new file mode 100644 index 00000000..efacebab --- /dev/null +++ b/rbi/openai/models/responses/response.rbi @@ -0,0 +1,848 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class Response < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Responses::Response, OpenAI::Internal::AnyHash) + end + + # Unique identifier for this Response. + sig { returns(String) } + attr_accessor :id + + # Unix timestamp (in seconds) of when this Response was created. + sig { returns(Float) } + attr_accessor :created_at + + # An error object returned when the model fails to generate a Response. + sig { returns(T.nilable(OpenAI::Responses::ResponseError)) } + attr_reader :error + + sig do + params( + error: T.nilable(OpenAI::Responses::ResponseError::OrHash) + ).void + end + attr_writer :error + + # Details about why the response is incomplete. + sig do + returns(T.nilable(OpenAI::Responses::Response::IncompleteDetails)) + end + attr_reader :incomplete_details + + sig do + params( + incomplete_details: + T.nilable(OpenAI::Responses::Response::IncompleteDetails::OrHash) + ).void + end + attr_writer :incomplete_details + + # A system (or developer) message inserted into the model's context. 
+ # + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. + sig do + returns( + T.nilable(OpenAI::Responses::Response::Instructions::Variants) + ) + end + attr_accessor :instructions + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. + sig { returns(OpenAI::ResponsesModel::Variants) } + attr_accessor :model + + # The object type of this resource - always set to `response`. + sig { returns(Symbol) } + attr_accessor :object + + # An array of content items generated by the model. + # + # - The length and order of items in the `output` array is dependent on the + # model's response. + # - Rather than accessing the first item in the `output` array and assuming it's + # an `assistant` message with the content generated by the model, you might + # consider using the `output_text` property where supported in SDKs. + sig do + returns(T::Array[OpenAI::Responses::ResponseOutputItem::Variants]) + end + attr_accessor :output + + # Whether to allow the model to run tool calls in parallel. + sig { returns(T::Boolean) } + attr_accessor :parallel_tool_calls + + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. + sig { returns(T.nilable(Float)) } + attr_accessor :temperature + + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. + sig { returns(OpenAI::Responses::Response::ToolChoice::Variants) } + attr_accessor :tool_choice + + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use custom tools to call your own code. 
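+        #
+        # An illustrative sketch of inspecting the configured tools on a
+        # returned response (the iteration below is an example, not SDK API):
+        #
+        #   response.tools.each do |tool|
+        #     puts tool.class # e.g. OpenAI::Responses::FunctionTool
+        #   end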
+ sig { returns(T::Array[OpenAI::Responses::Tool::Variants]) } + attr_accessor :tools + + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. + sig { returns(T.nilable(Float)) } + attr_accessor :top_p + + # Whether to run the model response in the background. + # [Learn more](https://platform.openai.com/docs/guides/background). + sig { returns(T.nilable(T::Boolean)) } + attr_accessor :background + + # The conversation that this response belongs to. Input items and output items + # from this response are automatically added to this conversation. + sig { returns(T.nilable(OpenAI::Responses::Response::Conversation)) } + attr_reader :conversation + + sig do + params( + conversation: + T.nilable(OpenAI::Responses::Response::Conversation::OrHash) + ).void + end + attr_writer :conversation + + # An upper bound for the number of tokens that can be generated for a response, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + sig { returns(T.nilable(Integer)) } + attr_accessor :max_output_tokens + + # The maximum number of total calls to built-in tools that can be processed in a + # response. This maximum number applies across all built-in tool calls, not per + # individual tool. Any further attempts to call a tool by the model will be + # ignored. + sig { returns(T.nilable(Integer)) } + attr_accessor :max_tool_calls + + # The unique ID of the previous response to the model. Use this to create + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # Cannot be used in conjunction with `conversation`. + sig { returns(T.nilable(String)) } + attr_accessor :previous_response_id + + # Reference to a prompt template and its variables. + # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + sig { returns(T.nilable(OpenAI::Responses::ResponsePrompt)) } + attr_reader :prompt + + sig do + params( + prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash) + ).void + end + attr_writer :prompt + + # Used by OpenAI to cache responses for similar requests to optimize your cache + # hit rates. Replaces the `user` field. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + sig { returns(T.nilable(String)) } + attr_reader :prompt_cache_key + + sig { params(prompt_cache_key: String).void } + attr_writer :prompt_cache_key + + # **gpt-5 and o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + sig { returns(T.nilable(OpenAI::Reasoning)) } + attr_reader :reasoning + + sig { params(reasoning: T.nilable(OpenAI::Reasoning::OrHash)).void } + attr_writer :reasoning + + # A stable identifier used to help detect users of your application that may be + # violating OpenAI's usage policies. The IDs should be a string that uniquely + # identifies each user. We recommend hashing their username or email address, in + # order to avoid sending us any identifying information. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). 
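+        #
+        # A minimal hashing sketch, assuming the end user's email address is
+        # at hand (`Digest::SHA256` is from Ruby's standard library):
+        #
+        #   require "digest"
+        #   safety_identifier = Digest::SHA256.hexdigest(user_email)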
+ sig { returns(T.nilable(String)) } + attr_reader :safety_identifier + + sig { params(safety_identifier: String).void } + attr_writer :safety_identifier + + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + sig do + returns( + T.nilable(OpenAI::Responses::Response::ServiceTier::TaggedSymbol) + ) + end + attr_accessor :service_tier + + # The status of the response generation. One of `completed`, `failed`, + # `in_progress`, `cancelled`, `queued`, or `incomplete`. + sig do + returns(T.nilable(OpenAI::Responses::ResponseStatus::TaggedSymbol)) + end + attr_reader :status + + sig { params(status: OpenAI::Responses::ResponseStatus::OrSymbol).void } + attr_writer :status + + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + sig { returns(T.nilable(OpenAI::Responses::ResponseTextConfig)) } + attr_reader :text + + sig { params(text: OpenAI::Responses::ResponseTextConfig::OrHash).void } + attr_writer :text + + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + sig { returns(T.nilable(Integer)) } + attr_accessor :top_logprobs + + # The truncation strategy to use for the model response. + # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. + sig do + returns( + T.nilable(OpenAI::Responses::Response::Truncation::TaggedSymbol) + ) + end + attr_accessor :truncation + + # Represents token usage details including input tokens, output tokens, a + # breakdown of output tokens, and the total tokens used. + sig { returns(T.nilable(OpenAI::Responses::ResponseUsage)) } + attr_reader :usage + + sig { params(usage: OpenAI::Responses::ResponseUsage::OrHash).void } + attr_writer :usage + + # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + # `prompt_cache_key` instead to maintain caching optimizations. A stable + # identifier for your end-users. Used to boost cache hit rates by better bucketing + # similar requests and to help OpenAI detect and prevent abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). 
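+        #
+        # A hypothetical migration sketch (the request parameters mirror the
+        # fields documented above; `client` is assumed to be an OpenAI::Client):
+        #
+        #   client.responses.create(
+        #     model: "gpt-4o",
+        #     input: "Hello",
+        #     safety_identifier: Digest::SHA256.hexdigest("user_1234"),
+        #     prompt_cache_key: "user_1234" # instead of user: "user_1234"
+        #   )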
+ sig { returns(T.nilable(String)) } + attr_reader :user + + sig { params(user: String).void } + attr_writer :user + + sig do + params( + id: String, + created_at: Float, + error: T.nilable(OpenAI::Responses::ResponseError::OrHash), + incomplete_details: + T.nilable(OpenAI::Responses::Response::IncompleteDetails::OrHash), + instructions: + T.nilable(OpenAI::Responses::Response::Instructions::Variants), + metadata: T.nilable(T::Hash[Symbol, String]), + model: + T.any( + String, + OpenAI::ChatModel::OrSymbol, + OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol + ), + output: + T::Array[ + T.any( + OpenAI::Responses::ResponseOutputMessage::OrHash, + OpenAI::Responses::ResponseFileSearchToolCall::OrHash, + OpenAI::Responses::ResponseFunctionToolCall::OrHash, + OpenAI::Responses::ResponseFunctionWebSearch::OrHash, + OpenAI::Responses::ResponseComputerToolCall::OrHash, + OpenAI::Responses::ResponseReasoningItem::OrHash, + OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::OrHash, + OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, + OpenAI::Responses::ResponseOutputItem::LocalShellCall::OrHash, + OpenAI::Responses::ResponseOutputItem::McpCall::OrHash, + OpenAI::Responses::ResponseOutputItem::McpListTools::OrHash, + OpenAI::Responses::ResponseOutputItem::McpApprovalRequest::OrHash, + OpenAI::Responses::ResponseCustomToolCall::OrHash + ) + ], + parallel_tool_calls: T::Boolean, + temperature: T.nilable(Float), + tool_choice: + T.any( + OpenAI::Responses::ToolChoiceOptions::OrSymbol, + OpenAI::Responses::ToolChoiceAllowed::OrHash, + OpenAI::Responses::ToolChoiceTypes::OrHash, + OpenAI::Responses::ToolChoiceFunction::OrHash, + OpenAI::Responses::ToolChoiceMcp::OrHash, + OpenAI::Responses::ToolChoiceCustom::OrHash + ), + tools: + T::Array[ + T.any( + OpenAI::Responses::FunctionTool::OrHash, + OpenAI::Responses::FileSearchTool::OrHash, + OpenAI::Responses::ComputerTool::OrHash, + OpenAI::Responses::Tool::Mcp::OrHash, + OpenAI::Responses::Tool::CodeInterpreter::OrHash, + OpenAI::Responses::Tool::ImageGeneration::OrHash, + OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::WebSearchTool::OrHash + ) + ], + top_p: T.nilable(Float), + background: T.nilable(T::Boolean), + conversation: + T.nilable(OpenAI::Responses::Response::Conversation::OrHash), + max_output_tokens: T.nilable(Integer), + max_tool_calls: T.nilable(Integer), + previous_response_id: T.nilable(String), + prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash), + prompt_cache_key: String, + reasoning: T.nilable(OpenAI::Reasoning::OrHash), + safety_identifier: String, + service_tier: + T.nilable(OpenAI::Responses::Response::ServiceTier::OrSymbol), + status: OpenAI::Responses::ResponseStatus::OrSymbol, + text: OpenAI::Responses::ResponseTextConfig::OrHash, + top_logprobs: T.nilable(Integer), + truncation: + T.nilable(OpenAI::Responses::Response::Truncation::OrSymbol), + usage: OpenAI::Responses::ResponseUsage::OrHash, + user: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique identifier for this Response. + id:, + # Unix timestamp (in seconds) of when this Response was created. + created_at:, + # An error object returned when the model fails to generate a Response. + error:, + # Details about why the response is incomplete. + incomplete_details:, + # A system (or developer) message inserted into the model's context. 
+ # + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. + instructions:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. + model:, + # An array of content items generated by the model. + # + # - The length and order of items in the `output` array is dependent on the + # model's response. + # - Rather than accessing the first item in the `output` array and assuming it's + # an `assistant` message with the content generated by the model, you might + # consider using the `output_text` property where supported in SDKs. + output:, + # Whether to allow the model to run tool calls in parallel. + parallel_tool_calls:, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. + temperature:, + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. + tool_choice:, + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use custom tools to call your own code. + tools:, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. + top_p:, + # Whether to run the model response in the background. + # [Learn more](https://platform.openai.com/docs/guides/background). + background: nil, + # The conversation that this response belongs to. Input items and output items + # from this response are automatically added to this conversation. 
+ conversation: nil, + # An upper bound for the number of tokens that can be generated for a response, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_output_tokens: nil, + # The maximum number of total calls to built-in tools that can be processed in a + # response. This maximum number applies across all built-in tool calls, not per + # individual tool. Any further attempts to call a tool by the model will be + # ignored. + max_tool_calls: nil, + # The unique ID of the previous response to the model. Use this to create + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # Cannot be used in conjunction with `conversation`. + previous_response_id: nil, + # Reference to a prompt template and its variables. + # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + prompt: nil, + # Used by OpenAI to cache responses for similar requests to optimize your cache + # hit rates. Replaces the `user` field. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_key: nil, + # **gpt-5 and o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + reasoning: nil, + # A stable identifier used to help detect users of your application that may be + # violating OpenAI's usage policies. The IDs should be a string that uniquely + # identifies each user. We recommend hashing their username or email address, in + # order to avoid sending us any identifying information. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + safety_identifier: nil, + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + service_tier: nil, + # The status of the response generation. One of `completed`, `failed`, + # `in_progress`, `cancelled`, `queued`, or `incomplete`. + status: nil, + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + text: nil, + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + top_logprobs: nil, + # The truncation strategy to use for the model response. 
+ # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. + truncation: nil, + # Represents token usage details including input tokens, output tokens, a + # breakdown of output tokens, and the total tokens used. + usage: nil, + # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + # `prompt_cache_key` instead to maintain caching optimizations. A stable + # identifier for your end-users. Used to boost cache hit rates by better bucketing + # similar requests and to help OpenAI detect and prevent abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + user: nil, + # The object type of this resource - always set to `response`. + object: :response + ) + end + + sig do + override.returns( + { + id: String, + created_at: Float, + error: T.nilable(OpenAI::Responses::ResponseError), + incomplete_details: + T.nilable(OpenAI::Responses::Response::IncompleteDetails), + instructions: + T.nilable(OpenAI::Responses::Response::Instructions::Variants), + metadata: T.nilable(T::Hash[Symbol, String]), + model: OpenAI::ResponsesModel::Variants, + object: Symbol, + output: T::Array[OpenAI::Responses::ResponseOutputItem::Variants], + parallel_tool_calls: T::Boolean, + temperature: T.nilable(Float), + tool_choice: OpenAI::Responses::Response::ToolChoice::Variants, + tools: T::Array[OpenAI::Responses::Tool::Variants], + top_p: T.nilable(Float), + background: T.nilable(T::Boolean), + conversation: + T.nilable(OpenAI::Responses::Response::Conversation), + max_output_tokens: T.nilable(Integer), + max_tool_calls: T.nilable(Integer), + previous_response_id: T.nilable(String), + prompt: T.nilable(OpenAI::Responses::ResponsePrompt), + prompt_cache_key: String, + reasoning: T.nilable(OpenAI::Reasoning), + safety_identifier: String, + service_tier: + T.nilable( + OpenAI::Responses::Response::ServiceTier::TaggedSymbol + ), + status: OpenAI::Responses::ResponseStatus::TaggedSymbol, + text: OpenAI::Responses::ResponseTextConfig, + top_logprobs: T.nilable(Integer), + truncation: + T.nilable( + OpenAI::Responses::Response::Truncation::TaggedSymbol + ), + usage: OpenAI::Responses::ResponseUsage, + user: String + } + ) + end + def to_hash + end + + class IncompleteDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::Response::IncompleteDetails, + OpenAI::Internal::AnyHash + ) + end + + # The reason why the response is incomplete. + sig do + returns( + T.nilable( + OpenAI::Responses::Response::IncompleteDetails::Reason::TaggedSymbol + ) + ) + end + attr_reader :reason + + sig do + params( + reason: + OpenAI::Responses::Response::IncompleteDetails::Reason::OrSymbol + ).void + end + attr_writer :reason + + # Details about why the response is incomplete. + sig do + params( + reason: + OpenAI::Responses::Response::IncompleteDetails::Reason::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # The reason why the response is incomplete. + reason: nil + ) + end + + sig do + override.returns( + { + reason: + OpenAI::Responses::Response::IncompleteDetails::Reason::TaggedSymbol + } + ) + end + def to_hash + end + + # The reason why the response is incomplete. 
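+          #
+          # A usage sketch for handling truncation (accessor names follow the
+          # definitions above):
+          #
+          #   if response.incomplete_details&.reason == :max_output_tokens
+          #     warn "Output was truncated; consider raising max_output_tokens."
+          #   end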
+ module Reason + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::Response::IncompleteDetails::Reason + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MAX_OUTPUT_TOKENS = + T.let( + :max_output_tokens, + OpenAI::Responses::Response::IncompleteDetails::Reason::TaggedSymbol + ) + CONTENT_FILTER = + T.let( + :content_filter, + OpenAI::Responses::Response::IncompleteDetails::Reason::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::Response::IncompleteDetails::Reason::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + # A system (or developer) message inserted into the model's context. + # + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. + module Instructions + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[OpenAI::Responses::ResponseInputItem::Variants] + ) + end + + sig do + override.returns( + T::Array[OpenAI::Responses::Response::Instructions::Variants] + ) + end + def self.variants + end + + ResponseInputItemArray = + T.let( + OpenAI::Internal::Type::ArrayOf[ + union: OpenAI::Responses::ResponseInputItem + ], + OpenAI::Internal::Type::Converter + ) + end + + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. + module ToolChoice + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ToolChoiceOptions::TaggedSymbol, + OpenAI::Responses::ToolChoiceAllowed, + OpenAI::Responses::ToolChoiceTypes, + OpenAI::Responses::ToolChoiceFunction, + OpenAI::Responses::ToolChoiceMcp, + OpenAI::Responses::ToolChoiceCustom + ) + end + + sig do + override.returns( + T::Array[OpenAI::Responses::Response::ToolChoice::Variants] + ) + end + def self.variants + end + end + + class Conversation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::Response::Conversation, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the conversation. + sig { returns(String) } + attr_accessor :id + + # The conversation that this response belongs to. Input items and output items + # from this response are automatically added to this conversation. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the conversation. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. 
+ # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + module ServiceTier + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::Response::ServiceTier) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let(:auto, OpenAI::Responses::Response::ServiceTier::TaggedSymbol) + DEFAULT = + T.let( + :default, + OpenAI::Responses::Response::ServiceTier::TaggedSymbol + ) + FLEX = + T.let(:flex, OpenAI::Responses::Response::ServiceTier::TaggedSymbol) + SCALE = + T.let( + :scale, + OpenAI::Responses::Response::ServiceTier::TaggedSymbol + ) + PRIORITY = + T.let( + :priority, + OpenAI::Responses::Response::ServiceTier::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Responses::Response::ServiceTier::TaggedSymbol] + ) + end + def self.values + end + end + + # The truncation strategy to use for the model response. + # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. + module Truncation + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::Response::Truncation) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let(:auto, OpenAI::Responses::Response::Truncation::TaggedSymbol) + DISABLED = + T.let( + :disabled, + OpenAI::Responses::Response::Truncation::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Responses::Response::Truncation::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_audio_delta_event.rbi b/rbi/openai/models/responses/response_audio_delta_event.rbi new file mode 100644 index 00000000..fbc13de2 --- /dev/null +++ b/rbi/openai/models/responses/response_audio_delta_event.rbi @@ -0,0 +1,53 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseAudioDeltaEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseAudioDeltaEvent, + OpenAI::Internal::AnyHash + ) + end + + # A chunk of Base64 encoded response audio bytes. + sig { returns(String) } + attr_accessor :delta + + # A sequence number for this chunk of the stream response. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.audio.delta`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when there is a partial audio response. + sig do + params(delta: String, sequence_number: Integer, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # A chunk of Base64 encoded response audio bytes. + delta:, + # A sequence number for this chunk of the stream response. + sequence_number:, + # The type of the event. Always `response.audio.delta`. 
+ type: :"response.audio.delta" + ) + end + + sig do + override.returns( + { delta: String, sequence_number: Integer, type: Symbol } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_audio_done_event.rbi b/rbi/openai/models/responses/response_audio_done_event.rbi new file mode 100644 index 00000000..20be4298 --- /dev/null +++ b/rbi/openai/models/responses/response_audio_done_event.rbi @@ -0,0 +1,43 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseAudioDoneEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseAudioDoneEvent, + OpenAI::Internal::AnyHash + ) + end + + # The sequence number of the delta. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.audio.done`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when the audio response is complete. + sig do + params(sequence_number: Integer, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The sequence number of the delta. + sequence_number:, + # The type of the event. Always `response.audio.done`. + type: :"response.audio.done" + ) + end + + sig { override.returns({ sequence_number: Integer, type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_audio_transcript_delta_event.rbi b/rbi/openai/models/responses/response_audio_transcript_delta_event.rbi new file mode 100644 index 00000000..ef75b6be --- /dev/null +++ b/rbi/openai/models/responses/response_audio_transcript_delta_event.rbi @@ -0,0 +1,53 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseAudioTranscriptDeltaEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseAudioTranscriptDeltaEvent, + OpenAI::Internal::AnyHash + ) + end + + # The partial transcript of the audio response. + sig { returns(String) } + attr_accessor :delta + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.audio.transcript.delta`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when there is a partial transcript of audio. + sig do + params(delta: String, sequence_number: Integer, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The partial transcript of the audio response. + delta:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always `response.audio.transcript.delta`. + type: :"response.audio.transcript.delta" + ) + end + + sig do + override.returns( + { delta: String, sequence_number: Integer, type: Symbol } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_audio_transcript_done_event.rbi b/rbi/openai/models/responses/response_audio_transcript_done_event.rbi new file mode 100644 index 00000000..f6a29e91 --- /dev/null +++ b/rbi/openai/models/responses/response_audio_transcript_done_event.rbi @@ -0,0 +1,43 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseAudioTranscriptDoneEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseAudioTranscriptDoneEvent, + OpenAI::Internal::AnyHash + ) + end + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. 
Always `response.audio.transcript.done`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when the full audio transcript is completed. + sig do + params(sequence_number: Integer, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The sequence number of this event. + sequence_number:, + # The type of the event. Always `response.audio.transcript.done`. + type: :"response.audio.transcript.done" + ) + end + + sig { override.returns({ sequence_number: Integer, type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_cancel_params.rbi b/rbi/openai/models/responses/response_cancel_params.rbi new file mode 100644 index 00000000..296eaaab --- /dev/null +++ b/rbi/openai/models/responses/response_cancel_params.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCancelParams, + OpenAI::Internal::AnyHash + ) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi b/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi new file mode 100644 index 00000000..91935135 --- /dev/null +++ b/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi @@ -0,0 +1,77 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, + OpenAI::Internal::AnyHash + ) + end + + # The partial code snippet being streamed by the code interpreter. + sig { returns(String) } + attr_accessor :delta + + # The unique identifier of the code interpreter tool call item. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item in the response for which the code is being + # streamed. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event, used to order streaming events. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.code_interpreter_call_code.delta`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a partial code snippet is streamed by the code interpreter. + sig do + params( + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The partial code snippet being streamed by the code interpreter. + delta:, + # The unique identifier of the code interpreter tool call item. + item_id:, + # The index of the output item in the response for which the code is being + # streamed. + output_index:, + # The sequence number of this event, used to order streaming events. + sequence_number:, + # The type of the event. Always `response.code_interpreter_call_code.delta`. 
+ type: :"response.code_interpreter_call_code.delta" + ) + end + + sig do + override.returns( + { + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi b/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi new file mode 100644 index 00000000..ee42ae87 --- /dev/null +++ b/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi @@ -0,0 +1,75 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCodeInterpreterCallCodeDoneEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCodeInterpreterCallCodeDoneEvent, + OpenAI::Internal::AnyHash + ) + end + + # The final code snippet output by the code interpreter. + sig { returns(String) } + attr_accessor :code + + # The unique identifier of the code interpreter tool call item. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item in the response for which the code is finalized. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event, used to order streaming events. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.code_interpreter_call_code.done`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when the code snippet is finalized by the code interpreter. + sig do + params( + code: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The final code snippet output by the code interpreter. + code:, + # The unique identifier of the code interpreter tool call item. + item_id:, + # The index of the output item in the response for which the code is finalized. + output_index:, + # The sequence number of this event, used to order streaming events. + sequence_number:, + # The type of the event. Always `response.code_interpreter_call_code.done`. + type: :"response.code_interpreter_call_code.done" + ) + end + + sig do + override.returns( + { + code: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi b/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi new file mode 100644 index 00000000..fdc86de0 --- /dev/null +++ b/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi @@ -0,0 +1,69 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCodeInterpreterCallCompletedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCodeInterpreterCallCompletedEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique identifier of the code interpreter tool call item. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item in the response for which the code interpreter call + # is completed. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event, used to order streaming events. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. 
Always `response.code_interpreter_call.completed`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when the code interpreter call is completed. + sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique identifier of the code interpreter tool call item. + item_id:, + # The index of the output item in the response for which the code interpreter call + # is completed. + output_index:, + # The sequence number of this event, used to order streaming events. + sequence_number:, + # The type of the event. Always `response.code_interpreter_call.completed`. + type: :"response.code_interpreter_call.completed" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi b/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi new file mode 100644 index 00000000..c15d559e --- /dev/null +++ b/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi @@ -0,0 +1,69 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCodeInterpreterCallInProgressEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCodeInterpreterCallInProgressEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique identifier of the code interpreter tool call item. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item in the response for which the code interpreter call + # is in progress. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event, used to order streaming events. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.code_interpreter_call.in_progress`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a code interpreter call is in progress. + sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique identifier of the code interpreter tool call item. + item_id:, + # The index of the output item in the response for which the code interpreter call + # is in progress. + output_index:, + # The sequence number of this event, used to order streaming events. + sequence_number:, + # The type of the event. Always `response.code_interpreter_call.in_progress`. 
+ type: :"response.code_interpreter_call.in_progress" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi b/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi new file mode 100644 index 00000000..458cf8e1 --- /dev/null +++ b/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi @@ -0,0 +1,69 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCodeInterpreterCallInterpretingEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCodeInterpreterCallInterpretingEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique identifier of the code interpreter tool call item. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item in the response for which the code interpreter is + # interpreting code. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event, used to order streaming events. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.code_interpreter_call.interpreting`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when the code interpreter is actively interpreting the code snippet. + sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique identifier of the code interpreter tool call item. + item_id:, + # The index of the output item in the response for which the code interpreter is + # interpreting code. + output_index:, + # The sequence number of this event, used to order streaming events. + sequence_number:, + # The type of the event. Always `response.code_interpreter_call.interpreting`. + type: :"response.code_interpreter_call.interpreting" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi b/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi new file mode 100644 index 00000000..1d3f422f --- /dev/null +++ b/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi @@ -0,0 +1,258 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCodeInterpreterToolCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCodeInterpreterToolCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the code interpreter tool call. + sig { returns(String) } + attr_accessor :id + + # The code to run, or null if not available. + sig { returns(T.nilable(String)) } + attr_accessor :code + + # The ID of the container used to run the code. + sig { returns(String) } + attr_accessor :container_id + + # The outputs generated by the code interpreter, such as logs or images. Can be + # null if no outputs are available. 
+ sig do + returns( + T.nilable( + T::Array[ + T.any( + OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Logs, + OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Image + ) + ] + ) + ) + end + attr_accessor :outputs + + # The status of the code interpreter tool call. Valid values are `in_progress`, + # `completed`, `incomplete`, `interpreting`, and `failed`. + sig do + returns( + OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::OrSymbol + ) + end + attr_accessor :status + + # The type of the code interpreter tool call. Always `code_interpreter_call`. + sig { returns(Symbol) } + attr_accessor :type + + # A tool call to run code. + sig do + params( + id: String, + code: T.nilable(String), + container_id: String, + outputs: + T.nilable( + T::Array[ + T.any( + OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Logs::OrHash, + OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Image::OrHash + ) + ] + ), + status: + OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the code interpreter tool call. + id:, + # The code to run, or null if not available. + code:, + # The ID of the container used to run the code. + container_id:, + # The outputs generated by the code interpreter, such as logs or images. Can be + # null if no outputs are available. + outputs:, + # The status of the code interpreter tool call. Valid values are `in_progress`, + # `completed`, `incomplete`, `interpreting`, and `failed`. + status:, + # The type of the code interpreter tool call. Always `code_interpreter_call`. + type: :code_interpreter_call + ) + end + + sig do + override.returns( + { + id: String, + code: T.nilable(String), + container_id: String, + outputs: + T.nilable( + T::Array[ + T.any( + OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Logs, + OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Image + ) + ] + ), + status: + OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::OrSymbol, + type: Symbol + } + ) + end + def to_hash + end + + # The logs output from the code interpreter. + module Output + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Logs, + OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Image + ) + end + + class Logs < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Logs, + OpenAI::Internal::AnyHash + ) + end + + # The logs output from the code interpreter. + sig { returns(String) } + attr_accessor :logs + + # The type of the output. Always 'logs'. + sig { returns(Symbol) } + attr_accessor :type + + # The logs output from the code interpreter. + sig { params(logs: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The logs output from the code interpreter. + logs:, + # The type of the output. Always 'logs'. + type: :logs + ) + end + + sig { override.returns({ logs: String, type: Symbol }) } + def to_hash + end + end + + class Image < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Image, + OpenAI::Internal::AnyHash + ) + end + + # The type of the output. Always 'image'. + sig { returns(Symbol) } + attr_accessor :type + + # The URL of the image output from the code interpreter. 
+ sig { returns(String) } + attr_accessor :url + + # The image output from the code interpreter. + sig { params(url: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The URL of the image output from the code interpreter. + url:, + # The type of the output. Always 'image'. + type: :image + ) + end + + sig { override.returns({ type: Symbol, url: String }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Variants + ] + ) + end + def self.variants + end + end + + # The status of the code interpreter tool call. Valid values are `in_progress`, + # `completed`, `incomplete`, `interpreting`, and `failed`. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseCodeInterpreterToolCall::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::TaggedSymbol + ) + INTERPRETING = + T.let( + :interpreting, + OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::TaggedSymbol + ) + FAILED = + T.let( + :failed, + OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_completed_event.rbi b/rbi/openai/models/responses/response_completed_event.rbi new file mode 100644 index 00000000..a583bacf --- /dev/null +++ b/rbi/openai/models/responses/response_completed_event.rbi @@ -0,0 +1,62 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCompletedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCompletedEvent, + OpenAI::Internal::AnyHash + ) + end + + # Properties of the completed response. + sig { returns(OpenAI::Responses::Response) } + attr_reader :response + + sig { params(response: OpenAI::Responses::Response::OrHash).void } + attr_writer :response + + # The sequence number for this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.completed`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when the model response is complete. + sig do + params( + response: OpenAI::Responses::Response::OrHash, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Properties of the completed response. + response:, + # The sequence number for this event. + sequence_number:, + # The type of the event. Always `response.completed`. 
+ type: :"response.completed" + ) + end + + sig do + override.returns( + { + response: OpenAI::Responses::Response, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_computer_tool_call.rbi b/rbi/openai/models/responses/response_computer_tool_call.rbi new file mode 100644 index 00000000..8bdf6af6 --- /dev/null +++ b/rbi/openai/models/responses/response_computer_tool_call.rbi @@ -0,0 +1,795 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseComputerToolCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the computer call. + sig { returns(String) } + attr_accessor :id + + # A click action. + sig do + returns( + T.any( + OpenAI::Responses::ResponseComputerToolCall::Action::Click, + OpenAI::Responses::ResponseComputerToolCall::Action::DoubleClick, + OpenAI::Responses::ResponseComputerToolCall::Action::Drag, + OpenAI::Responses::ResponseComputerToolCall::Action::Keypress, + OpenAI::Responses::ResponseComputerToolCall::Action::Move, + OpenAI::Responses::ResponseComputerToolCall::Action::Screenshot, + OpenAI::Responses::ResponseComputerToolCall::Action::Scroll, + OpenAI::Responses::ResponseComputerToolCall::Action::Type, + OpenAI::Responses::ResponseComputerToolCall::Action::Wait + ) + ) + end + attr_accessor :action + + # An identifier used when responding to the tool call with output. + sig { returns(String) } + attr_accessor :call_id + + # The pending safety checks for the computer call. + sig do + returns( + T::Array[ + OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck + ] + ) + end + attr_accessor :pending_safety_checks + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + sig do + returns(OpenAI::Responses::ResponseComputerToolCall::Status::OrSymbol) + end + attr_accessor :status + + # The type of the computer call. Always `computer_call`. + sig do + returns(OpenAI::Responses::ResponseComputerToolCall::Type::OrSymbol) + end + attr_accessor :type + + # A tool call to a computer use tool. See the + # [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) + # for more information. + sig do + params( + id: String, + action: + T.any( + OpenAI::Responses::ResponseComputerToolCall::Action::Click::OrHash, + OpenAI::Responses::ResponseComputerToolCall::Action::DoubleClick::OrHash, + OpenAI::Responses::ResponseComputerToolCall::Action::Drag::OrHash, + OpenAI::Responses::ResponseComputerToolCall::Action::Keypress::OrHash, + OpenAI::Responses::ResponseComputerToolCall::Action::Move::OrHash, + OpenAI::Responses::ResponseComputerToolCall::Action::Screenshot::OrHash, + OpenAI::Responses::ResponseComputerToolCall::Action::Scroll::OrHash, + OpenAI::Responses::ResponseComputerToolCall::Action::Type::OrHash, + OpenAI::Responses::ResponseComputerToolCall::Action::Wait::OrHash + ), + call_id: String, + pending_safety_checks: + T::Array[ + OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck::OrHash + ], + status: + OpenAI::Responses::ResponseComputerToolCall::Status::OrSymbol, + type: OpenAI::Responses::ResponseComputerToolCall::Type::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the computer call. + id:, + # A click action. 
+ action:, + # An identifier used when responding to the tool call with output. + call_id:, + # The pending safety checks for the computer call. + pending_safety_checks:, + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + status:, + # The type of the computer call. Always `computer_call`. + type: + ) + end + + sig do + override.returns( + { + id: String, + action: + T.any( + OpenAI::Responses::ResponseComputerToolCall::Action::Click, + OpenAI::Responses::ResponseComputerToolCall::Action::DoubleClick, + OpenAI::Responses::ResponseComputerToolCall::Action::Drag, + OpenAI::Responses::ResponseComputerToolCall::Action::Keypress, + OpenAI::Responses::ResponseComputerToolCall::Action::Move, + OpenAI::Responses::ResponseComputerToolCall::Action::Screenshot, + OpenAI::Responses::ResponseComputerToolCall::Action::Scroll, + OpenAI::Responses::ResponseComputerToolCall::Action::Type, + OpenAI::Responses::ResponseComputerToolCall::Action::Wait + ), + call_id: String, + pending_safety_checks: + T::Array[ + OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck + ], + status: + OpenAI::Responses::ResponseComputerToolCall::Status::OrSymbol, + type: OpenAI::Responses::ResponseComputerToolCall::Type::OrSymbol + } + ) + end + def to_hash + end + + # A click action. + module Action + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCall::Action::Click, + OpenAI::Responses::ResponseComputerToolCall::Action::DoubleClick, + OpenAI::Responses::ResponseComputerToolCall::Action::Drag, + OpenAI::Responses::ResponseComputerToolCall::Action::Keypress, + OpenAI::Responses::ResponseComputerToolCall::Action::Move, + OpenAI::Responses::ResponseComputerToolCall::Action::Screenshot, + OpenAI::Responses::ResponseComputerToolCall::Action::Scroll, + OpenAI::Responses::ResponseComputerToolCall::Action::Type, + OpenAI::Responses::ResponseComputerToolCall::Action::Wait + ) + end + + class Click < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCall::Action::Click, + OpenAI::Internal::AnyHash + ) + end + + # Indicates which mouse button was pressed during the click. One of `left`, + # `right`, `wheel`, `back`, or `forward`. + sig do + returns( + OpenAI::Responses::ResponseComputerToolCall::Action::Click::Button::OrSymbol + ) + end + attr_accessor :button + + # Specifies the event type. For a click action, this property is always set to + # `click`. + sig { returns(Symbol) } + attr_accessor :type + + # The x-coordinate where the click occurred. + sig { returns(Integer) } + attr_accessor :x + + # The y-coordinate where the click occurred. + sig { returns(Integer) } + attr_accessor :y_ + + # A click action. + sig do + params( + button: + OpenAI::Responses::ResponseComputerToolCall::Action::Click::Button::OrSymbol, + x: Integer, + y_: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Indicates which mouse button was pressed during the click. One of `left`, + # `right`, `wheel`, `back`, or `forward`. + button:, + # The x-coordinate where the click occurred. + x:, + # The y-coordinate where the click occurred. + y_:, + # Specifies the event type. For a click action, this property is always set to + # `click`. 
+ type: :click + ) + end + + sig do + override.returns( + { + button: + OpenAI::Responses::ResponseComputerToolCall::Action::Click::Button::OrSymbol, + type: Symbol, + x: Integer, + y_: Integer + } + ) + end + def to_hash + end + + # Indicates which mouse button was pressed during the click. One of `left`, + # `right`, `wheel`, `back`, or `forward`. + module Button + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseComputerToolCall::Action::Click::Button + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LEFT = + T.let( + :left, + OpenAI::Responses::ResponseComputerToolCall::Action::Click::Button::TaggedSymbol + ) + RIGHT = + T.let( + :right, + OpenAI::Responses::ResponseComputerToolCall::Action::Click::Button::TaggedSymbol + ) + WHEEL = + T.let( + :wheel, + OpenAI::Responses::ResponseComputerToolCall::Action::Click::Button::TaggedSymbol + ) + BACK = + T.let( + :back, + OpenAI::Responses::ResponseComputerToolCall::Action::Click::Button::TaggedSymbol + ) + FORWARD = + T.let( + :forward, + OpenAI::Responses::ResponseComputerToolCall::Action::Click::Button::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseComputerToolCall::Action::Click::Button::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class DoubleClick < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCall::Action::DoubleClick, + OpenAI::Internal::AnyHash + ) + end + + # Specifies the event type. For a double click action, this property is always set + # to `double_click`. + sig { returns(Symbol) } + attr_accessor :type + + # The x-coordinate where the double click occurred. + sig { returns(Integer) } + attr_accessor :x + + # The y-coordinate where the double click occurred. + sig { returns(Integer) } + attr_accessor :y_ + + # A double click action. + sig do + params(x: Integer, y_: Integer, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The x-coordinate where the double click occurred. + x:, + # The y-coordinate where the double click occurred. + y_:, + # Specifies the event type. For a double click action, this property is always set + # to `double_click`. + type: :double_click + ) + end + + sig { override.returns({ type: Symbol, x: Integer, y_: Integer }) } + def to_hash + end + end + + class Drag < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCall::Action::Drag, + OpenAI::Internal::AnyHash + ) + end + + # An array of coordinates representing the path of the drag action. Coordinates + # will appear as an array of objects, eg + # + # ``` + # [ + # { x: 100, y: 200 }, + # { x: 200, y: 300 } + # ] + # ``` + sig do + returns( + T::Array[ + OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path + ] + ) + end + attr_accessor :path + + # Specifies the event type. For a drag action, this property is always set to + # `drag`. + sig { returns(Symbol) } + attr_accessor :type + + # A drag action. + sig do + params( + path: + T::Array[ + OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path::OrHash + ], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # An array of coordinates representing the path of the drag action. Coordinates + # will appear as an array of objects, eg + # + # ``` + # [ + # { x: 100, y: 200 }, + # { x: 200, y: 300 } + # ] + # ``` + path:, + # Specifies the event type. 
For a drag action, this property is always set to + # `drag`. + type: :drag + ) + end + + sig do + override.returns( + { + path: + T::Array[ + OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path + ], + type: Symbol + } + ) + end + def to_hash + end + + class Path < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path, + OpenAI::Internal::AnyHash + ) + end + + # The x-coordinate. + sig { returns(Integer) } + attr_accessor :x + + # The y-coordinate. + sig { returns(Integer) } + attr_accessor :y_ + + # A series of x/y coordinate pairs in the drag path. + sig { params(x: Integer, y_: Integer).returns(T.attached_class) } + def self.new( + # The x-coordinate. + x:, + # The y-coordinate. + y_: + ) + end + + sig { override.returns({ x: Integer, y_: Integer }) } + def to_hash + end + end + end + + class Keypress < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCall::Action::Keypress, + OpenAI::Internal::AnyHash + ) + end + + # The combination of keys the model is requesting to be pressed. This is an array + # of strings, each representing a key. + sig { returns(T::Array[String]) } + attr_accessor :keys + + # Specifies the event type. For a keypress action, this property is always set to + # `keypress`. + sig { returns(Symbol) } + attr_accessor :type + + # A collection of keypresses the model would like to perform. + sig do + params(keys: T::Array[String], type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The combination of keys the model is requesting to be pressed. This is an array + # of strings, each representing a key. + keys:, + # Specifies the event type. For a keypress action, this property is always set to + # `keypress`. + type: :keypress + ) + end + + sig { override.returns({ keys: T::Array[String], type: Symbol }) } + def to_hash + end + end + + class Move < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCall::Action::Move, + OpenAI::Internal::AnyHash + ) + end + + # Specifies the event type. For a move action, this property is always set to + # `move`. + sig { returns(Symbol) } + attr_accessor :type + + # The x-coordinate to move to. + sig { returns(Integer) } + attr_accessor :x + + # The y-coordinate to move to. + sig { returns(Integer) } + attr_accessor :y_ + + # A mouse move action. + sig do + params(x: Integer, y_: Integer, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The x-coordinate to move to. + x:, + # The y-coordinate to move to. + y_:, + # Specifies the event type. For a move action, this property is always set to + # `move`. + type: :move + ) + end + + sig { override.returns({ type: Symbol, x: Integer, y_: Integer }) } + def to_hash + end + end + + class Screenshot < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCall::Action::Screenshot, + OpenAI::Internal::AnyHash + ) + end + + # Specifies the event type. For a screenshot action, this property is always set + # to `screenshot`. + sig { returns(Symbol) } + attr_accessor :type + + # A screenshot action. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # Specifies the event type. For a screenshot action, this property is always set + # to `screenshot`. 
+ type: :screenshot + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + class Scroll < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCall::Action::Scroll, + OpenAI::Internal::AnyHash + ) + end + + # The horizontal scroll distance. + sig { returns(Integer) } + attr_accessor :scroll_x + + # The vertical scroll distance. + sig { returns(Integer) } + attr_accessor :scroll_y + + # Specifies the event type. For a scroll action, this property is always set to + # `scroll`. + sig { returns(Symbol) } + attr_accessor :type + + # The x-coordinate where the scroll occurred. + sig { returns(Integer) } + attr_accessor :x + + # The y-coordinate where the scroll occurred. + sig { returns(Integer) } + attr_accessor :y_ + + # A scroll action. + sig do + params( + scroll_x: Integer, + scroll_y: Integer, + x: Integer, + y_: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The horizontal scroll distance. + scroll_x:, + # The vertical scroll distance. + scroll_y:, + # The x-coordinate where the scroll occurred. + x:, + # The y-coordinate where the scroll occurred. + y_:, + # Specifies the event type. For a scroll action, this property is always set to + # `scroll`. + type: :scroll + ) + end + + sig do + override.returns( + { + scroll_x: Integer, + scroll_y: Integer, + type: Symbol, + x: Integer, + y_: Integer + } + ) + end + def to_hash + end + end + + class Type < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCall::Action::Type, + OpenAI::Internal::AnyHash + ) + end + + # The text to type. + sig { returns(String) } + attr_accessor :text + + # Specifies the event type. For a type action, this property is always set to + # `type`. + sig { returns(Symbol) } + attr_accessor :type + + # An action to type in text. + sig { params(text: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The text to type. + text:, + # Specifies the event type. For a type action, this property is always set to + # `type`. + type: :type + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + + class Wait < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCall::Action::Wait, + OpenAI::Internal::AnyHash + ) + end + + # Specifies the event type. For a wait action, this property is always set to + # `wait`. + sig { returns(Symbol) } + attr_accessor :type + + # A wait action. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # Specifies the event type. For a wait action, this property is always set to + # `wait`. + type: :wait + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseComputerToolCall::Action::Variants + ] + ) + end + def self.variants + end + end + + class PendingSafetyCheck < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the pending safety check. + sig { returns(String) } + attr_accessor :id + + # The type of the pending safety check. + sig { returns(String) } + attr_accessor :code + + # Details about the pending safety check. + sig { returns(String) } + attr_accessor :message + + # A pending safety check for the computer call. 
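+          # A minimal construction sketch; the field values below are
+          # hypothetical placeholders, not values documented by the API:
+          #
+          # ```
+          # OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck.new(
+          #   id: "cu_sc_123",
+          #   code: "malicious_instructions",
+          #   message: "Potential prompt injection detected."
+          # )
+          # ```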
+ sig do + params(id: String, code: String, message: String).returns( + T.attached_class + ) + end + def self.new( + # The ID of the pending safety check. + id:, + # The type of the pending safety check. + code:, + # Details about the pending safety check. + message: + ) + end + + sig do + override.returns({ id: String, code: String, message: String }) + end + def to_hash + end + end + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ResponseComputerToolCall::Status) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseComputerToolCall::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseComputerToolCall::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseComputerToolCall::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseComputerToolCall::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The type of the computer call. Always `computer_call`. + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ResponseComputerToolCall::Type) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + COMPUTER_CALL = + T.let( + :computer_call, + OpenAI::Responses::ResponseComputerToolCall::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseComputerToolCall::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_computer_tool_call_output_item.rbi b/rbi/openai/models/responses/response_computer_tool_call_output_item.rbi new file mode 100644 index 00000000..c56f05d6 --- /dev/null +++ b/rbi/openai/models/responses/response_computer_tool_call_output_item.rbi @@ -0,0 +1,223 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseComputerToolCallOutputItem < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCallOutputItem, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the computer call tool output. + sig { returns(String) } + attr_accessor :id + + # The ID of the computer tool call that produced the output. + sig { returns(String) } + attr_accessor :call_id + + # A computer screenshot image used with the computer use tool. + sig do + returns(OpenAI::Responses::ResponseComputerToolCallOutputScreenshot) + end + attr_reader :output + + sig do + params( + output: + OpenAI::Responses::ResponseComputerToolCallOutputScreenshot::OrHash + ).void + end + attr_writer :output + + # The type of the computer tool call output. Always `computer_call_output`. + sig { returns(Symbol) } + attr_accessor :type + + # The safety checks reported by the API that have been acknowledged by the + # developer. 
+ sig do + returns( + T.nilable( + T::Array[ + OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck + ] + ) + ) + end + attr_reader :acknowledged_safety_checks + + sig do + params( + acknowledged_safety_checks: + T::Array[ + OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck::OrHash + ] + ).void + end + attr_writer :acknowledged_safety_checks + + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseComputerToolCallOutputItem::Status::TaggedSymbol + ) + ) + end + attr_reader :status + + sig do + params( + status: + OpenAI::Responses::ResponseComputerToolCallOutputItem::Status::OrSymbol + ).void + end + attr_writer :status + + sig do + params( + id: String, + call_id: String, + output: + OpenAI::Responses::ResponseComputerToolCallOutputScreenshot::OrHash, + acknowledged_safety_checks: + T::Array[ + OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck::OrHash + ], + status: + OpenAI::Responses::ResponseComputerToolCallOutputItem::Status::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the computer call tool output. + id:, + # The ID of the computer tool call that produced the output. + call_id:, + # A computer screenshot image used with the computer use tool. + output:, + # The safety checks reported by the API that have been acknowledged by the + # developer. + acknowledged_safety_checks: nil, + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. + status: nil, + # The type of the computer tool call output. Always `computer_call_output`. + type: :computer_call_output + ) + end + + sig do + override.returns( + { + id: String, + call_id: String, + output: + OpenAI::Responses::ResponseComputerToolCallOutputScreenshot, + type: Symbol, + acknowledged_safety_checks: + T::Array[ + OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck + ], + status: + OpenAI::Responses::ResponseComputerToolCallOutputItem::Status::TaggedSymbol + } + ) + end + def to_hash + end + + class AcknowledgedSafetyCheck < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the pending safety check. + sig { returns(String) } + attr_accessor :id + + # The type of the pending safety check. + sig { returns(String) } + attr_accessor :code + + # Details about the pending safety check. + sig { returns(String) } + attr_accessor :message + + # A pending safety check for the computer call. + sig do + params(id: String, code: String, message: String).returns( + T.attached_class + ) + end + def self.new( + # The ID of the pending safety check. + id:, + # The type of the pending safety check. + code:, + # Details about the pending safety check. + message: + ) + end + + sig do + override.returns({ id: String, code: String, message: String }) + end + def to_hash + end + end + + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. 
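+        # At runtime these tagged symbols are ordinary Ruby symbols, so a
+        # comparison sketch (assuming `item` was fetched via the API) is:
+        #
+        # ```
+        # item.status == OpenAI::Responses::ResponseComputerToolCallOutputItem::Status::COMPLETED
+        # # equivalent to: item.status == :completed
+        # ```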
+ module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseComputerToolCallOutputItem::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseComputerToolCallOutputItem::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseComputerToolCallOutputItem::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseComputerToolCallOutputItem::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseComputerToolCallOutputItem::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_computer_tool_call_output_screenshot.rbi b/rbi/openai/models/responses/response_computer_tool_call_output_screenshot.rbi new file mode 100644 index 00000000..fc13ccfe --- /dev/null +++ b/rbi/openai/models/responses/response_computer_tool_call_output_screenshot.rbi @@ -0,0 +1,59 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseComputerToolCallOutputScreenshot < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseComputerToolCallOutputScreenshot, + OpenAI::Internal::AnyHash + ) + end + + # Specifies the event type. For a computer screenshot, this property is always set + # to `computer_screenshot`. + sig { returns(Symbol) } + attr_accessor :type + + # The identifier of an uploaded file that contains the screenshot. + sig { returns(T.nilable(String)) } + attr_reader :file_id + + sig { params(file_id: String).void } + attr_writer :file_id + + # The URL of the screenshot image. + sig { returns(T.nilable(String)) } + attr_reader :image_url + + sig { params(image_url: String).void } + attr_writer :image_url + + # A computer screenshot image used with the computer use tool. + sig do + params(file_id: String, image_url: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The identifier of an uploaded file that contains the screenshot. + file_id: nil, + # The URL of the screenshot image. + image_url: nil, + # Specifies the event type. For a computer screenshot, this property is always set + # to `computer_screenshot`. + type: :computer_screenshot + ) + end + + sig do + override.returns({ type: Symbol, file_id: String, image_url: String }) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_content.rbi b/rbi/openai/models/responses/response_content.rbi new file mode 100644 index 00000000..126e783d --- /dev/null +++ b/rbi/openai/models/responses/response_content.rbi @@ -0,0 +1,31 @@ +# typed: strong + +module OpenAI + module Models + module Responses + # Multi-modal input and output contents. 
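+      # A union like this is typically consumed by branching on the concrete
+      # variant class; a sketch assuming `content` was returned by the API and
+      # assuming the `text`/`refusal` readers on those variants:
+      #
+      # ```
+      # case content
+      # when OpenAI::Responses::ResponseOutputText then content.text
+      # when OpenAI::Responses::ResponseOutputRefusal then content.refusal
+      # end
+      # ```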
+ module ResponseContent + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputText, + OpenAI::Responses::ResponseInputImage, + OpenAI::Responses::ResponseInputFile, + OpenAI::Responses::ResponseOutputText, + OpenAI::Responses::ResponseOutputRefusal + ) + end + + sig do + override.returns( + T::Array[OpenAI::Responses::ResponseContent::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_content_part_added_event.rbi b/rbi/openai/models/responses/response_content_part_added_event.rbi new file mode 100644 index 00000000..82ef446c --- /dev/null +++ b/rbi/openai/models/responses/response_content_part_added_event.rbi @@ -0,0 +1,115 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseContentPartAddedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseContentPartAddedEvent, + OpenAI::Internal::AnyHash + ) + end + + # The index of the content part that was added. + sig { returns(Integer) } + attr_accessor :content_index + + # The ID of the output item that the content part was added to. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item that the content part was added to. + sig { returns(Integer) } + attr_accessor :output_index + + # The content part that was added. + sig do + returns( + OpenAI::Responses::ResponseContentPartAddedEvent::Part::Variants + ) + end + attr_accessor :part + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.content_part.added`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a new content part is added. + sig do + params( + content_index: Integer, + item_id: String, + output_index: Integer, + part: + T.any( + OpenAI::Responses::ResponseOutputText::OrHash, + OpenAI::Responses::ResponseOutputRefusal::OrHash + ), + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The index of the content part that was added. + content_index:, + # The ID of the output item that the content part was added to. + item_id:, + # The index of the output item that the content part was added to. + output_index:, + # The content part that was added. + part:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always `response.content_part.added`. + type: :"response.content_part.added" + ) + end + + sig do + override.returns( + { + content_index: Integer, + item_id: String, + output_index: Integer, + part: + OpenAI::Responses::ResponseContentPartAddedEvent::Part::Variants, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + + # The content part that was added. 
+ module Part + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputText, + OpenAI::Responses::ResponseOutputRefusal + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseContentPartAddedEvent::Part::Variants + ] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_content_part_done_event.rbi b/rbi/openai/models/responses/response_content_part_done_event.rbi new file mode 100644 index 00000000..51be81c7 --- /dev/null +++ b/rbi/openai/models/responses/response_content_part_done_event.rbi @@ -0,0 +1,115 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseContentPartDoneEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseContentPartDoneEvent, + OpenAI::Internal::AnyHash + ) + end + + # The index of the content part that is done. + sig { returns(Integer) } + attr_accessor :content_index + + # The ID of the output item that the content part was added to. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item that the content part was added to. + sig { returns(Integer) } + attr_accessor :output_index + + # The content part that is done. + sig do + returns( + OpenAI::Responses::ResponseContentPartDoneEvent::Part::Variants + ) + end + attr_accessor :part + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.content_part.done`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a content part is done. + sig do + params( + content_index: Integer, + item_id: String, + output_index: Integer, + part: + T.any( + OpenAI::Responses::ResponseOutputText::OrHash, + OpenAI::Responses::ResponseOutputRefusal::OrHash + ), + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The index of the content part that is done. + content_index:, + # The ID of the output item that the content part was added to. + item_id:, + # The index of the output item that the content part was added to. + output_index:, + # The content part that is done. + part:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always `response.content_part.done`. + type: :"response.content_part.done" + ) + end + + sig do + override.returns( + { + content_index: Integer, + item_id: String, + output_index: Integer, + part: + OpenAI::Responses::ResponseContentPartDoneEvent::Part::Variants, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + + # The content part that is done. 
+ module Part + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputText, + OpenAI::Responses::ResponseOutputRefusal + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseContentPartDoneEvent::Part::Variants + ] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_conversation_param.rbi b/rbi/openai/models/responses/response_conversation_param.rbi new file mode 100644 index 00000000..e4b1df7f --- /dev/null +++ b/rbi/openai/models/responses/response_conversation_param.rbi @@ -0,0 +1,33 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseConversationParam < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseConversationParam, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the conversation. + sig { returns(String) } + attr_accessor :id + + # The conversation that this response belongs to. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the conversation. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_create_params.rbi b/rbi/openai/models/responses/response_create_params.rbi new file mode 100644 index 00000000..3fa43c5b --- /dev/null +++ b/rbi/openai/models/responses/response_create_params.rbi @@ -0,0 +1,939 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCreateParams, + OpenAI::Internal::AnyHash + ) + end + + # Whether to run the model response in the background. + # [Learn more](https://platform.openai.com/docs/guides/background). + sig { returns(T.nilable(T::Boolean)) } + attr_accessor :background + + # The conversation that this response belongs to. Items from this conversation are + # prepended to `input_items` for this response request. Input items and output + # items from this response are automatically added to this conversation after this + # response completes. + sig do + returns( + T.nilable( + T.any(String, OpenAI::Responses::ResponseConversationParam) + ) + ) + end + attr_accessor :conversation + + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `message.output_text.logprobs`: Include logprobs with assistant messages. + # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + # tokens in reasoning item outputs. This enables reasoning items to be used in + # multi-turn conversations when using the Responses API statelessly (like when + # the `store` parameter is set to `false`, or when an organization is enrolled + # in the zero data retention program). 
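+        # For example, a request that wants logprobs on assistant output could
+        # pass one of the values documented above (a sketch, not a verbatim
+        # API example):
+        #
+        # ```
+        # include: [:"message.output_text.logprobs"]
+        # ```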
+ sig do + returns( + T.nilable(T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]) + ) + end + attr_accessor :include + + # Text, image, or file inputs to the model, used to generate a response. + # + # Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + sig do + returns( + T.nilable(OpenAI::Responses::ResponseCreateParams::Input::Variants) + ) + end + attr_reader :input + + sig do + params( + input: OpenAI::Responses::ResponseCreateParams::Input::Variants + ).void + end + attr_writer :input + + # A system (or developer) message inserted into the model's context. + # + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. + sig { returns(T.nilable(String)) } + attr_accessor :instructions + + # An upper bound for the number of tokens that can be generated for a response, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + sig { returns(T.nilable(Integer)) } + attr_accessor :max_output_tokens + + # The maximum number of total calls to built-in tools that can be processed in a + # response. This maximum number applies across all built-in tool calls, not per + # individual tool. Any further attempts to call a tool by the model will be + # ignored. + sig { returns(T.nilable(Integer)) } + attr_accessor :max_tool_calls + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. + sig do + returns( + T.nilable( + T.any( + String, + OpenAI::ChatModel::OrSymbol, + OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol + ) + ) + ) + end + attr_reader :model + + sig do + params( + model: + T.any( + String, + OpenAI::ChatModel::OrSymbol, + OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol + ) + ).void + end + attr_writer :model + + # Whether to allow the model to run tool calls in parallel. + sig { returns(T.nilable(T::Boolean)) } + attr_accessor :parallel_tool_calls + + # The unique ID of the previous response to the model. Use this to create + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # Cannot be used in conjunction with `conversation`. + sig { returns(T.nilable(String)) } + attr_accessor :previous_response_id + + # Reference to a prompt template and its variables. 
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + sig { returns(T.nilable(OpenAI::Responses::ResponsePrompt)) } + attr_reader :prompt + + sig do + params( + prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash) + ).void + end + attr_writer :prompt + + # Used by OpenAI to cache responses for similar requests to optimize your cache + # hit rates. Replaces the `user` field. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + sig { returns(T.nilable(String)) } + attr_reader :prompt_cache_key + + sig { params(prompt_cache_key: String).void } + attr_writer :prompt_cache_key + + # **gpt-5 and o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + sig { returns(T.nilable(OpenAI::Reasoning)) } + attr_reader :reasoning + + sig { params(reasoning: T.nilable(OpenAI::Reasoning::OrHash)).void } + attr_writer :reasoning + + # A stable identifier used to help detect users of your application that may be + # violating OpenAI's usage policies. The IDs should be a string that uniquely + # identifies each user. We recommend hashing their username or email address, in + # order to avoid sending us any identifying information. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + sig { returns(T.nilable(String)) } + attr_reader :safety_identifier + + sig { params(safety_identifier: String).void } + attr_writer :safety_identifier + + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol + ) + ) + end + attr_accessor :service_tier + + # Whether to store the generated model response for later retrieval via API. + sig { returns(T.nilable(T::Boolean)) } + attr_accessor :store + + # Options for streaming responses. Only set this when you set `stream: true`. + sig do + returns( + T.nilable(OpenAI::Responses::ResponseCreateParams::StreamOptions) + ) + end + attr_reader :stream_options + + sig do + params( + stream_options: + T.nilable( + OpenAI::Responses::ResponseCreateParams::StreamOptions::OrHash + ) + ).void + end + attr_writer :stream_options + + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. + sig { returns(T.nilable(Float)) } + attr_accessor :temperature + + # Configuration options for a text response from the model. 
Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + sig { returns(T.nilable(OpenAI::Responses::ResponseTextConfig)) } + attr_reader :text + + sig { params(text: OpenAI::Responses::ResponseTextConfig::OrHash).void } + attr_writer :text + + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. + sig do + returns( + T.nilable( + T.any( + OpenAI::Responses::ToolChoiceOptions::OrSymbol, + OpenAI::Responses::ToolChoiceAllowed, + OpenAI::Responses::ToolChoiceTypes, + OpenAI::Responses::ToolChoiceFunction, + OpenAI::Responses::ToolChoiceMcp, + OpenAI::Responses::ToolChoiceCustom + ) + ) + ) + end + attr_reader :tool_choice + + sig do + params( + tool_choice: + T.any( + OpenAI::Responses::ToolChoiceOptions::OrSymbol, + OpenAI::Responses::ToolChoiceAllowed::OrHash, + OpenAI::Responses::ToolChoiceTypes::OrHash, + OpenAI::Responses::ToolChoiceFunction::OrHash, + OpenAI::Responses::ToolChoiceMcp::OrHash, + OpenAI::Responses::ToolChoiceCustom::OrHash + ) + ).void + end + attr_writer :tool_choice + + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use custom tools to call your own code. + sig do + returns( + T.nilable( + T::Array[ + T.any( + OpenAI::Responses::FunctionTool, + OpenAI::Responses::FileSearchTool, + OpenAI::Responses::ComputerTool, + OpenAI::Responses::Tool::Mcp, + OpenAI::Responses::Tool::CodeInterpreter, + OpenAI::Responses::Tool::ImageGeneration, + OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::CustomTool, + OpenAI::Responses::WebSearchTool + ) + ] + ) + ) + end + attr_reader :tools + + sig do + params( + tools: + T::Array[ + T.any( + OpenAI::Responses::FunctionTool::OrHash, + OpenAI::Responses::FileSearchTool::OrHash, + OpenAI::Responses::ComputerTool::OrHash, + OpenAI::Responses::Tool::Mcp::OrHash, + OpenAI::Responses::Tool::CodeInterpreter::OrHash, + OpenAI::Responses::Tool::ImageGeneration::OrHash, + OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::WebSearchTool::OrHash + ) + ] + ).void + end + attr_writer :tools + + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + sig { returns(T.nilable(Integer)) } + attr_accessor :top_logprobs + + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. 
So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. + sig { returns(T.nilable(Float)) } + attr_accessor :top_p + + # The truncation strategy to use for the model response. + # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseCreateParams::Truncation::OrSymbol + ) + ) + end + attr_accessor :truncation + + # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + # `prompt_cache_key` instead to maintain caching optimizations. A stable + # identifier for your end-users. Used to boost cache hit rates by better bucketing + # similar requests and to help OpenAI detect and prevent abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + sig { returns(T.nilable(String)) } + attr_reader :user + + sig { params(user: String).void } + attr_writer :user + + sig do + params( + background: T.nilable(T::Boolean), + conversation: + T.nilable( + T.any( + String, + OpenAI::Responses::ResponseConversationParam::OrHash + ) + ), + include: + T.nilable( + T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol] + ), + input: OpenAI::Responses::ResponseCreateParams::Input::Variants, + instructions: T.nilable(String), + max_output_tokens: T.nilable(Integer), + max_tool_calls: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: + T.any( + String, + OpenAI::ChatModel::OrSymbol, + OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol + ), + parallel_tool_calls: T.nilable(T::Boolean), + previous_response_id: T.nilable(String), + prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash), + prompt_cache_key: String, + reasoning: T.nilable(OpenAI::Reasoning::OrHash), + safety_identifier: String, + service_tier: + T.nilable( + OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol + ), + store: T.nilable(T::Boolean), + stream_options: + T.nilable( + OpenAI::Responses::ResponseCreateParams::StreamOptions::OrHash + ), + temperature: T.nilable(Float), + text: OpenAI::Responses::ResponseTextConfig::OrHash, + tool_choice: + T.any( + OpenAI::Responses::ToolChoiceOptions::OrSymbol, + OpenAI::Responses::ToolChoiceAllowed::OrHash, + OpenAI::Responses::ToolChoiceTypes::OrHash, + OpenAI::Responses::ToolChoiceFunction::OrHash, + OpenAI::Responses::ToolChoiceMcp::OrHash, + OpenAI::Responses::ToolChoiceCustom::OrHash + ), + tools: + T::Array[ + T.any( + OpenAI::Responses::FunctionTool::OrHash, + OpenAI::Responses::FileSearchTool::OrHash, + OpenAI::Responses::ComputerTool::OrHash, + OpenAI::Responses::Tool::Mcp::OrHash, + OpenAI::Responses::Tool::CodeInterpreter::OrHash, + OpenAI::Responses::Tool::ImageGeneration::OrHash, + OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::WebSearchTool::OrHash + ) + ], + top_logprobs: T.nilable(Integer), + top_p: T.nilable(Float), + truncation: + T.nilable( + OpenAI::Responses::ResponseCreateParams::Truncation::OrSymbol + ), + user: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + 
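+          # Editorial sketch (illustrative only, not generated code): every
+          # parameter below is optional, so a minimal construction is simply
+          #
+          #   OpenAI::Responses::ResponseCreateParams.new(
+          #     model: "gpt-4o",
+          #     input: "Write a haiku about type safety."
+          #   )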
# Whether to run the model response in the background. + # [Learn more](https://platform.openai.com/docs/guides/background). + background: nil, + # The conversation that this response belongs to. Items from this conversation are + # prepended to `input_items` for this response request. Input items and output + # items from this response are automatically added to this conversation after this + # response completes. + conversation: nil, + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `message.output_text.logprobs`: Include logprobs with assistant messages. + # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + # tokens in reasoning item outputs. This enables reasoning items to be used in + # multi-turn conversations when using the Responses API statelessly (like when + # the `store` parameter is set to `false`, or when an organization is enrolled + # in the zero data retention program). + include: nil, + # Text, image, or file inputs to the model, used to generate a response. + # + # Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + input: nil, + # A system (or developer) message inserted into the model's context. + # + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. + instructions: nil, + # An upper bound for the number of tokens that can be generated for a response, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_output_tokens: nil, + # The maximum number of total calls to built-in tools that can be processed in a + # response. This maximum number applies across all built-in tool calls, not per + # individual tool. Any further attempts to call a tool by the model will be + # ignored. + max_tool_calls: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. + model: nil, + # Whether to allow the model to run tool calls in parallel. 
+ parallel_tool_calls: nil, + # The unique ID of the previous response to the model. Use this to create + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # Cannot be used in conjunction with `conversation`. + previous_response_id: nil, + # Reference to a prompt template and its variables. + # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + prompt: nil, + # Used by OpenAI to cache responses for similar requests to optimize your cache + # hit rates. Replaces the `user` field. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_key: nil, + # **gpt-5 and o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + reasoning: nil, + # A stable identifier used to help detect users of your application that may be + # violating OpenAI's usage policies. The IDs should be a string that uniquely + # identifies each user. We recommend hashing their username or email address, in + # order to avoid sending us any identifying information. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + safety_identifier: nil, + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + service_tier: nil, + # Whether to store the generated model response for later retrieval via API. + store: nil, + # Options for streaming responses. Only set this when you set `stream: true`. + stream_options: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. + temperature: nil, + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + text: nil, + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. + tool_choice: nil, + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. 
+ # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use custom tools to call your own code. + tools: nil, + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + top_logprobs: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. + top_p: nil, + # The truncation strategy to use for the model response. + # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. + truncation: nil, + # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + # `prompt_cache_key` instead to maintain caching optimizations. A stable + # identifier for your end-users. Used to boost cache hit rates by better bucketing + # similar requests and to help OpenAI detect and prevent abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). 
+ user: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + background: T.nilable(T::Boolean), + conversation: + T.nilable( + T.any(String, OpenAI::Responses::ResponseConversationParam) + ), + include: + T.nilable( + T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol] + ), + input: OpenAI::Responses::ResponseCreateParams::Input::Variants, + instructions: T.nilable(String), + max_output_tokens: T.nilable(Integer), + max_tool_calls: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: + T.any( + String, + OpenAI::ChatModel::OrSymbol, + OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol + ), + parallel_tool_calls: T.nilable(T::Boolean), + previous_response_id: T.nilable(String), + prompt: T.nilable(OpenAI::Responses::ResponsePrompt), + prompt_cache_key: String, + reasoning: T.nilable(OpenAI::Reasoning), + safety_identifier: String, + service_tier: + T.nilable( + OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol + ), + store: T.nilable(T::Boolean), + stream_options: + T.nilable( + OpenAI::Responses::ResponseCreateParams::StreamOptions + ), + temperature: T.nilable(Float), + text: OpenAI::Responses::ResponseTextConfig, + tool_choice: + T.any( + OpenAI::Responses::ToolChoiceOptions::OrSymbol, + OpenAI::Responses::ToolChoiceAllowed, + OpenAI::Responses::ToolChoiceTypes, + OpenAI::Responses::ToolChoiceFunction, + OpenAI::Responses::ToolChoiceMcp, + OpenAI::Responses::ToolChoiceCustom + ), + tools: + T::Array[ + T.any( + OpenAI::Responses::FunctionTool, + OpenAI::Responses::FileSearchTool, + OpenAI::Responses::ComputerTool, + OpenAI::Responses::Tool::Mcp, + OpenAI::Responses::Tool::CodeInterpreter, + OpenAI::Responses::Tool::ImageGeneration, + OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::CustomTool, + OpenAI::Responses::WebSearchTool + ) + ], + top_logprobs: T.nilable(Integer), + top_p: T.nilable(Float), + truncation: + T.nilable( + OpenAI::Responses::ResponseCreateParams::Truncation::OrSymbol + ), + user: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # The conversation that this response belongs to. Items from this conversation are + # prepended to `input_items` for this response request. Input items and output + # items from this response are automatically added to this conversation after this + # response completes. + module Conversation + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any(String, OpenAI::Responses::ResponseConversationParam) + end + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseCreateParams::Conversation::Variants + ] + ) + end + def self.variants + end + end + + # Text, image, or file inputs to the model, used to generate a response. 
+ # + # Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + module Input + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + T::Array[OpenAI::Responses::ResponseInputItem::Variants] + ) + end + + sig do + override.returns( + T::Array[OpenAI::Responses::ResponseCreateParams::Input::Variants] + ) + end + def self.variants + end + end + + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + module ServiceTier + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseCreateParams::ServiceTier + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Responses::ResponseCreateParams::ServiceTier::TaggedSymbol + ) + DEFAULT = + T.let( + :default, + OpenAI::Responses::ResponseCreateParams::ServiceTier::TaggedSymbol + ) + FLEX = + T.let( + :flex, + OpenAI::Responses::ResponseCreateParams::ServiceTier::TaggedSymbol + ) + SCALE = + T.let( + :scale, + OpenAI::Responses::ResponseCreateParams::ServiceTier::TaggedSymbol + ) + PRIORITY = + T.let( + :priority, + OpenAI::Responses::ResponseCreateParams::ServiceTier::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseCreateParams::ServiceTier::TaggedSymbol + ] + ) + end + def self.values + end + end + + class StreamOptions < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCreateParams::StreamOptions, + OpenAI::Internal::AnyHash + ) + end + + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + sig { returns(T.nilable(T::Boolean)) } + attr_reader :include_obfuscation + + sig { params(include_obfuscation: T::Boolean).void } + attr_writer :include_obfuscation + + # Options for streaming responses. Only set this when you set `stream: true`. 
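+          # Editorial sketch (illustrative only, not generated code): trading
+          # obfuscation padding for bandwidth on a trusted network path:
+          #
+          #   OpenAI::Responses::ResponseCreateParams.new(
+          #     model: "gpt-4o",
+          #     input: "Hello",
+          #     stream_options: {include_obfuscation: false}
+          #   )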
+ sig do + params(include_obfuscation: T::Boolean).returns(T.attached_class) + end + def self.new( + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + include_obfuscation: nil + ) + end + + sig { override.returns({ include_obfuscation: T::Boolean }) } + def to_hash + end + end + + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. + module ToolChoice + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ToolChoiceOptions::TaggedSymbol, + OpenAI::Responses::ToolChoiceAllowed, + OpenAI::Responses::ToolChoiceTypes, + OpenAI::Responses::ToolChoiceFunction, + OpenAI::Responses::ToolChoiceMcp, + OpenAI::Responses::ToolChoiceCustom + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseCreateParams::ToolChoice::Variants + ] + ) + end + def self.variants + end + end + + # The truncation strategy to use for the model response. + # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. + module Truncation + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ResponseCreateParams::Truncation) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Responses::ResponseCreateParams::Truncation::TaggedSymbol + ) + DISABLED = + T.let( + :disabled, + OpenAI::Responses::ResponseCreateParams::Truncation::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseCreateParams::Truncation::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_created_event.rbi b/rbi/openai/models/responses/response_created_event.rbi new file mode 100644 index 00000000..ce2a5cff --- /dev/null +++ b/rbi/openai/models/responses/response_created_event.rbi @@ -0,0 +1,62 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCreatedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCreatedEvent, + OpenAI::Internal::AnyHash + ) + end + + # The response that was created. + sig { returns(OpenAI::Responses::Response) } + attr_reader :response + + sig { params(response: OpenAI::Responses::Response::OrHash).void } + attr_writer :response + + # The sequence number for this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.created`. + sig { returns(Symbol) } + attr_accessor :type + + # An event that is emitted when a response is created. 
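+        # Editorial sketch (illustrative only, not generated code): the `type`
+        # literal discriminates streaming events, assuming `event` came from a
+        # response stream:
+        #
+        #   puts(event.response.id) if event.type == :"response.created"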
+ sig do + params( + response: OpenAI::Responses::Response::OrHash, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The response that was created. + response:, + # The sequence number for this event. + sequence_number:, + # The type of the event. Always `response.created`. + type: :"response.created" + ) + end + + sig do + override.returns( + { + response: OpenAI::Responses::Response, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_custom_tool_call.rbi b/rbi/openai/models/responses/response_custom_tool_call.rbi new file mode 100644 index 00000000..89eb7f74 --- /dev/null +++ b/rbi/openai/models/responses/response_custom_tool_call.rbi @@ -0,0 +1,78 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCustomToolCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCustomToolCall, + OpenAI::Internal::AnyHash + ) + end + + # An identifier used to map this custom tool call to a tool call output. + sig { returns(String) } + attr_accessor :call_id + + # The input for the custom tool call generated by the model. + sig { returns(String) } + attr_accessor :input + + # The name of the custom tool being called. + sig { returns(String) } + attr_accessor :name + + # The type of the custom tool call. Always `custom_tool_call`. + sig { returns(Symbol) } + attr_accessor :type + + # The unique ID of the custom tool call in the OpenAI platform. + sig { returns(T.nilable(String)) } + attr_reader :id + + sig { params(id: String).void } + attr_writer :id + + # A call to a custom tool created by the model. + sig do + params( + call_id: String, + input: String, + name: String, + id: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # An identifier used to map this custom tool call to a tool call output. + call_id:, + # The input for the custom tool call generated by the model. + input:, + # The name of the custom tool being called. + name:, + # The unique ID of the custom tool call in the OpenAI platform. + id: nil, + # The type of the custom tool call. Always `custom_tool_call`. + type: :custom_tool_call + ) + end + + sig do + override.returns( + { + call_id: String, + input: String, + name: String, + type: Symbol, + id: String + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_custom_tool_call_input_delta_event.rbi b/rbi/openai/models/responses/response_custom_tool_call_input_delta_event.rbi new file mode 100644 index 00000000..dbc175f0 --- /dev/null +++ b/rbi/openai/models/responses/response_custom_tool_call_input_delta_event.rbi @@ -0,0 +1,75 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCustomToolCallInputDeltaEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCustomToolCallInputDeltaEvent, + OpenAI::Internal::AnyHash + ) + end + + # The incremental input data (delta) for the custom tool call. + sig { returns(String) } + attr_accessor :delta + + # Unique identifier for the API item associated with this event. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output this delta applies to. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. 
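+        # Editorial sketch (illustrative only, not generated code): deltas for
+        # concurrent calls can be accumulated per `item_id`:
+        #
+        #   buffers = Hash.new { |h, k| h[k] = +"" }
+        #   buffers[event.item_id] << event.delta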
+ sig { returns(Integer) } + attr_accessor :sequence_number + + # The event type identifier. + sig { returns(Symbol) } + attr_accessor :type + + # Event representing a delta (partial update) to the input of a custom tool call. + sig do + params( + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The incremental input data (delta) for the custom tool call. + delta:, + # Unique identifier for the API item associated with this event. + item_id:, + # The index of the output this delta applies to. + output_index:, + # The sequence number of this event. + sequence_number:, + # The event type identifier. + type: :"response.custom_tool_call_input.delta" + ) + end + + sig do + override.returns( + { + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_custom_tool_call_input_done_event.rbi b/rbi/openai/models/responses/response_custom_tool_call_input_done_event.rbi new file mode 100644 index 00000000..cd3c485d --- /dev/null +++ b/rbi/openai/models/responses/response_custom_tool_call_input_done_event.rbi @@ -0,0 +1,75 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCustomToolCallInputDoneEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCustomToolCallInputDoneEvent, + OpenAI::Internal::AnyHash + ) + end + + # The complete input data for the custom tool call. + sig { returns(String) } + attr_accessor :input + + # Unique identifier for the API item associated with this event. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output this event applies to. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The event type identifier. + sig { returns(Symbol) } + attr_accessor :type + + # Event indicating that input for a custom tool call is complete. + sig do + params( + input: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The complete input data for the custom tool call. + input:, + # Unique identifier for the API item associated with this event. + item_id:, + # The index of the output this event applies to. + output_index:, + # The sequence number of this event. + sequence_number:, + # The event type identifier. + type: :"response.custom_tool_call_input.done" + ) + end + + sig do + override.returns( + { + input: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_custom_tool_call_output.rbi b/rbi/openai/models/responses/response_custom_tool_call_output.rbi new file mode 100644 index 00000000..b18c6a16 --- /dev/null +++ b/rbi/openai/models/responses/response_custom_tool_call_output.rbi @@ -0,0 +1,65 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCustomToolCallOutput < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCustomToolCallOutput, + OpenAI::Internal::AnyHash + ) + end + + # The call ID, used to map this custom tool call output to a custom tool call. 
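+        # Editorial sketch (illustrative only, not generated code): echoing a
+        # finished custom tool call back to the model; `run_my_tool` is a
+        # hypothetical stand-in for your own code:
+        #
+        #   OpenAI::Responses::ResponseCustomToolCallOutput.new(
+        #     call_id: call.call_id,
+        #     output: run_my_tool(call.input)
+        #   )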
+ sig { returns(String) } + attr_accessor :call_id + + # The output from the custom tool call generated by your code. + sig { returns(String) } + attr_accessor :output + + # The type of the custom tool call output. Always `custom_tool_call_output`. + sig { returns(Symbol) } + attr_accessor :type + + # The unique ID of the custom tool call output in the OpenAI platform. + sig { returns(T.nilable(String)) } + attr_reader :id + + sig { params(id: String).void } + attr_writer :id + + # The output of a custom tool call from your code, being sent back to the model. + sig do + params( + call_id: String, + output: String, + id: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The call ID, used to map this custom tool call output to a custom tool call. + call_id:, + # The output from the custom tool call generated by your code. + output:, + # The unique ID of the custom tool call output in the OpenAI platform. + id: nil, + # The type of the custom tool call output. Always `custom_tool_call_output`. + type: :custom_tool_call_output + ) + end + + sig do + override.returns( + { call_id: String, output: String, type: Symbol, id: String } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_delete_params.rbi b/rbi/openai/models/responses/response_delete_params.rbi new file mode 100644 index 00000000..6d3d35bd --- /dev/null +++ b/rbi/openai/models/responses/response_delete_params.rbi @@ -0,0 +1,32 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseDeleteParams, + OpenAI::Internal::AnyHash + ) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_error.rbi b/rbi/openai/models/responses/response_error.rbi new file mode 100644 index 00000000..b27ac5dd --- /dev/null +++ b/rbi/openai/models/responses/response_error.rbi @@ -0,0 +1,158 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseError < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Responses::ResponseError, OpenAI::Internal::AnyHash) + end + + # The error code for the response. + sig { returns(OpenAI::Responses::ResponseError::Code::TaggedSymbol) } + attr_accessor :code + + # A human-readable description of the error. + sig { returns(String) } + attr_accessor :message + + # An error object returned when the model fails to generate a Response. + sig do + params( + code: OpenAI::Responses::ResponseError::Code::OrSymbol, + message: String + ).returns(T.attached_class) + end + def self.new( + # The error code for the response. + code:, + # A human-readable description of the error. + message: + ) + end + + sig do + override.returns( + { + code: OpenAI::Responses::ResponseError::Code::TaggedSymbol, + message: String + } + ) + end + def to_hash + end + + # The error code for the response. 
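+        # Editorial sketch (illustrative only, not generated code): branching
+        # on a failed response's error, where `retry_later` is a hypothetical
+        # stand-in:
+        #
+        #   case response.error&.code
+        #   when :rate_limit_exceeded then retry_later
+        #   when :invalid_prompt then raise(response.error.message)
+        #   end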
+ module Code + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ResponseError::Code) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SERVER_ERROR = + T.let( + :server_error, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + RATE_LIMIT_EXCEEDED = + T.let( + :rate_limit_exceeded, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + INVALID_PROMPT = + T.let( + :invalid_prompt, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + VECTOR_STORE_TIMEOUT = + T.let( + :vector_store_timeout, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + INVALID_IMAGE = + T.let( + :invalid_image, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + INVALID_IMAGE_FORMAT = + T.let( + :invalid_image_format, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + INVALID_BASE64_IMAGE = + T.let( + :invalid_base64_image, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + INVALID_IMAGE_URL = + T.let( + :invalid_image_url, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + IMAGE_TOO_LARGE = + T.let( + :image_too_large, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + IMAGE_TOO_SMALL = + T.let( + :image_too_small, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + IMAGE_PARSE_ERROR = + T.let( + :image_parse_error, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + IMAGE_CONTENT_POLICY_VIOLATION = + T.let( + :image_content_policy_violation, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + INVALID_IMAGE_MODE = + T.let( + :invalid_image_mode, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + IMAGE_FILE_TOO_LARGE = + T.let( + :image_file_too_large, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + UNSUPPORTED_IMAGE_MEDIA_TYPE = + T.let( + :unsupported_image_media_type, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + EMPTY_IMAGE_FILE = + T.let( + :empty_image_file, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + FAILED_TO_DOWNLOAD_IMAGE = + T.let( + :failed_to_download_image, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + IMAGE_FILE_NOT_FOUND = + T.let( + :image_file_not_found, + OpenAI::Responses::ResponseError::Code::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Responses::ResponseError::Code::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_error_event.rbi b/rbi/openai/models/responses/response_error_event.rbi new file mode 100644 index 00000000..b651b4d8 --- /dev/null +++ b/rbi/openai/models/responses/response_error_event.rbi @@ -0,0 +1,75 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseErrorEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseErrorEvent, + OpenAI::Internal::AnyHash + ) + end + + # The error code. + sig { returns(T.nilable(String)) } + attr_accessor :code + + # The error message. + sig { returns(String) } + attr_accessor :message + + # The error parameter. + sig { returns(T.nilable(String)) } + attr_accessor :param + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `error`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when an error occurs. 
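+        # Editorial sketch (illustrative only, not generated code): surfacing
+        # stream-level errors, assuming `event` came from a response stream:
+        #
+        #   if event.is_a?(OpenAI::Responses::ResponseErrorEvent)
+        #     warn("#{event.code}: #{event.message}")
+        #   end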
+        sig do
+          params(
+            code: T.nilable(String),
+            message: String,
+            param: T.nilable(String),
+            sequence_number: Integer,
+            type: Symbol
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # The error code.
+          code:,
+          # The error message.
+          message:,
+          # The error parameter.
+          param:,
+          # The sequence number of this event.
+          sequence_number:,
+          # The type of the event. Always `error`.
+          type: :error
+        )
+        end
+
+        sig do
+          override.returns(
+            {
+              code: T.nilable(String),
+              message: String,
+              param: T.nilable(String),
+              sequence_number: Integer,
+              type: Symbol
+            }
+          )
+        end
+        def to_hash
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/responses/response_failed_event.rbi b/rbi/openai/models/responses/response_failed_event.rbi
new file mode 100644
index 00000000..8ac778a5
--- /dev/null
+++ b/rbi/openai/models/responses/response_failed_event.rbi
@@ -0,0 +1,62 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Responses
+      class ResponseFailedEvent < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::Responses::ResponseFailedEvent,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The response that failed.
+        sig { returns(OpenAI::Responses::Response) }
+        attr_reader :response
+
+        sig { params(response: OpenAI::Responses::Response::OrHash).void }
+        attr_writer :response
+
+        # The sequence number of this event.
+        sig { returns(Integer) }
+        attr_accessor :sequence_number
+
+        # The type of the event. Always `response.failed`.
+        sig { returns(Symbol) }
+        attr_accessor :type
+
+        # An event that is emitted when a response fails.
+        sig do
+          params(
+            response: OpenAI::Responses::Response::OrHash,
+            sequence_number: Integer,
+            type: Symbol
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # The response that failed.
+          response:,
+          # The sequence number of this event.
+          sequence_number:,
+          # The type of the event. Always `response.failed`.
+          type: :"response.failed"
+        )
+        end
+
+        sig do
+          override.returns(
+            {
+              response: OpenAI::Responses::Response,
+              sequence_number: Integer,
+              type: Symbol
+            }
+          )
+        end
+        def to_hash
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/responses/response_file_search_call_completed_event.rbi b/rbi/openai/models/responses/response_file_search_call_completed_event.rbi
new file mode 100644
index 00000000..dec63c3d
--- /dev/null
+++ b/rbi/openai/models/responses/response_file_search_call_completed_event.rbi
@@ -0,0 +1,67 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Responses
+      class ResponseFileSearchCallCompletedEvent < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::Responses::ResponseFileSearchCallCompletedEvent,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The ID of the output item that the file search call was initiated on.
+        sig { returns(String) }
+        attr_accessor :item_id
+
+        # The index of the output item that the file search call was initiated on.
+        sig { returns(Integer) }
+        attr_accessor :output_index
+
+        # The sequence number of this event.
+        sig { returns(Integer) }
+        attr_accessor :sequence_number
+
+        # The type of the event. Always `response.file_search_call.completed`.
+        sig { returns(Symbol) }
+        attr_accessor :type
+
+        # Emitted when a file search call is completed (results found).
+        sig do
+          params(
+            item_id: String,
+            output_index: Integer,
+            sequence_number: Integer,
+            type: Symbol
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # The ID of the output item that the file search call was initiated on.
+          item_id:,
+          # The index of the output item that the file search call was initiated on.
+          output_index:,
+          # The sequence number of this event.
+          sequence_number:,
+          # The type of the event. Always `response.file_search_call.completed`.
+          type: :"response.file_search_call.completed"
+        )
+        end
+
+        sig do
+          override.returns(
+            {
+              item_id: String,
+              output_index: Integer,
+              sequence_number: Integer,
+              type: Symbol
+            }
+          )
+        end
+        def to_hash
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/responses/response_file_search_call_in_progress_event.rbi b/rbi/openai/models/responses/response_file_search_call_in_progress_event.rbi
new file mode 100644
index 00000000..e77a9b13
--- /dev/null
+++ b/rbi/openai/models/responses/response_file_search_call_in_progress_event.rbi
@@ -0,0 +1,67 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Responses
+      class ResponseFileSearchCallInProgressEvent < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::Responses::ResponseFileSearchCallInProgressEvent,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The ID of the output item that the file search call was initiated on.
+        sig { returns(String) }
+        attr_accessor :item_id
+
+        # The index of the output item that the file search call was initiated on.
+        sig { returns(Integer) }
+        attr_accessor :output_index
+
+        # The sequence number of this event.
+        sig { returns(Integer) }
+        attr_accessor :sequence_number
+
+        # The type of the event. Always `response.file_search_call.in_progress`.
+        sig { returns(Symbol) }
+        attr_accessor :type
+
+        # Emitted when a file search call is initiated.
+        sig do
+          params(
+            item_id: String,
+            output_index: Integer,
+            sequence_number: Integer,
+            type: Symbol
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # The ID of the output item that the file search call was initiated on.
+          item_id:,
+          # The index of the output item that the file search call was initiated on.
+          output_index:,
+          # The sequence number of this event.
+          sequence_number:,
+          # The type of the event. Always `response.file_search_call.in_progress`.
+          type: :"response.file_search_call.in_progress"
+        )
+        end
+
+        sig do
+          override.returns(
+            {
+              item_id: String,
+              output_index: Integer,
+              sequence_number: Integer,
+              type: Symbol
+            }
+          )
+        end
+        def to_hash
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/responses/response_file_search_call_searching_event.rbi b/rbi/openai/models/responses/response_file_search_call_searching_event.rbi
new file mode 100644
index 00000000..5a9be332
--- /dev/null
+++ b/rbi/openai/models/responses/response_file_search_call_searching_event.rbi
@@ -0,0 +1,67 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Responses
+      class ResponseFileSearchCallSearchingEvent < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::Responses::ResponseFileSearchCallSearchingEvent,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The ID of the output item that the file search call was initiated on.
+        sig { returns(String) }
+        attr_accessor :item_id
+
+        # The index of the output item that the file search call is searching.
+        sig { returns(Integer) }
+        attr_accessor :output_index
+
+        # The sequence number of this event.
+        sig { returns(Integer) }
+        attr_accessor :sequence_number
+
+        # The type of the event. Always `response.file_search_call.searching`.
+        sig { returns(Symbol) }
+        attr_accessor :type
+
+        # Emitted when a file search call is currently searching.
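+        # Editorial sketch (illustrative only, not generated code): the three
+        # file search lifecycle events share `item_id`, so progress can be
+        # keyed on it:
+        #
+        #   progress = {}
+        #   progress[event.item_id] = :searching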
+        sig do
+          params(
+            item_id: String,
+            output_index: Integer,
+            sequence_number: Integer,
+            type: Symbol
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # The ID of the output item that the file search call was initiated on.
+          item_id:,
+          # The index of the output item that the file search call is searching.
+          output_index:,
+          # The sequence number of this event.
+          sequence_number:,
+          # The type of the event. Always `response.file_search_call.searching`.
+          type: :"response.file_search_call.searching"
+        )
+        end
+
+        sig do
+          override.returns(
+            {
+              item_id: String,
+              output_index: Integer,
+              sequence_number: Integer,
+              type: Symbol
+            }
+          )
+        end
+        def to_hash
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/responses/response_file_search_tool_call.rbi b/rbi/openai/models/responses/response_file_search_tool_call.rbi
new file mode 100644
index 00000000..c6864d65
--- /dev/null
+++ b/rbi/openai/models/responses/response_file_search_tool_call.rbi
@@ -0,0 +1,276 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Responses
+      class ResponseFileSearchToolCall < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::Responses::ResponseFileSearchToolCall,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The unique ID of the file search tool call.
+        sig { returns(String) }
+        attr_accessor :id
+
+        # The queries used to search for files.
+        sig { returns(T::Array[String]) }
+        attr_accessor :queries
+
+        # The status of the file search tool call. One of `in_progress`, `searching`,
+        # `completed`, `incomplete`, or `failed`.
+        sig do
+          returns(
+            OpenAI::Responses::ResponseFileSearchToolCall::Status::OrSymbol
+          )
+        end
+        attr_accessor :status
+
+        # The type of the file search tool call. Always `file_search_call`.
+        sig { returns(Symbol) }
+        attr_accessor :type
+
+        # The results of the file search tool call.
+        sig do
+          returns(
+            T.nilable(
+              T::Array[OpenAI::Responses::ResponseFileSearchToolCall::Result]
+            )
+          )
+        end
+        attr_accessor :results
+
+        # The results of a file search tool call. See the
+        # [file search guide](https://platform.openai.com/docs/guides/tools-file-search)
+        # for more information.
+        sig do
+          params(
+            id: String,
+            queries: T::Array[String],
+            status:
+              OpenAI::Responses::ResponseFileSearchToolCall::Status::OrSymbol,
+            results:
+              T.nilable(
+                T::Array[
+                  OpenAI::Responses::ResponseFileSearchToolCall::Result::OrHash
+                ]
+              ),
+            type: Symbol
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # The unique ID of the file search tool call.
+          id:,
+          # The queries used to search for files.
+          queries:,
+          # The status of the file search tool call. One of `in_progress`, `searching`,
+          # `completed`, `incomplete`, or `failed`.
+          status:,
+          # The results of the file search tool call.
+          results: nil,
+          # The type of the file search tool call. Always `file_search_call`.
+          type: :file_search_call
+        )
+        end
+
+        sig do
+          override.returns(
+            {
+              id: String,
+              queries: T::Array[String],
+              status:
+                OpenAI::Responses::ResponseFileSearchToolCall::Status::OrSymbol,
+              type: Symbol,
+              results:
+                T.nilable(
+                  T::Array[
+                    OpenAI::Responses::ResponseFileSearchToolCall::Result
+                  ]
+                )
+            }
+          )
+        end
+        def to_hash
+        end
+
+        # The status of the file search tool call. One of `in_progress`, `searching`,
+        # `completed`, `incomplete`, or `failed`.
+        module Status
+          extend OpenAI::Internal::Type::Enum
+
+          TaggedSymbol =
+            T.type_alias do
+              T.all(
+                Symbol,
+                OpenAI::Responses::ResponseFileSearchToolCall::Status
+              )
+            end
+          OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+          IN_PROGRESS =
+            T.let(
+              :in_progress,
+              OpenAI::Responses::ResponseFileSearchToolCall::Status::TaggedSymbol
+            )
+          SEARCHING =
+            T.let(
+              :searching,
+              OpenAI::Responses::ResponseFileSearchToolCall::Status::TaggedSymbol
+            )
+          COMPLETED =
+            T.let(
+              :completed,
+              OpenAI::Responses::ResponseFileSearchToolCall::Status::TaggedSymbol
+            )
+          INCOMPLETE =
+            T.let(
+              :incomplete,
+              OpenAI::Responses::ResponseFileSearchToolCall::Status::TaggedSymbol
+            )
+          FAILED =
+            T.let(
+              :failed,
+              OpenAI::Responses::ResponseFileSearchToolCall::Status::TaggedSymbol
+            )
+
+          sig do
+            override.returns(
+              T::Array[
+                OpenAI::Responses::ResponseFileSearchToolCall::Status::TaggedSymbol
+              ]
+            )
+          end
+          def self.values
+          end
+        end
+
+        class Result < OpenAI::Internal::Type::BaseModel
+          OrHash =
+            T.type_alias do
+              T.any(
+                OpenAI::Responses::ResponseFileSearchToolCall::Result,
+                OpenAI::Internal::AnyHash
+              )
+            end
+
+          # Set of 16 key-value pairs that can be attached to an object. This can be useful
+          # for storing additional information about the object in a structured format, and
+          # querying for objects via API or the dashboard. Keys are strings with a maximum
+          # length of 64 characters. Values are strings with a maximum length of 512
+          # characters, booleans, or numbers.
+          sig do
+            returns(
+              T.nilable(
+                T::Hash[
+                  Symbol,
+                  OpenAI::Responses::ResponseFileSearchToolCall::Result::Attribute::Variants
+                ]
+              )
+            )
+          end
+          attr_accessor :attributes
+
+          # The unique ID of the file.
+          sig { returns(T.nilable(String)) }
+          attr_reader :file_id
+
+          sig { params(file_id: String).void }
+          attr_writer :file_id
+
+          # The name of the file.
+          sig { returns(T.nilable(String)) }
+          attr_reader :filename
+
+          sig { params(filename: String).void }
+          attr_writer :filename
+
+          # The relevance score of the file - a value between 0 and 1.
+          sig { returns(T.nilable(Float)) }
+          attr_reader :score
+
+          sig { params(score: Float).void }
+          attr_writer :score
+
+          # The text that was retrieved from the file.
+          sig { returns(T.nilable(String)) }
+          attr_reader :text
+
+          sig { params(text: String).void }
+          attr_writer :text
+
+          sig do
+            params(
+              attributes:
+                T.nilable(
+                  T::Hash[
+                    Symbol,
+                    OpenAI::Responses::ResponseFileSearchToolCall::Result::Attribute::Variants
+                  ]
+                ),
+              file_id: String,
+              filename: String,
+              score: Float,
+              text: String
+            ).returns(T.attached_class)
+          end
+          def self.new(
+            # Set of 16 key-value pairs that can be attached to an object. This can be useful
+            # for storing additional information about the object in a structured format, and
+            # querying for objects via API or the dashboard. Keys are strings with a maximum
+            # length of 64 characters. Values are strings with a maximum length of 512
+            # characters, booleans, or numbers.
+            attributes: nil,
+            # The unique ID of the file.
+            file_id: nil,
+            # The name of the file.
+            filename: nil,
+            # The relevance score of the file - a value between 0 and 1.
+            score: nil,
+            # The text that was retrieved from the file.
+ text: nil + ) + end + + sig do + override.returns( + { + attributes: + T.nilable( + T::Hash[ + Symbol, + OpenAI::Responses::ResponseFileSearchToolCall::Result::Attribute::Variants + ] + ), + file_id: String, + filename: String, + score: Float, + text: String + } + ) + end + def to_hash + end + + module Attribute + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(String, Float, T::Boolean) } + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseFileSearchToolCall::Result::Attribute::Variants + ] + ) + end + def self.variants + end + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_format_text_config.rbi b/rbi/openai/models/responses/response_format_text_config.rbi new file mode 100644 index 00000000..2c1186ac --- /dev/null +++ b/rbi/openai/models/responses/response_format_text_config.rbi @@ -0,0 +1,41 @@ +# typed: strong + +module OpenAI + module Models + module Responses + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + module ResponseFormatTextConfig + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::ResponseFormatText, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig, + OpenAI::ResponseFormatJSONObject + ) + end + + sig do + override.returns( + T::Array[OpenAI::Responses::ResponseFormatTextConfig::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_format_text_json_schema_config.rbi b/rbi/openai/models/responses/response_format_text_json_schema_config.rbi new file mode 100644 index 00000000..0b27d712 --- /dev/null +++ b/rbi/openai/models/responses/response_format_text_json_schema_config.rbi @@ -0,0 +1,94 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseFormatTextJSONSchemaConfig < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig, + OpenAI::Internal::AnyHash + ) + end + + # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + # and dashes, with a maximum length of 64. + sig { returns(String) } + attr_accessor :name + + # The schema for the response format, described as a JSON Schema object. Learn how + # to build JSON schemas [here](https://json-schema.org/). + sig { returns(T::Hash[Symbol, T.anything]) } + attr_accessor :schema + + # The type of response format being defined. Always `json_schema`. + sig { returns(Symbol) } + attr_accessor :type + + # A description of what the response format is for, used by the model to determine + # how to respond in the format. + sig { returns(T.nilable(String)) } + attr_reader :description + + sig { params(description: String).void } + attr_writer :description + + # Whether to enable strict schema adherence when generating the output. 
If set to + # true, the model will always follow the exact schema defined in the `schema` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. To + # learn more, read the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + sig { returns(T.nilable(T::Boolean)) } + attr_accessor :strict + + # JSON Schema response format. Used to generate structured JSON responses. Learn + # more about + # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + sig do + params( + name: String, + schema: T::Hash[Symbol, T.anything], + description: String, + strict: T.nilable(T::Boolean), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + # and dashes, with a maximum length of 64. + name:, + # The schema for the response format, described as a JSON Schema object. Learn how + # to build JSON schemas [here](https://json-schema.org/). + schema:, + # A description of what the response format is for, used by the model to determine + # how to respond in the format. + description: nil, + # Whether to enable strict schema adherence when generating the output. If set to + # true, the model will always follow the exact schema defined in the `schema` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. To + # learn more, read the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + strict: nil, + # The type of response format being defined. Always `json_schema`. + type: :json_schema + ) + end + + sig do + override.returns( + { + name: String, + schema: T::Hash[Symbol, T.anything], + type: Symbol, + description: String, + strict: T.nilable(T::Boolean) + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_function_call_arguments_delta_event.rbi b/rbi/openai/models/responses/response_function_call_arguments_delta_event.rbi new file mode 100644 index 00000000..eb59905d --- /dev/null +++ b/rbi/openai/models/responses/response_function_call_arguments_delta_event.rbi @@ -0,0 +1,75 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseFunctionCallArgumentsDeltaEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionCallArgumentsDeltaEvent, + OpenAI::Internal::AnyHash + ) + end + + # The function-call arguments delta that is added. + sig { returns(String) } + attr_accessor :delta + + # The ID of the output item that the function-call arguments delta is added to. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item that the function-call arguments delta is added to. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.function_call_arguments.delta`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when there is a partial function-call arguments delta. + sig do + params( + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The function-call arguments delta that is added. + delta:, + # The ID of the output item that the function-call arguments delta is added to. 
+ item_id:, + # The index of the output item that the function-call arguments delta is added to. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always `response.function_call_arguments.delta`. + type: :"response.function_call_arguments.delta" + ) + end + + sig do + override.returns( + { + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_function_call_arguments_done_event.rbi b/rbi/openai/models/responses/response_function_call_arguments_done_event.rbi new file mode 100644 index 00000000..dac56326 --- /dev/null +++ b/rbi/openai/models/responses/response_function_call_arguments_done_event.rbi @@ -0,0 +1,73 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseFunctionCallArgumentsDoneEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionCallArgumentsDoneEvent, + OpenAI::Internal::AnyHash + ) + end + + # The function-call arguments. + sig { returns(String) } + attr_accessor :arguments + + # The ID of the item. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when function-call arguments are finalized. + sig do + params( + arguments: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The function-call arguments. + arguments:, + # The ID of the item. + item_id:, + # The index of the output item. + output_index:, + # The sequence number of this event. + sequence_number:, + type: :"response.function_call_arguments.done" + ) + end + + sig do + override.returns( + { + arguments: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_function_tool_call.rbi b/rbi/openai/models/responses/response_function_tool_call.rbi new file mode 100644 index 00000000..5e98ba1a --- /dev/null +++ b/rbi/openai/models/responses/response_function_tool_call.rbi @@ -0,0 +1,144 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseFunctionToolCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionToolCall, + OpenAI::Internal::AnyHash + ) + end + + # A JSON string of the arguments to pass to the function. + sig { returns(String) } + attr_accessor :arguments + + # The unique ID of the function tool call generated by the model. + sig { returns(String) } + attr_accessor :call_id + + # The name of the function to run. + sig { returns(String) } + attr_accessor :name + + # The type of the function tool call. Always `function_call`. + sig { returns(Symbol) } + attr_accessor :type + + # The unique ID of the function tool call. + sig { returns(T.nilable(String)) } + attr_reader :id + + sig { params(id: String).void } + attr_writer :id + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. 
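+        # A handling sketch (illustrative, not part of the generated docs):
+        # branch on the returned status and parse the JSON `arguments` once the
+        # call is complete.
+        #
+        #   case call.status
+        #   when OpenAI::Responses::ResponseFunctionToolCall::Status::COMPLETED
+        #     args = JSON.parse(call.arguments)
+        #   when OpenAI::Responses::ResponseFunctionToolCall::Status::IN_PROGRESS
+        #     # arguments may still be accumulating
+        #   end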
+ sig do + returns( + T.nilable( + OpenAI::Responses::ResponseFunctionToolCall::Status::OrSymbol + ) + ) + end + attr_reader :status + + sig do + params( + status: + OpenAI::Responses::ResponseFunctionToolCall::Status::OrSymbol + ).void + end + attr_writer :status + + # A tool call to run a function. See the + # [function calling guide](https://platform.openai.com/docs/guides/function-calling) + # for more information. + sig do + params( + arguments: String, + call_id: String, + name: String, + id: String, + status: + OpenAI::Responses::ResponseFunctionToolCall::Status::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # A JSON string of the arguments to pass to the function. + arguments:, + # The unique ID of the function tool call generated by the model. + call_id:, + # The name of the function to run. + name:, + # The unique ID of the function tool call. + id: nil, + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + status: nil, + # The type of the function tool call. Always `function_call`. + type: :function_call + ) + end + + sig do + override.returns( + { + arguments: String, + call_id: String, + name: String, + type: Symbol, + id: String, + status: + OpenAI::Responses::ResponseFunctionToolCall::Status::OrSymbol + } + ) + end + def to_hash + end + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ResponseFunctionToolCall::Status) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseFunctionToolCall::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseFunctionToolCall::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseFunctionToolCall::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseFunctionToolCall::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_function_tool_call_item.rbi b/rbi/openai/models/responses/response_function_tool_call_item.rbi new file mode 100644 index 00000000..514a0568 --- /dev/null +++ b/rbi/openai/models/responses/response_function_tool_call_item.rbi @@ -0,0 +1,35 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseFunctionToolCallItem < OpenAI::Models::Responses::ResponseFunctionToolCall + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionToolCallItem, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the function tool call. + sig { returns(String) } + attr_accessor :id + + # A tool call to run a function. See the + # [function calling guide](https://platform.openai.com/docs/guides/function-calling) + # for more information. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the function tool call. 
+ id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_function_tool_call_output_item.rbi b/rbi/openai/models/responses/response_function_tool_call_output_item.rbi new file mode 100644 index 00000000..b3820b0d --- /dev/null +++ b/rbi/openai/models/responses/response_function_tool_call_output_item.rbi @@ -0,0 +1,133 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseFunctionToolCallOutputItem < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionToolCallOutputItem, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the function call tool output. + sig { returns(String) } + attr_accessor :id + + # The unique ID of the function tool call generated by the model. + sig { returns(String) } + attr_accessor :call_id + + # A JSON string of the output of the function tool call. + sig { returns(String) } + attr_accessor :output + + # The type of the function tool call output. Always `function_call_output`. + sig { returns(Symbol) } + attr_accessor :type + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status::TaggedSymbol + ) + ) + end + attr_reader :status + + sig do + params( + status: + OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status::OrSymbol + ).void + end + attr_writer :status + + sig do + params( + id: String, + call_id: String, + output: String, + status: + OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the function call tool output. + id:, + # The unique ID of the function tool call generated by the model. + call_id:, + # A JSON string of the output of the function tool call. + output:, + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + status: nil, + # The type of the function tool call output. Always `function_call_output`. + type: :function_call_output + ) + end + + sig do + override.returns( + { + id: String, + call_id: String, + output: String, + type: Symbol, + status: + OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status::TaggedSymbol + } + ) + end + def to_hash + end + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. 
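+          # A note on the enum pattern used throughout these RBIs: `OrSymbol`
+          # (`T.any(Symbol, String)`) is the type callers may pass in, while
+          # `TaggedSymbol` brands the symbols the SDK returns. At runtime the
+          # constants are plain symbols, so the following comparisons are
+          # equivalent (illustrative):
+          #
+          #   item.status == OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status::COMPLETED
+          #   item.status == :completed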
+ module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_function_web_search.rbi b/rbi/openai/models/responses/response_function_web_search.rbi new file mode 100644 index 00000000..5e7a4263 --- /dev/null +++ b/rbi/openai/models/responses/response_function_web_search.rbi @@ -0,0 +1,275 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseFunctionWebSearch < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionWebSearch, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the web search tool call. + sig { returns(String) } + attr_accessor :id + + # An object describing the specific action taken in this web search call. Includes + # details on how the model used the web (search, open_page, find). + sig do + returns( + T.any( + OpenAI::Responses::ResponseFunctionWebSearch::Action::Search, + OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage, + OpenAI::Responses::ResponseFunctionWebSearch::Action::Find + ) + ) + end + attr_accessor :action + + # The status of the web search tool call. + sig do + returns( + OpenAI::Responses::ResponseFunctionWebSearch::Status::OrSymbol + ) + end + attr_accessor :status + + # The type of the web search tool call. Always `web_search_call`. + sig { returns(Symbol) } + attr_accessor :type + + # The results of a web search tool call. See the + # [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for + # more information. + sig do + params( + id: String, + action: + T.any( + OpenAI::Responses::ResponseFunctionWebSearch::Action::Search::OrHash, + OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage::OrHash, + OpenAI::Responses::ResponseFunctionWebSearch::Action::Find::OrHash + ), + status: + OpenAI::Responses::ResponseFunctionWebSearch::Status::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the web search tool call. + id:, + # An object describing the specific action taken in this web search call. Includes + # details on how the model used the web (search, open_page, find). + action:, + # The status of the web search tool call. + status:, + # The type of the web search tool call. Always `web_search_call`. + type: :web_search_call + ) + end + + sig do + override.returns( + { + id: String, + action: + T.any( + OpenAI::Responses::ResponseFunctionWebSearch::Action::Search, + OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage, + OpenAI::Responses::ResponseFunctionWebSearch::Action::Find + ), + status: + OpenAI::Responses::ResponseFunctionWebSearch::Status::OrSymbol, + type: Symbol + } + ) + end + def to_hash + end + + # An object describing the specific action taken in this web search call. 
Includes + # details on how the model used the web (search, open_page, find). + module Action + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionWebSearch::Action::Search, + OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage, + OpenAI::Responses::ResponseFunctionWebSearch::Action::Find + ) + end + + class Search < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionWebSearch::Action::Search, + OpenAI::Internal::AnyHash + ) + end + + # The search query. + sig { returns(String) } + attr_accessor :query + + # The action type. + sig { returns(Symbol) } + attr_accessor :type + + # Action type "search" - Performs a web search query. + sig do + params(query: String, type: Symbol).returns(T.attached_class) + end + def self.new( + # The search query. + query:, + # The action type. + type: :search + ) + end + + sig { override.returns({ query: String, type: Symbol }) } + def to_hash + end + end + + class OpenPage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage, + OpenAI::Internal::AnyHash + ) + end + + # The action type. + sig { returns(Symbol) } + attr_accessor :type + + # The URL opened by the model. + sig { returns(String) } + attr_accessor :url + + # Action type "open_page" - Opens a specific URL from search results. + sig { params(url: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The URL opened by the model. + url:, + # The action type. + type: :open_page + ) + end + + sig { override.returns({ type: Symbol, url: String }) } + def to_hash + end + end + + class Find < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseFunctionWebSearch::Action::Find, + OpenAI::Internal::AnyHash + ) + end + + # The pattern or text to search for within the page. + sig { returns(String) } + attr_accessor :pattern + + # The action type. + sig { returns(Symbol) } + attr_accessor :type + + # The URL of the page searched for the pattern. + sig { returns(String) } + attr_accessor :url + + # Action type "find": Searches for a pattern within a loaded page. + sig do + params(pattern: String, url: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The pattern or text to search for within the page. + pattern:, + # The URL of the page searched for the pattern. + url:, + # The action type. + type: :find + ) + end + + sig do + override.returns({ pattern: String, type: Symbol, url: String }) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseFunctionWebSearch::Action::Variants + ] + ) + end + def self.variants + end + end + + # The status of the web search tool call. 
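+          # Illustratively, the enum below exposes its members via `values`:
+          #
+          #   OpenAI::Responses::ResponseFunctionWebSearch::Status.values
+          #   # => [:in_progress, :searching, :completed, :failed]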
+ module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseFunctionWebSearch::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseFunctionWebSearch::Status::TaggedSymbol + ) + SEARCHING = + T.let( + :searching, + OpenAI::Responses::ResponseFunctionWebSearch::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseFunctionWebSearch::Status::TaggedSymbol + ) + FAILED = + T.let( + :failed, + OpenAI::Responses::ResponseFunctionWebSearch::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseFunctionWebSearch::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_image_gen_call_completed_event.rbi b/rbi/openai/models/responses/response_image_gen_call_completed_event.rbi new file mode 100644 index 00000000..176049a4 --- /dev/null +++ b/rbi/openai/models/responses/response_image_gen_call_completed_event.rbi @@ -0,0 +1,68 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseImageGenCallCompletedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseImageGenCallCompletedEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique identifier of the image generation item being processed. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item in the response's output array. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always 'response.image_generation_call.completed'. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when an image generation tool call has completed and the final image is + # available. + sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique identifier of the image generation item being processed. + item_id:, + # The index of the output item in the response's output array. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always 'response.image_generation_call.completed'. + type: :"response.image_generation_call.completed" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_image_gen_call_generating_event.rbi b/rbi/openai/models/responses/response_image_gen_call_generating_event.rbi new file mode 100644 index 00000000..ece2928d --- /dev/null +++ b/rbi/openai/models/responses/response_image_gen_call_generating_event.rbi @@ -0,0 +1,68 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseImageGenCallGeneratingEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseImageGenCallGeneratingEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique identifier of the image generation item being processed. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item in the response's output array. 
+ sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of the image generation item being processed. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always 'response.image_generation_call.generating'. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when an image generation tool call is actively generating an image + # (intermediate state). + sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique identifier of the image generation item being processed. + item_id:, + # The index of the output item in the response's output array. + output_index:, + # The sequence number of the image generation item being processed. + sequence_number:, + # The type of the event. Always 'response.image_generation_call.generating'. + type: :"response.image_generation_call.generating" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_image_gen_call_in_progress_event.rbi b/rbi/openai/models/responses/response_image_gen_call_in_progress_event.rbi new file mode 100644 index 00000000..7bf0d0e4 --- /dev/null +++ b/rbi/openai/models/responses/response_image_gen_call_in_progress_event.rbi @@ -0,0 +1,67 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseImageGenCallInProgressEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseImageGenCallInProgressEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique identifier of the image generation item being processed. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item in the response's output array. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of the image generation item being processed. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always 'response.image_generation_call.in_progress'. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when an image generation tool call is in progress. + sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique identifier of the image generation item being processed. + item_id:, + # The index of the output item in the response's output array. + output_index:, + # The sequence number of the image generation item being processed. + sequence_number:, + # The type of the event. Always 'response.image_generation_call.in_progress'. 
+ type: :"response.image_generation_call.in_progress" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_image_gen_call_partial_image_event.rbi b/rbi/openai/models/responses/response_image_gen_call_partial_image_event.rbi new file mode 100644 index 00000000..b5f9909a --- /dev/null +++ b/rbi/openai/models/responses/response_image_gen_call_partial_image_event.rbi @@ -0,0 +1,85 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseImageGenCallPartialImageEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseImageGenCallPartialImageEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique identifier of the image generation item being processed. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item in the response's output array. + sig { returns(Integer) } + attr_accessor :output_index + + # Base64-encoded partial image data, suitable for rendering as an image. + sig { returns(String) } + attr_accessor :partial_image_b64 + + # 0-based index for the partial image (backend is 1-based, but this is 0-based for + # the user). + sig { returns(Integer) } + attr_accessor :partial_image_index + + # The sequence number of the image generation item being processed. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always 'response.image_generation_call.partial_image'. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a partial image is available during image generation streaming. + sig do + params( + item_id: String, + output_index: Integer, + partial_image_b64: String, + partial_image_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique identifier of the image generation item being processed. + item_id:, + # The index of the output item in the response's output array. + output_index:, + # Base64-encoded partial image data, suitable for rendering as an image. + partial_image_b64:, + # 0-based index for the partial image (backend is 1-based, but this is 0-based for + # the user). + partial_image_index:, + # The sequence number of the image generation item being processed. + sequence_number:, + # The type of the event. Always 'response.image_generation_call.partial_image'. + type: :"response.image_generation_call.partial_image" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + partial_image_b64: String, + partial_image_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_in_progress_event.rbi b/rbi/openai/models/responses/response_in_progress_event.rbi new file mode 100644 index 00000000..c3dd5e40 --- /dev/null +++ b/rbi/openai/models/responses/response_in_progress_event.rbi @@ -0,0 +1,62 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseInProgressEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInProgressEvent, + OpenAI::Internal::AnyHash + ) + end + + # The response that is in progress. 
+ sig { returns(OpenAI::Responses::Response) } + attr_reader :response + + sig { params(response: OpenAI::Responses::Response::OrHash).void } + attr_writer :response + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.in_progress`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when the response is in progress. + sig do + params( + response: OpenAI::Responses::Response::OrHash, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The response that is in progress. + response:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always `response.in_progress`. + type: :"response.in_progress" + ) + end + + sig do + override.returns( + { + response: OpenAI::Responses::Response, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_includable.rbi b/rbi/openai/models/responses/response_includable.rbi new file mode 100644 index 00000000..1007c6fd --- /dev/null +++ b/rbi/openai/models/responses/response_includable.rbi @@ -0,0 +1,70 @@ +# typed: strong + +module OpenAI + module Models + module Responses + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `message.output_text.logprobs`: Include logprobs with assistant messages. + # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + # tokens in reasoning item outputs. This enables reasoning items to be used in + # multi-turn conversations when using the Responses API statelessly (like when + # the `store` parameter is set to `false`, or when an organization is enrolled + # in the zero data retention program). 
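+      # For example, assuming the Responses API's `include:` request parameter
+      # (which accepts these values):
+      #
+      #   client.responses.create(
+      #     model: "gpt-4.1",
+      #     input: "What do my files say about widgets?",
+      #     include: [:"file_search_call.results"]
+      #   )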
+ module ResponseIncludable + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::Responses::ResponseIncludable) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + CODE_INTERPRETER_CALL_OUTPUTS = + T.let( + :"code_interpreter_call.outputs", + OpenAI::Responses::ResponseIncludable::TaggedSymbol + ) + COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL = + T.let( + :"computer_call_output.output.image_url", + OpenAI::Responses::ResponseIncludable::TaggedSymbol + ) + FILE_SEARCH_CALL_RESULTS = + T.let( + :"file_search_call.results", + OpenAI::Responses::ResponseIncludable::TaggedSymbol + ) + MESSAGE_INPUT_IMAGE_IMAGE_URL = + T.let( + :"message.input_image.image_url", + OpenAI::Responses::ResponseIncludable::TaggedSymbol + ) + MESSAGE_OUTPUT_TEXT_LOGPROBS = + T.let( + :"message.output_text.logprobs", + OpenAI::Responses::ResponseIncludable::TaggedSymbol + ) + REASONING_ENCRYPTED_CONTENT = + T.let( + :"reasoning.encrypted_content", + OpenAI::Responses::ResponseIncludable::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Responses::ResponseIncludable::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_incomplete_event.rbi b/rbi/openai/models/responses/response_incomplete_event.rbi new file mode 100644 index 00000000..591c46e8 --- /dev/null +++ b/rbi/openai/models/responses/response_incomplete_event.rbi @@ -0,0 +1,62 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseIncompleteEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseIncompleteEvent, + OpenAI::Internal::AnyHash + ) + end + + # The response that was incomplete. + sig { returns(OpenAI::Responses::Response) } + attr_reader :response + + sig { params(response: OpenAI::Responses::Response::OrHash).void } + attr_writer :response + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.incomplete`. + sig { returns(Symbol) } + attr_accessor :type + + # An event that is emitted when a response finishes as incomplete. + sig do + params( + response: OpenAI::Responses::Response::OrHash, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The response that was incomplete. + response:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always `response.incomplete`. 
+ type: :"response.incomplete" + ) + end + + sig do + override.returns( + { + response: OpenAI::Responses::Response, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_input.rbi b/rbi/openai/models/responses/response_input.rbi new file mode 100644 index 00000000..48025693 --- /dev/null +++ b/rbi/openai/models/responses/response_input.rbi @@ -0,0 +1,15 @@ +# typed: strong + +module OpenAI + module Models + module Responses + ResponseInput = + T.let( + OpenAI::Internal::Type::ArrayOf[ + union: OpenAI::Responses::ResponseInputItem + ], + OpenAI::Internal::Type::Converter + ) + end + end +end diff --git a/rbi/openai/models/responses/response_input_audio.rbi b/rbi/openai/models/responses/response_input_audio.rbi new file mode 100644 index 00000000..6b771d48 --- /dev/null +++ b/rbi/openai/models/responses/response_input_audio.rbi @@ -0,0 +1,91 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseInputAudio < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputAudio, + OpenAI::Internal::AnyHash + ) + end + + # Base64-encoded audio data. + sig { returns(String) } + attr_accessor :data + + # The format of the audio data. Currently supported formats are `mp3` and `wav`. + sig { returns(OpenAI::Responses::ResponseInputAudio::Format::OrSymbol) } + attr_accessor :format_ + + # The type of the input item. Always `input_audio`. + sig { returns(Symbol) } + attr_accessor :type + + # An audio input to the model. + sig do + params( + data: String, + format_: OpenAI::Responses::ResponseInputAudio::Format::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Base64-encoded audio data. + data:, + # The format of the audio data. Currently supported formats are `mp3` and `wav`. + format_:, + # The type of the input item. Always `input_audio`. + type: :input_audio + ) + end + + sig do + override.returns( + { + data: String, + format_: OpenAI::Responses::ResponseInputAudio::Format::OrSymbol, + type: Symbol + } + ) + end + def to_hash + end + + # The format of the audio data. Currently supported formats are `mp3` and `wav`. + module Format + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ResponseInputAudio::Format) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MP3 = + T.let( + :mp3, + OpenAI::Responses::ResponseInputAudio::Format::TaggedSymbol + ) + WAV = + T.let( + :wav, + OpenAI::Responses::ResponseInputAudio::Format::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputAudio::Format::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_input_content.rbi b/rbi/openai/models/responses/response_input_content.rbi new file mode 100644 index 00000000..f18545cc --- /dev/null +++ b/rbi/openai/models/responses/response_input_content.rbi @@ -0,0 +1,29 @@ +# typed: strong + +module OpenAI + module Models + module Responses + # A text input to the model. 
+ module ResponseInputContent + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputText, + OpenAI::Responses::ResponseInputImage, + OpenAI::Responses::ResponseInputFile + ) + end + + sig do + override.returns( + T::Array[OpenAI::Responses::ResponseInputContent::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_input_file.rbi b/rbi/openai/models/responses/response_input_file.rbi new file mode 100644 index 00000000..bfa515c2 --- /dev/null +++ b/rbi/openai/models/responses/response_input_file.rbi @@ -0,0 +1,84 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseInputFile < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputFile, + OpenAI::Internal::AnyHash + ) + end + + # The type of the input item. Always `input_file`. + sig { returns(Symbol) } + attr_accessor :type + + # The content of the file to be sent to the model. + sig { returns(T.nilable(String)) } + attr_reader :file_data + + sig { params(file_data: String).void } + attr_writer :file_data + + # The ID of the file to be sent to the model. + sig { returns(T.nilable(String)) } + attr_accessor :file_id + + # The URL of the file to be sent to the model. + sig { returns(T.nilable(String)) } + attr_reader :file_url + + sig { params(file_url: String).void } + attr_writer :file_url + + # The name of the file to be sent to the model. + sig { returns(T.nilable(String)) } + attr_reader :filename + + sig { params(filename: String).void } + attr_writer :filename + + # A file input to the model. + sig do + params( + file_data: String, + file_id: T.nilable(String), + file_url: String, + filename: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The content of the file to be sent to the model. + file_data: nil, + # The ID of the file to be sent to the model. + file_id: nil, + # The URL of the file to be sent to the model. + file_url: nil, + # The name of the file to be sent to the model. + filename: nil, + # The type of the input item. Always `input_file`. + type: :input_file + ) + end + + sig do + override.returns( + { + type: Symbol, + file_data: String, + file_id: T.nilable(String), + file_url: String, + filename: String + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_input_image.rbi b/rbi/openai/models/responses/response_input_image.rbi new file mode 100644 index 00000000..a553503b --- /dev/null +++ b/rbi/openai/models/responses/response_input_image.rbi @@ -0,0 +1,110 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseInputImage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputImage, + OpenAI::Internal::AnyHash + ) + end + + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + sig { returns(OpenAI::Responses::ResponseInputImage::Detail::OrSymbol) } + attr_accessor :detail + + # The type of the input item. Always `input_image`. + sig { returns(Symbol) } + attr_accessor :type + + # The ID of the file to be sent to the model. + sig { returns(T.nilable(String)) } + attr_accessor :file_id + + # The URL of the image to be sent to the model. A fully qualified URL or base64 + # encoded image in a data URL. 
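+          # Illustrative values: `"https://example.com/cat.png"`, or a standard
+          # data URL such as `"data:image/jpeg;base64,/9j/4AAQ..."`.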
+ sig { returns(T.nilable(String)) } + attr_accessor :image_url + + # An image input to the model. Learn about + # [image inputs](https://platform.openai.com/docs/guides/vision). + sig do + params( + detail: OpenAI::Responses::ResponseInputImage::Detail::OrSymbol, + file_id: T.nilable(String), + image_url: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + detail:, + # The ID of the file to be sent to the model. + file_id: nil, + # The URL of the image to be sent to the model. A fully qualified URL or base64 + # encoded image in a data URL. + image_url: nil, + # The type of the input item. Always `input_image`. + type: :input_image + ) + end + + sig do + override.returns( + { + detail: OpenAI::Responses::ResponseInputImage::Detail::OrSymbol, + type: Symbol, + file_id: T.nilable(String), + image_url: T.nilable(String) + } + ) + end + def to_hash + end + + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. + module Detail + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ResponseInputImage::Detail) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = + T.let( + :low, + OpenAI::Responses::ResponseInputImage::Detail::TaggedSymbol + ) + HIGH = + T.let( + :high, + OpenAI::Responses::ResponseInputImage::Detail::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::Responses::ResponseInputImage::Detail::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputImage::Detail::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_input_item.rbi b/rbi/openai/models/responses/response_input_item.rbi new file mode 100644 index 00000000..6b28bab2 --- /dev/null +++ b/rbi/openai/models/responses/response_input_item.rbi @@ -0,0 +1,1501 @@ +# typed: strong + +module OpenAI + module Models + module Responses + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. 
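+      # A minimal construction sketch using the `Message` variant defined
+      # below (hash content is accepted wherever an `OrHash` alias appears):
+      #
+      #   OpenAI::Responses::ResponseInputItem::Message.new(
+      #     role: :user,
+      #     content: [{type: :input_text, text: "Summarize the attached file."}]
+      #   )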
+ module ResponseInputItem + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::EasyInputMessage, + OpenAI::Responses::ResponseInputItem::Message, + OpenAI::Responses::ResponseOutputMessage, + OpenAI::Responses::ResponseFileSearchToolCall, + OpenAI::Responses::ResponseComputerToolCall, + OpenAI::Responses::ResponseInputItem::ComputerCallOutput, + OpenAI::Responses::ResponseFunctionWebSearch, + OpenAI::Responses::ResponseFunctionToolCall, + OpenAI::Responses::ResponseInputItem::FunctionCallOutput, + OpenAI::Responses::ResponseReasoningItem, + OpenAI::Responses::ResponseInputItem::ImageGenerationCall, + OpenAI::Responses::ResponseCodeInterpreterToolCall, + OpenAI::Responses::ResponseInputItem::LocalShellCall, + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput, + OpenAI::Responses::ResponseInputItem::McpListTools, + OpenAI::Responses::ResponseInputItem::McpApprovalRequest, + OpenAI::Responses::ResponseInputItem::McpApprovalResponse, + OpenAI::Responses::ResponseInputItem::McpCall, + OpenAI::Responses::ResponseCustomToolCallOutput, + OpenAI::Responses::ResponseCustomToolCall, + OpenAI::Responses::ResponseInputItem::ItemReference + ) + end + + class Message < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::Message, + OpenAI::Internal::AnyHash + ) + end + + # A list of one or many input items to the model, containing different content + # types. + sig do + returns( + T::Array[ + T.any( + OpenAI::Responses::ResponseInputText, + OpenAI::Responses::ResponseInputImage, + OpenAI::Responses::ResponseInputFile + ) + ] + ) + end + attr_accessor :content + + # The role of the message input. One of `user`, `system`, or `developer`. + sig do + returns( + OpenAI::Responses::ResponseInputItem::Message::Role::OrSymbol + ) + end + attr_accessor :role + + # The status of item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseInputItem::Message::Status::OrSymbol + ) + ) + end + attr_reader :status + + sig do + params( + status: + OpenAI::Responses::ResponseInputItem::Message::Status::OrSymbol + ).void + end + attr_writer :status + + # The type of the message input. Always set to `message`. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseInputItem::Message::Type::OrSymbol + ) + ) + end + attr_reader :type + + sig do + params( + type: + OpenAI::Responses::ResponseInputItem::Message::Type::OrSymbol + ).void + end + attr_writer :type + + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. + sig do + params( + content: + T::Array[ + T.any( + OpenAI::Responses::ResponseInputText::OrHash, + OpenAI::Responses::ResponseInputImage::OrHash, + OpenAI::Responses::ResponseInputFile::OrHash + ) + ], + role: + OpenAI::Responses::ResponseInputItem::Message::Role::OrSymbol, + status: + OpenAI::Responses::ResponseInputItem::Message::Status::OrSymbol, + type: + OpenAI::Responses::ResponseInputItem::Message::Type::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # A list of one or many input items to the model, containing different content + # types. + content:, + # The role of the message input. One of `user`, `system`, or `developer`. + role:, + # The status of item. 
One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + status: nil, + # The type of the message input. Always set to `message`. + type: nil + ) + end + + sig do + override.returns( + { + content: + T::Array[ + T.any( + OpenAI::Responses::ResponseInputText, + OpenAI::Responses::ResponseInputImage, + OpenAI::Responses::ResponseInputFile + ) + ], + role: + OpenAI::Responses::ResponseInputItem::Message::Role::OrSymbol, + status: + OpenAI::Responses::ResponseInputItem::Message::Status::OrSymbol, + type: + OpenAI::Responses::ResponseInputItem::Message::Type::OrSymbol + } + ) + end + def to_hash + end + + # The role of the message input. One of `user`, `system`, or `developer`. + module Role + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseInputItem::Message::Role + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + USER = + T.let( + :user, + OpenAI::Responses::ResponseInputItem::Message::Role::TaggedSymbol + ) + SYSTEM = + T.let( + :system, + OpenAI::Responses::ResponseInputItem::Message::Role::TaggedSymbol + ) + DEVELOPER = + T.let( + :developer, + OpenAI::Responses::ResponseInputItem::Message::Role::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputItem::Message::Role::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The status of item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseInputItem::Message::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseInputItem::Message::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseInputItem::Message::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseInputItem::Message::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputItem::Message::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The type of the message input. Always set to `message`. + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseInputItem::Message::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MESSAGE = + T.let( + :message, + OpenAI::Responses::ResponseInputItem::Message::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputItem::Message::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class ComputerCallOutput < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::ComputerCallOutput, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the computer tool call that produced the output. + sig { returns(String) } + attr_accessor :call_id + + # A computer screenshot image used with the computer use tool. + sig do + returns(OpenAI::Responses::ResponseComputerToolCallOutputScreenshot) + end + attr_reader :output + + sig do + params( + output: + OpenAI::Responses::ResponseComputerToolCallOutputScreenshot::OrHash + ).void + end + attr_writer :output + + # The type of the computer tool call output. Always `computer_call_output`. 
+ sig { returns(Symbol) } + attr_accessor :type + + # The ID of the computer tool call output. + sig { returns(T.nilable(String)) } + attr_accessor :id + + # The safety checks reported by the API that have been acknowledged by the + # developer. + sig do + returns( + T.nilable( + T::Array[ + OpenAI::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck + ] + ) + ) + end + attr_accessor :acknowledged_safety_checks + + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseInputItem::ComputerCallOutput::Status::OrSymbol + ) + ) + end + attr_accessor :status + + # The output of a computer tool call. + sig do + params( + call_id: String, + output: + OpenAI::Responses::ResponseComputerToolCallOutputScreenshot::OrHash, + id: T.nilable(String), + acknowledged_safety_checks: + T.nilable( + T::Array[ + OpenAI::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck::OrHash + ] + ), + status: + T.nilable( + OpenAI::Responses::ResponseInputItem::ComputerCallOutput::Status::OrSymbol + ), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the computer tool call that produced the output. + call_id:, + # A computer screenshot image used with the computer use tool. + output:, + # The ID of the computer tool call output. + id: nil, + # The safety checks reported by the API that have been acknowledged by the + # developer. + acknowledged_safety_checks: nil, + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. + status: nil, + # The type of the computer tool call output. Always `computer_call_output`. + type: :computer_call_output + ) + end + + sig do + override.returns( + { + call_id: String, + output: + OpenAI::Responses::ResponseComputerToolCallOutputScreenshot, + type: Symbol, + id: T.nilable(String), + acknowledged_safety_checks: + T.nilable( + T::Array[ + OpenAI::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck + ] + ), + status: + T.nilable( + OpenAI::Responses::ResponseInputItem::ComputerCallOutput::Status::OrSymbol + ) + } + ) + end + def to_hash + end + + class AcknowledgedSafetyCheck < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the pending safety check. + sig { returns(String) } + attr_accessor :id + + # The type of the pending safety check. + sig { returns(T.nilable(String)) } + attr_accessor :code + + # Details about the pending safety check. + sig { returns(T.nilable(String)) } + attr_accessor :message + + # A pending safety check for the computer call. + sig do + params( + id: String, + code: T.nilable(String), + message: T.nilable(String) + ).returns(T.attached_class) + end + def self.new( + # The ID of the pending safety check. + id:, + # The type of the pending safety check. + code: nil, + # Details about the pending safety check. + message: nil + ) + end + + sig do + override.returns( + { + id: String, + code: T.nilable(String), + message: T.nilable(String) + } + ) + end + def to_hash + end + end + + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. 
+ module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseInputItem::ComputerCallOutput::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseInputItem::ComputerCallOutput::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseInputItem::ComputerCallOutput::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseInputItem::ComputerCallOutput::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputItem::ComputerCallOutput::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class FunctionCallOutput < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::FunctionCallOutput, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the function tool call generated by the model. + sig { returns(String) } + attr_accessor :call_id + + # A JSON string of the output of the function tool call. + sig { returns(String) } + attr_accessor :output + + # The type of the function tool call output. Always `function_call_output`. + sig { returns(Symbol) } + attr_accessor :type + + # The unique ID of the function tool call output. Populated when this item is + # returned via API. + sig { returns(T.nilable(String)) } + attr_accessor :id + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseInputItem::FunctionCallOutput::Status::OrSymbol + ) + ) + end + attr_accessor :status + + # The output of a function tool call. + sig do + params( + call_id: String, + output: String, + id: T.nilable(String), + status: + T.nilable( + OpenAI::Responses::ResponseInputItem::FunctionCallOutput::Status::OrSymbol + ), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the function tool call generated by the model. + call_id:, + # A JSON string of the output of the function tool call. + output:, + # The unique ID of the function tool call output. Populated when this item is + # returned via API. + id: nil, + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + status: nil, + # The type of the function tool call output. Always `function_call_output`. + type: :function_call_output + ) + end + + sig do + override.returns( + { + call_id: String, + output: String, + type: Symbol, + id: T.nilable(String), + status: + T.nilable( + OpenAI::Responses::ResponseInputItem::FunctionCallOutput::Status::OrSymbol + ) + } + ) + end + def to_hash + end + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. 
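+          # Completing a function-calling round trip is the typical use of this
+          # class; a sketch with illustrative values:
+          #
+          #   OpenAI::Responses::ResponseInputItem::FunctionCallOutput.new(
+          #     call_id: tool_call.call_id,
+          #     output: {temperature_f: 72}.to_json
+          #   )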
+ module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseInputItem::FunctionCallOutput::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseInputItem::FunctionCallOutput::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseInputItem::FunctionCallOutput::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseInputItem::FunctionCallOutput::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputItem::FunctionCallOutput::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class ImageGenerationCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::ImageGenerationCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the image generation call. + sig { returns(String) } + attr_accessor :id + + # The generated image encoded in base64. + sig { returns(T.nilable(String)) } + attr_accessor :result + + # The status of the image generation call. + sig do + returns( + OpenAI::Responses::ResponseInputItem::ImageGenerationCall::Status::OrSymbol + ) + end + attr_accessor :status + + # The type of the image generation call. Always `image_generation_call`. + sig { returns(Symbol) } + attr_accessor :type + + # An image generation request made by the model. + sig do + params( + id: String, + result: T.nilable(String), + status: + OpenAI::Responses::ResponseInputItem::ImageGenerationCall::Status::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the image generation call. + id:, + # The generated image encoded in base64. + result:, + # The status of the image generation call. + status:, + # The type of the image generation call. Always `image_generation_call`. + type: :image_generation_call + ) + end + + sig do + override.returns( + { + id: String, + result: T.nilable(String), + status: + OpenAI::Responses::ResponseInputItem::ImageGenerationCall::Status::OrSymbol, + type: Symbol + } + ) + end + def to_hash + end + + # The status of the image generation call. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseInputItem::ImageGenerationCall::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseInputItem::ImageGenerationCall::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseInputItem::ImageGenerationCall::Status::TaggedSymbol + ) + GENERATING = + T.let( + :generating, + OpenAI::Responses::ResponseInputItem::ImageGenerationCall::Status::TaggedSymbol + ) + FAILED = + T.let( + :failed, + OpenAI::Responses::ResponseInputItem::ImageGenerationCall::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputItem::ImageGenerationCall::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class LocalShellCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::LocalShellCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the local shell call. + sig { returns(String) } + attr_accessor :id + + # Execute a shell command on the server. 
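+ #
+ # Hypothetical sketch of the hash form this attribute accepts (see
+ # `Action::OrHash` below; illustration only):
+ #
+ #   action: {command: ["ls", "-la"], env: {PATH: "/usr/bin"}}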
+ sig do + returns( + OpenAI::Responses::ResponseInputItem::LocalShellCall::Action + ) + end + attr_reader :action + + sig do + params( + action: + OpenAI::Responses::ResponseInputItem::LocalShellCall::Action::OrHash + ).void + end + attr_writer :action + + # The unique ID of the local shell tool call generated by the model. + sig { returns(String) } + attr_accessor :call_id + + # The status of the local shell call. + sig do + returns( + OpenAI::Responses::ResponseInputItem::LocalShellCall::Status::OrSymbol + ) + end + attr_accessor :status + + # The type of the local shell call. Always `local_shell_call`. + sig { returns(Symbol) } + attr_accessor :type + + # A tool call to run a command on the local shell. + sig do + params( + id: String, + action: + OpenAI::Responses::ResponseInputItem::LocalShellCall::Action::OrHash, + call_id: String, + status: + OpenAI::Responses::ResponseInputItem::LocalShellCall::Status::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the local shell call. + id:, + # Execute a shell command on the server. + action:, + # The unique ID of the local shell tool call generated by the model. + call_id:, + # The status of the local shell call. + status:, + # The type of the local shell call. Always `local_shell_call`. + type: :local_shell_call + ) + end + + sig do + override.returns( + { + id: String, + action: + OpenAI::Responses::ResponseInputItem::LocalShellCall::Action, + call_id: String, + status: + OpenAI::Responses::ResponseInputItem::LocalShellCall::Status::OrSymbol, + type: Symbol + } + ) + end + def to_hash + end + + class Action < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::LocalShellCall::Action, + OpenAI::Internal::AnyHash + ) + end + + # The command to run. + sig { returns(T::Array[String]) } + attr_accessor :command + + # Environment variables to set for the command. + sig { returns(T::Hash[Symbol, String]) } + attr_accessor :env + + # The type of the local shell action. Always `exec`. + sig { returns(Symbol) } + attr_accessor :type + + # Optional timeout in milliseconds for the command. + sig { returns(T.nilable(Integer)) } + attr_accessor :timeout_ms + + # Optional user to run the command as. + sig { returns(T.nilable(String)) } + attr_accessor :user + + # Optional working directory to run the command in. + sig { returns(T.nilable(String)) } + attr_accessor :working_directory + + # Execute a shell command on the server. + sig do + params( + command: T::Array[String], + env: T::Hash[Symbol, String], + timeout_ms: T.nilable(Integer), + user: T.nilable(String), + working_directory: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The command to run. + command:, + # Environment variables to set for the command. + env:, + # Optional timeout in milliseconds for the command. + timeout_ms: nil, + # Optional user to run the command as. + user: nil, + # Optional working directory to run the command in. + working_directory: nil, + # The type of the local shell action. Always `exec`. + type: :exec + ) + end + + sig do + override.returns( + { + command: T::Array[String], + env: T::Hash[Symbol, String], + type: Symbol, + timeout_ms: T.nilable(Integer), + user: T.nilable(String), + working_directory: T.nilable(String) + } + ) + end + def to_hash + end + end + + # The status of the local shell call. 
+ module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseInputItem::LocalShellCall::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseInputItem::LocalShellCall::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseInputItem::LocalShellCall::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseInputItem::LocalShellCall::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputItem::LocalShellCall::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class LocalShellCallOutput < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the local shell tool call generated by the model. + sig { returns(String) } + attr_accessor :id + + # A JSON string of the output of the local shell tool call. + sig { returns(String) } + attr_accessor :output + + # The type of the local shell tool call output. Always `local_shell_call_output`. + sig { returns(Symbol) } + attr_accessor :type + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::Status::OrSymbol + ) + ) + end + attr_accessor :status + + # The output of a local shell tool call. + sig do + params( + id: String, + output: String, + status: + T.nilable( + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::Status::OrSymbol + ), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the local shell tool call generated by the model. + id:, + # A JSON string of the output of the local shell tool call. + output:, + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + status: nil, + # The type of the local shell tool call output. Always `local_shell_call_output`. + type: :local_shell_call_output + ) + end + + sig do + override.returns( + { + id: String, + output: String, + type: Symbol, + status: + T.nilable( + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::Status::OrSymbol + ) + } + ) + end + def to_hash + end + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class McpListTools < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::McpListTools, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the list. 
+ sig { returns(String) } + attr_accessor :id + + # The label of the MCP server. + sig { returns(String) } + attr_accessor :server_label + + # The tools available on the server. + sig do + returns( + T::Array[OpenAI::Responses::ResponseInputItem::McpListTools::Tool] + ) + end + attr_accessor :tools + + # The type of the item. Always `mcp_list_tools`. + sig { returns(Symbol) } + attr_accessor :type + + # Error message if the server could not list tools. + sig { returns(T.nilable(String)) } + attr_accessor :error + + # A list of tools available on an MCP server. + sig do + params( + id: String, + server_label: String, + tools: + T::Array[ + OpenAI::Responses::ResponseInputItem::McpListTools::Tool::OrHash + ], + error: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the list. + id:, + # The label of the MCP server. + server_label:, + # The tools available on the server. + tools:, + # Error message if the server could not list tools. + error: nil, + # The type of the item. Always `mcp_list_tools`. + type: :mcp_list_tools + ) + end + + sig do + override.returns( + { + id: String, + server_label: String, + tools: + T::Array[ + OpenAI::Responses::ResponseInputItem::McpListTools::Tool + ], + type: Symbol, + error: T.nilable(String) + } + ) + end + def to_hash + end + + class Tool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::McpListTools::Tool, + OpenAI::Internal::AnyHash + ) + end + + # The JSON schema describing the tool's input. + sig { returns(T.anything) } + attr_accessor :input_schema + + # The name of the tool. + sig { returns(String) } + attr_accessor :name + + # Additional annotations about the tool. + sig { returns(T.nilable(T.anything)) } + attr_accessor :annotations + + # The description of the tool. + sig { returns(T.nilable(String)) } + attr_accessor :description + + # A tool available on an MCP server. + sig do + params( + input_schema: T.anything, + name: String, + annotations: T.nilable(T.anything), + description: T.nilable(String) + ).returns(T.attached_class) + end + def self.new( + # The JSON schema describing the tool's input. + input_schema:, + # The name of the tool. + name:, + # Additional annotations about the tool. + annotations: nil, + # The description of the tool. + description: nil + ) + end + + sig do + override.returns( + { + input_schema: T.anything, + name: String, + annotations: T.nilable(T.anything), + description: T.nilable(String) + } + ) + end + def to_hash + end + end + end + + class McpApprovalRequest < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::McpApprovalRequest, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the approval request. + sig { returns(String) } + attr_accessor :id + + # A JSON string of arguments for the tool. + sig { returns(String) } + attr_accessor :arguments + + # The name of the tool to run. + sig { returns(String) } + attr_accessor :name + + # The label of the MCP server making the request. + sig { returns(String) } + attr_accessor :server_label + + # The type of the item. Always `mcp_approval_request`. + sig { returns(Symbol) } + attr_accessor :type + + # A request for human approval of a tool invocation. + sig do + params( + id: String, + arguments: String, + name: String, + server_label: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the approval request. 
+ id:, + # A JSON string of arguments for the tool. + arguments:, + # The name of the tool to run. + name:, + # The label of the MCP server making the request. + server_label:, + # The type of the item. Always `mcp_approval_request`. + type: :mcp_approval_request + ) + end + + sig do + override.returns( + { + id: String, + arguments: String, + name: String, + server_label: String, + type: Symbol + } + ) + end + def to_hash + end + end + + class McpApprovalResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::McpApprovalResponse, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the approval request being answered. + sig { returns(String) } + attr_accessor :approval_request_id + + # Whether the request was approved. + sig { returns(T::Boolean) } + attr_accessor :approve + + # The type of the item. Always `mcp_approval_response`. + sig { returns(Symbol) } + attr_accessor :type + + # The unique ID of the approval response + sig { returns(T.nilable(String)) } + attr_accessor :id + + # Optional reason for the decision. + sig { returns(T.nilable(String)) } + attr_accessor :reason + + # A response to an MCP approval request. + sig do + params( + approval_request_id: String, + approve: T::Boolean, + id: T.nilable(String), + reason: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the approval request being answered. + approval_request_id:, + # Whether the request was approved. + approve:, + # The unique ID of the approval response + id: nil, + # Optional reason for the decision. + reason: nil, + # The type of the item. Always `mcp_approval_response`. + type: :mcp_approval_response + ) + end + + sig do + override.returns( + { + approval_request_id: String, + approve: T::Boolean, + type: Symbol, + id: T.nilable(String), + reason: T.nilable(String) + } + ) + end + def to_hash + end + end + + class McpCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::McpCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the tool call. + sig { returns(String) } + attr_accessor :id + + # A JSON string of the arguments passed to the tool. + sig { returns(String) } + attr_accessor :arguments + + # The name of the tool that was run. + sig { returns(String) } + attr_accessor :name + + # The label of the MCP server running the tool. + sig { returns(String) } + attr_accessor :server_label + + # The type of the item. Always `mcp_call`. + sig { returns(Symbol) } + attr_accessor :type + + # The error from the tool call, if any. + sig { returns(T.nilable(String)) } + attr_accessor :error + + # The output from the tool call. + sig { returns(T.nilable(String)) } + attr_accessor :output + + # An invocation of a tool on an MCP server. + sig do + params( + id: String, + arguments: String, + name: String, + server_label: String, + error: T.nilable(String), + output: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the tool call. + id:, + # A JSON string of the arguments passed to the tool. + arguments:, + # The name of the tool that was run. + name:, + # The label of the MCP server running the tool. + server_label:, + # The error from the tool call, if any. + error: nil, + # The output from the tool call. + output: nil, + # The type of the item. Always `mcp_call`. 
+ type: :mcp_call + ) + end + + sig do + override.returns( + { + id: String, + arguments: String, + name: String, + server_label: String, + type: Symbol, + error: T.nilable(String), + output: T.nilable(String) + } + ) + end + def to_hash + end + end + + class ItemReference < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputItem::ItemReference, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the item to reference. + sig { returns(String) } + attr_accessor :id + + # The type of item to reference. Always `item_reference`. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseInputItem::ItemReference::Type::OrSymbol + ) + ) + end + attr_accessor :type + + # An internal identifier for an item to reference. + sig do + params( + id: String, + type: + T.nilable( + OpenAI::Responses::ResponseInputItem::ItemReference::Type::OrSymbol + ) + ).returns(T.attached_class) + end + def self.new( + # The ID of the item to reference. + id:, + # The type of item to reference. Always `item_reference`. + type: nil + ) + end + + sig do + override.returns( + { + id: String, + type: + T.nilable( + OpenAI::Responses::ResponseInputItem::ItemReference::Type::OrSymbol + ) + } + ) + end + def to_hash + end + + # The type of item to reference. Always `item_reference`. + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseInputItem::ItemReference::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ITEM_REFERENCE = + T.let( + :item_reference, + OpenAI::Responses::ResponseInputItem::ItemReference::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputItem::ItemReference::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + sig do + override.returns( + T::Array[OpenAI::Responses::ResponseInputItem::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_input_message_content_list.rbi b/rbi/openai/models/responses/response_input_message_content_list.rbi new file mode 100644 index 00000000..15396775 --- /dev/null +++ b/rbi/openai/models/responses/response_input_message_content_list.rbi @@ -0,0 +1,15 @@ +# typed: strong + +module OpenAI + module Models + module Responses + ResponseInputMessageContentList = + T.let( + OpenAI::Internal::Type::ArrayOf[ + union: OpenAI::Responses::ResponseInputContent + ], + OpenAI::Internal::Type::Converter + ) + end + end +end diff --git a/rbi/openai/models/responses/response_input_message_item.rbi b/rbi/openai/models/responses/response_input_message_item.rbi new file mode 100644 index 00000000..f36d17e6 --- /dev/null +++ b/rbi/openai/models/responses/response_input_message_item.rbi @@ -0,0 +1,225 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseInputMessageItem < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputMessageItem, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the message input. + sig { returns(String) } + attr_accessor :id + + # A list of one or many input items to the model, containing different content + # types. + sig do + returns(T::Array[OpenAI::Responses::ResponseInputContent::Variants]) + end + attr_accessor :content + + # The role of the message input. One of `user`, `system`, or `developer`. 
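+ #
+ # Hypothetical sketch (illustration only): the `Role` constants defined
+ # further down are plain symbols at runtime, e.g.
+ #
+ #   item.role == :developer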
+ sig do
+ returns(
+ OpenAI::Responses::ResponseInputMessageItem::Role::TaggedSymbol
+ )
+ end
+ attr_accessor :role
+
+ # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+ # Populated when items are returned via API.
+ sig do
+ returns(
+ T.nilable(
+ OpenAI::Responses::ResponseInputMessageItem::Status::TaggedSymbol
+ )
+ )
+ end
+ attr_reader :status
+
+ sig do
+ params(
+ status:
+ OpenAI::Responses::ResponseInputMessageItem::Status::OrSymbol
+ ).void
+ end
+ attr_writer :status
+
+ # The type of the message input. Always set to `message`.
+ sig do
+ returns(
+ T.nilable(
+ OpenAI::Responses::ResponseInputMessageItem::Type::TaggedSymbol
+ )
+ )
+ end
+ attr_reader :type
+
+ sig do
+ params(
+ type: OpenAI::Responses::ResponseInputMessageItem::Type::OrSymbol
+ ).void
+ end
+ attr_writer :type
+
+ sig do
+ params(
+ id: String,
+ content:
+ T::Array[
+ T.any(
+ OpenAI::Responses::ResponseInputText::OrHash,
+ OpenAI::Responses::ResponseInputImage::OrHash,
+ OpenAI::Responses::ResponseInputFile::OrHash
+ )
+ ],
+ role: OpenAI::Responses::ResponseInputMessageItem::Role::OrSymbol,
+ status:
+ OpenAI::Responses::ResponseInputMessageItem::Status::OrSymbol,
+ type: OpenAI::Responses::ResponseInputMessageItem::Type::OrSymbol
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # The unique ID of the message input.
+ id:,
+ # A list of one or many input items to the model, containing different content
+ # types.
+ content:,
+ # The role of the message input. One of `user`, `system`, or `developer`.
+ role:,
+ # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+ # Populated when items are returned via API.
+ status: nil,
+ # The type of the message input. Always set to `message`.
+ type: nil
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ id: String,
+ content:
+ T::Array[OpenAI::Responses::ResponseInputContent::Variants],
+ role:
+ OpenAI::Responses::ResponseInputMessageItem::Role::TaggedSymbol,
+ status:
+ OpenAI::Responses::ResponseInputMessageItem::Status::TaggedSymbol,
+ type:
+ OpenAI::Responses::ResponseInputMessageItem::Type::TaggedSymbol
+ }
+ )
+ end
+ def to_hash
+ end
+
+ # The role of the message input. One of `user`, `system`, or `developer`.
+ module Role
+ extend OpenAI::Internal::Type::Enum
+
+ TaggedSymbol =
+ T.type_alias do
+ T.all(Symbol, OpenAI::Responses::ResponseInputMessageItem::Role)
+ end
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ USER =
+ T.let(
+ :user,
+ OpenAI::Responses::ResponseInputMessageItem::Role::TaggedSymbol
+ )
+ SYSTEM =
+ T.let(
+ :system,
+ OpenAI::Responses::ResponseInputMessageItem::Role::TaggedSymbol
+ )
+ DEVELOPER =
+ T.let(
+ :developer,
+ OpenAI::Responses::ResponseInputMessageItem::Role::TaggedSymbol
+ )
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::Responses::ResponseInputMessageItem::Role::TaggedSymbol
+ ]
+ )
+ end
+ def self.values
+ end
+ end
+
+ # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+ # Populated when items are returned via API.
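+ #
+ # Hypothetical sketch (illustration only): `status` is only populated on
+ # items read back from the API, so it may be `nil` elsewhere, e.g.
+ #
+ #   item.status # => :completed, or nil for hand-built items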
+ module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ResponseInputMessageItem::Status) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseInputMessageItem::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseInputMessageItem::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseInputMessageItem::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputMessageItem::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The type of the message input. Always set to `message`. + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ResponseInputMessageItem::Type) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + MESSAGE = + T.let( + :message, + OpenAI::Responses::ResponseInputMessageItem::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseInputMessageItem::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_input_text.rbi b/rbi/openai/models/responses/response_input_text.rbi new file mode 100644 index 00000000..fddfbd40 --- /dev/null +++ b/rbi/openai/models/responses/response_input_text.rbi @@ -0,0 +1,39 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseInputText < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputText, + OpenAI::Internal::AnyHash + ) + end + + # The text input to the model. + sig { returns(String) } + attr_accessor :text + + # The type of the input item. Always `input_text`. + sig { returns(Symbol) } + attr_accessor :type + + # A text input to the model. + sig { params(text: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The text input to the model. + text:, + # The type of the input item. Always `input_text`. + type: :input_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_item.rbi b/rbi/openai/models/responses/response_item.rbi new file mode 100644 index 00000000..9566555a --- /dev/null +++ b/rbi/openai/models/responses/response_item.rbi @@ -0,0 +1,820 @@ +# typed: strong + +module OpenAI + module Models + module Responses + # Content item used to generate a response. 
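+ #
+ # Hypothetical dispatch sketch over this union (`handle_message` and
+ # `handle_tool_call` are illustrative names, not SDK methods):
+ #
+ #   case item
+ #   when OpenAI::Responses::ResponseInputMessageItem then handle_message(item)
+ #   when OpenAI::Responses::ResponseFunctionToolCallItem then handle_tool_call(item)
+ #   else
+ #     # one of the other variants listed in `Variants` below
+ #   end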
+ module ResponseItem + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseInputMessageItem, + OpenAI::Responses::ResponseOutputMessage, + OpenAI::Responses::ResponseFileSearchToolCall, + OpenAI::Responses::ResponseComputerToolCall, + OpenAI::Responses::ResponseComputerToolCallOutputItem, + OpenAI::Responses::ResponseFunctionWebSearch, + OpenAI::Responses::ResponseFunctionToolCallItem, + OpenAI::Responses::ResponseFunctionToolCallOutputItem, + OpenAI::Responses::ResponseItem::ImageGenerationCall, + OpenAI::Responses::ResponseCodeInterpreterToolCall, + OpenAI::Responses::ResponseItem::LocalShellCall, + OpenAI::Responses::ResponseItem::LocalShellCallOutput, + OpenAI::Responses::ResponseItem::McpListTools, + OpenAI::Responses::ResponseItem::McpApprovalRequest, + OpenAI::Responses::ResponseItem::McpApprovalResponse, + OpenAI::Responses::ResponseItem::McpCall + ) + end + + class ImageGenerationCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseItem::ImageGenerationCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the image generation call. + sig { returns(String) } + attr_accessor :id + + # The generated image encoded in base64. + sig { returns(T.nilable(String)) } + attr_accessor :result + + # The status of the image generation call. + sig do + returns( + OpenAI::Responses::ResponseItem::ImageGenerationCall::Status::TaggedSymbol + ) + end + attr_accessor :status + + # The type of the image generation call. Always `image_generation_call`. + sig { returns(Symbol) } + attr_accessor :type + + # An image generation request made by the model. + sig do + params( + id: String, + result: T.nilable(String), + status: + OpenAI::Responses::ResponseItem::ImageGenerationCall::Status::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the image generation call. + id:, + # The generated image encoded in base64. + result:, + # The status of the image generation call. + status:, + # The type of the image generation call. Always `image_generation_call`. + type: :image_generation_call + ) + end + + sig do + override.returns( + { + id: String, + result: T.nilable(String), + status: + OpenAI::Responses::ResponseItem::ImageGenerationCall::Status::TaggedSymbol, + type: Symbol + } + ) + end + def to_hash + end + + # The status of the image generation call. 
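+ #
+ # Hypothetical sketch (illustration only): once the status is
+ # `:completed`, `result` holds base64-encoded image data, e.g.
+ #
+ #   require "base64"
+ #   File.binwrite("image.png", Base64.decode64(call.result)) if call.status == :completed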
+ module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseItem::ImageGenerationCall::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseItem::ImageGenerationCall::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseItem::ImageGenerationCall::Status::TaggedSymbol + ) + GENERATING = + T.let( + :generating, + OpenAI::Responses::ResponseItem::ImageGenerationCall::Status::TaggedSymbol + ) + FAILED = + T.let( + :failed, + OpenAI::Responses::ResponseItem::ImageGenerationCall::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseItem::ImageGenerationCall::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class LocalShellCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseItem::LocalShellCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the local shell call. + sig { returns(String) } + attr_accessor :id + + # Execute a shell command on the server. + sig do + returns(OpenAI::Responses::ResponseItem::LocalShellCall::Action) + end + attr_reader :action + + sig do + params( + action: + OpenAI::Responses::ResponseItem::LocalShellCall::Action::OrHash + ).void + end + attr_writer :action + + # The unique ID of the local shell tool call generated by the model. + sig { returns(String) } + attr_accessor :call_id + + # The status of the local shell call. + sig do + returns( + OpenAI::Responses::ResponseItem::LocalShellCall::Status::TaggedSymbol + ) + end + attr_accessor :status + + # The type of the local shell call. Always `local_shell_call`. + sig { returns(Symbol) } + attr_accessor :type + + # A tool call to run a command on the local shell. + sig do + params( + id: String, + action: + OpenAI::Responses::ResponseItem::LocalShellCall::Action::OrHash, + call_id: String, + status: + OpenAI::Responses::ResponseItem::LocalShellCall::Status::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the local shell call. + id:, + # Execute a shell command on the server. + action:, + # The unique ID of the local shell tool call generated by the model. + call_id:, + # The status of the local shell call. + status:, + # The type of the local shell call. Always `local_shell_call`. + type: :local_shell_call + ) + end + + sig do + override.returns( + { + id: String, + action: OpenAI::Responses::ResponseItem::LocalShellCall::Action, + call_id: String, + status: + OpenAI::Responses::ResponseItem::LocalShellCall::Status::TaggedSymbol, + type: Symbol + } + ) + end + def to_hash + end + + class Action < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseItem::LocalShellCall::Action, + OpenAI::Internal::AnyHash + ) + end + + # The command to run. + sig { returns(T::Array[String]) } + attr_accessor :command + + # Environment variables to set for the command. + sig { returns(T::Hash[Symbol, String]) } + attr_accessor :env + + # The type of the local shell action. Always `exec`. + sig { returns(Symbol) } + attr_accessor :type + + # Optional timeout in milliseconds for the command. + sig { returns(T.nilable(Integer)) } + attr_accessor :timeout_ms + + # Optional user to run the command as. 
+ sig { returns(T.nilable(String)) } + attr_accessor :user + + # Optional working directory to run the command in. + sig { returns(T.nilable(String)) } + attr_accessor :working_directory + + # Execute a shell command on the server. + sig do + params( + command: T::Array[String], + env: T::Hash[Symbol, String], + timeout_ms: T.nilable(Integer), + user: T.nilable(String), + working_directory: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The command to run. + command:, + # Environment variables to set for the command. + env:, + # Optional timeout in milliseconds for the command. + timeout_ms: nil, + # Optional user to run the command as. + user: nil, + # Optional working directory to run the command in. + working_directory: nil, + # The type of the local shell action. Always `exec`. + type: :exec + ) + end + + sig do + override.returns( + { + command: T::Array[String], + env: T::Hash[Symbol, String], + type: Symbol, + timeout_ms: T.nilable(Integer), + user: T.nilable(String), + working_directory: T.nilable(String) + } + ) + end + def to_hash + end + end + + # The status of the local shell call. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseItem::LocalShellCall::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseItem::LocalShellCall::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseItem::LocalShellCall::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseItem::LocalShellCall::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseItem::LocalShellCall::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class LocalShellCallOutput < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseItem::LocalShellCallOutput, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the local shell tool call generated by the model. + sig { returns(String) } + attr_accessor :id + + # A JSON string of the output of the local shell tool call. + sig { returns(String) } + attr_accessor :output + + # The type of the local shell tool call output. Always `local_shell_call_output`. + sig { returns(Symbol) } + attr_accessor :type + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseItem::LocalShellCallOutput::Status::TaggedSymbol + ) + ) + end + attr_accessor :status + + # The output of a local shell tool call. + sig do + params( + id: String, + output: String, + status: + T.nilable( + OpenAI::Responses::ResponseItem::LocalShellCallOutput::Status::OrSymbol + ), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the local shell tool call generated by the model. + id:, + # A JSON string of the output of the local shell tool call. + output:, + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + status: nil, + # The type of the local shell tool call output. Always `local_shell_call_output`. 
+ type: :local_shell_call_output + ) + end + + sig do + override.returns( + { + id: String, + output: String, + type: Symbol, + status: + T.nilable( + OpenAI::Responses::ResponseItem::LocalShellCallOutput::Status::TaggedSymbol + ) + } + ) + end + def to_hash + end + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseItem::LocalShellCallOutput::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseItem::LocalShellCallOutput::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseItem::LocalShellCallOutput::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseItem::LocalShellCallOutput::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseItem::LocalShellCallOutput::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class McpListTools < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseItem::McpListTools, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the list. + sig { returns(String) } + attr_accessor :id + + # The label of the MCP server. + sig { returns(String) } + attr_accessor :server_label + + # The tools available on the server. + sig do + returns( + T::Array[OpenAI::Responses::ResponseItem::McpListTools::Tool] + ) + end + attr_accessor :tools + + # The type of the item. Always `mcp_list_tools`. + sig { returns(Symbol) } + attr_accessor :type + + # Error message if the server could not list tools. + sig { returns(T.nilable(String)) } + attr_accessor :error + + # A list of tools available on an MCP server. + sig do + params( + id: String, + server_label: String, + tools: + T::Array[ + OpenAI::Responses::ResponseItem::McpListTools::Tool::OrHash + ], + error: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the list. + id:, + # The label of the MCP server. + server_label:, + # The tools available on the server. + tools:, + # Error message if the server could not list tools. + error: nil, + # The type of the item. Always `mcp_list_tools`. + type: :mcp_list_tools + ) + end + + sig do + override.returns( + { + id: String, + server_label: String, + tools: + T::Array[OpenAI::Responses::ResponseItem::McpListTools::Tool], + type: Symbol, + error: T.nilable(String) + } + ) + end + def to_hash + end + + class Tool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseItem::McpListTools::Tool, + OpenAI::Internal::AnyHash + ) + end + + # The JSON schema describing the tool's input. + sig { returns(T.anything) } + attr_accessor :input_schema + + # The name of the tool. + sig { returns(String) } + attr_accessor :name + + # Additional annotations about the tool. + sig { returns(T.nilable(T.anything)) } + attr_accessor :annotations + + # The description of the tool. + sig { returns(T.nilable(String)) } + attr_accessor :description + + # A tool available on an MCP server. + sig do + params( + input_schema: T.anything, + name: String, + annotations: T.nilable(T.anything), + description: T.nilable(String) + ).returns(T.attached_class) + end + def self.new( + # The JSON schema describing the tool's input. 
+ input_schema:, + # The name of the tool. + name:, + # Additional annotations about the tool. + annotations: nil, + # The description of the tool. + description: nil + ) + end + + sig do + override.returns( + { + input_schema: T.anything, + name: String, + annotations: T.nilable(T.anything), + description: T.nilable(String) + } + ) + end + def to_hash + end + end + end + + class McpApprovalRequest < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseItem::McpApprovalRequest, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the approval request. + sig { returns(String) } + attr_accessor :id + + # A JSON string of arguments for the tool. + sig { returns(String) } + attr_accessor :arguments + + # The name of the tool to run. + sig { returns(String) } + attr_accessor :name + + # The label of the MCP server making the request. + sig { returns(String) } + attr_accessor :server_label + + # The type of the item. Always `mcp_approval_request`. + sig { returns(Symbol) } + attr_accessor :type + + # A request for human approval of a tool invocation. + sig do + params( + id: String, + arguments: String, + name: String, + server_label: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the approval request. + id:, + # A JSON string of arguments for the tool. + arguments:, + # The name of the tool to run. + name:, + # The label of the MCP server making the request. + server_label:, + # The type of the item. Always `mcp_approval_request`. + type: :mcp_approval_request + ) + end + + sig do + override.returns( + { + id: String, + arguments: String, + name: String, + server_label: String, + type: Symbol + } + ) + end + def to_hash + end + end + + class McpApprovalResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseItem::McpApprovalResponse, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the approval response + sig { returns(String) } + attr_accessor :id + + # The ID of the approval request being answered. + sig { returns(String) } + attr_accessor :approval_request_id + + # Whether the request was approved. + sig { returns(T::Boolean) } + attr_accessor :approve + + # The type of the item. Always `mcp_approval_response`. + sig { returns(Symbol) } + attr_accessor :type + + # Optional reason for the decision. + sig { returns(T.nilable(String)) } + attr_accessor :reason + + # A response to an MCP approval request. + sig do + params( + id: String, + approval_request_id: String, + approve: T::Boolean, + reason: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the approval response + id:, + # The ID of the approval request being answered. + approval_request_id:, + # Whether the request was approved. + approve:, + # Optional reason for the decision. + reason: nil, + # The type of the item. Always `mcp_approval_response`. + type: :mcp_approval_response + ) + end + + sig do + override.returns( + { + id: String, + approval_request_id: String, + approve: T::Boolean, + type: Symbol, + reason: T.nilable(String) + } + ) + end + def to_hash + end + end + + class McpCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseItem::McpCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the tool call. + sig { returns(String) } + attr_accessor :id + + # A JSON string of the arguments passed to the tool. 
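+ #
+ # Hypothetical decoding sketch (illustration only; the payload shown is
+ # made up):
+ #
+ #   require "json"
+ #   JSON.parse(call.arguments) # => {"city" => "Oslo"}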
+ sig { returns(String) } + attr_accessor :arguments + + # The name of the tool that was run. + sig { returns(String) } + attr_accessor :name + + # The label of the MCP server running the tool. + sig { returns(String) } + attr_accessor :server_label + + # The type of the item. Always `mcp_call`. + sig { returns(Symbol) } + attr_accessor :type + + # The error from the tool call, if any. + sig { returns(T.nilable(String)) } + attr_accessor :error + + # The output from the tool call. + sig { returns(T.nilable(String)) } + attr_accessor :output + + # An invocation of a tool on an MCP server. + sig do + params( + id: String, + arguments: String, + name: String, + server_label: String, + error: T.nilable(String), + output: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the tool call. + id:, + # A JSON string of the arguments passed to the tool. + arguments:, + # The name of the tool that was run. + name:, + # The label of the MCP server running the tool. + server_label:, + # The error from the tool call, if any. + error: nil, + # The output from the tool call. + output: nil, + # The type of the item. Always `mcp_call`. + type: :mcp_call + ) + end + + sig do + override.returns( + { + id: String, + arguments: String, + name: String, + server_label: String, + type: Symbol, + error: T.nilable(String), + output: T.nilable(String) + } + ) + end + def to_hash + end + end + + sig do + override.returns(T::Array[OpenAI::Responses::ResponseItem::Variants]) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_item_list.rbi b/rbi/openai/models/responses/response_item_list.rbi new file mode 100644 index 00000000..4e645d9f --- /dev/null +++ b/rbi/openai/models/responses/response_item_list.rbi @@ -0,0 +1,97 @@ +# typed: strong + +module OpenAI + module Models + ResponseItemList = Responses::ResponseItemList + + module Responses + class ResponseItemList < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseItemList, + OpenAI::Internal::AnyHash + ) + end + + # A list of items used to generate this response. + sig { returns(T::Array[OpenAI::Responses::ResponseItem::Variants]) } + attr_accessor :data + + # The ID of the first item in the list. + sig { returns(String) } + attr_accessor :first_id + + # Whether there are more items available. + sig { returns(T::Boolean) } + attr_accessor :has_more + + # The ID of the last item in the list. + sig { returns(String) } + attr_accessor :last_id + + # The type of object returned, must be `list`. + sig { returns(Symbol) } + attr_accessor :object + + # A list of Response items. 
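+ #
+ # Hypothetical cursor-pagination sketch (`fetch_next_page` is an
+ # illustrative name, not an SDK method):
+ #
+ #   page.data.each { |item| puts item.to_hash }
+ #   fetch_next_page(after: page.last_id) if page.has_more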
+ sig do + params( + data: + T::Array[ + T.any( + OpenAI::Responses::ResponseInputMessageItem::OrHash, + OpenAI::Responses::ResponseOutputMessage::OrHash, + OpenAI::Responses::ResponseFileSearchToolCall::OrHash, + OpenAI::Responses::ResponseComputerToolCall::OrHash, + OpenAI::Responses::ResponseComputerToolCallOutputItem::OrHash, + OpenAI::Responses::ResponseFunctionWebSearch::OrHash, + OpenAI::Responses::ResponseFunctionToolCallItem::OrHash, + OpenAI::Responses::ResponseFunctionToolCallOutputItem::OrHash, + OpenAI::Responses::ResponseItem::ImageGenerationCall::OrHash, + OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, + OpenAI::Responses::ResponseItem::LocalShellCall::OrHash, + OpenAI::Responses::ResponseItem::LocalShellCallOutput::OrHash, + OpenAI::Responses::ResponseItem::McpListTools::OrHash, + OpenAI::Responses::ResponseItem::McpApprovalRequest::OrHash, + OpenAI::Responses::ResponseItem::McpApprovalResponse::OrHash, + OpenAI::Responses::ResponseItem::McpCall::OrHash + ) + ], + first_id: String, + has_more: T::Boolean, + last_id: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # A list of items used to generate this response. + data:, + # The ID of the first item in the list. + first_id:, + # Whether there are more items available. + has_more:, + # The ID of the last item in the list. + last_id:, + # The type of object returned, must be `list`. + object: :list + ) + end + + sig do + override.returns( + { + data: T::Array[OpenAI::Responses::ResponseItem::Variants], + first_id: String, + has_more: T::Boolean, + last_id: String, + object: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi b/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi new file mode 100644 index 00000000..b45c2295 --- /dev/null +++ b/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi @@ -0,0 +1,78 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseMcpCallArgumentsDeltaEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseMcpCallArgumentsDeltaEvent, + OpenAI::Internal::AnyHash + ) + end + + # A JSON string containing the partial update to the arguments for the MCP tool + # call. + sig { returns(String) } + attr_accessor :delta + + # The unique identifier of the MCP tool call item being processed. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item in the response's output array. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always 'response.mcp_call_arguments.delta'. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when there is a delta (partial update) to the arguments of an MCP tool + # call. + sig do + params( + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # A JSON string containing the partial update to the arguments for the MCP tool + # call. + delta:, + # The unique identifier of the MCP tool call item being processed. + item_id:, + # The index of the output item in the response's output array. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always 'response.mcp_call_arguments.delta'. 
+ type: :"response.mcp_call_arguments.delta" + ) + end + + sig do + override.returns( + { + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi b/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi new file mode 100644 index 00000000..c37ddf46 --- /dev/null +++ b/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi @@ -0,0 +1,75 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseMcpCallArgumentsDoneEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseMcpCallArgumentsDoneEvent, + OpenAI::Internal::AnyHash + ) + end + + # A JSON string containing the finalized arguments for the MCP tool call. + sig { returns(String) } + attr_accessor :arguments + + # The unique identifier of the MCP tool call item being processed. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item in the response's output array. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always 'response.mcp_call_arguments.done'. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when the arguments for an MCP tool call are finalized. + sig do + params( + arguments: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # A JSON string containing the finalized arguments for the MCP tool call. + arguments:, + # The unique identifier of the MCP tool call item being processed. + item_id:, + # The index of the output item in the response's output array. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always 'response.mcp_call_arguments.done'. + type: :"response.mcp_call_arguments.done" + ) + end + + sig do + override.returns( + { + arguments: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_mcp_call_completed_event.rbi b/rbi/openai/models/responses/response_mcp_call_completed_event.rbi new file mode 100644 index 00000000..88890871 --- /dev/null +++ b/rbi/openai/models/responses/response_mcp_call_completed_event.rbi @@ -0,0 +1,67 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseMcpCallCompletedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseMcpCallCompletedEvent, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the MCP tool call item that completed. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item that completed. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always 'response.mcp_call.completed'. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when an MCP tool call has completed successfully. 
+ sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the MCP tool call item that completed. + item_id:, + # The index of the output item that completed. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always 'response.mcp_call.completed'. + type: :"response.mcp_call.completed" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_mcp_call_failed_event.rbi b/rbi/openai/models/responses/response_mcp_call_failed_event.rbi new file mode 100644 index 00000000..364dbbe3 --- /dev/null +++ b/rbi/openai/models/responses/response_mcp_call_failed_event.rbi @@ -0,0 +1,67 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseMcpCallFailedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseMcpCallFailedEvent, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the MCP tool call item that failed. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item that failed. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always 'response.mcp_call.failed'. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when an MCP tool call has failed. + sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the MCP tool call item that failed. + item_id:, + # The index of the output item that failed. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always 'response.mcp_call.failed'. + type: :"response.mcp_call.failed" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_mcp_call_in_progress_event.rbi b/rbi/openai/models/responses/response_mcp_call_in_progress_event.rbi new file mode 100644 index 00000000..ebe6399f --- /dev/null +++ b/rbi/openai/models/responses/response_mcp_call_in_progress_event.rbi @@ -0,0 +1,67 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseMcpCallInProgressEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseMcpCallInProgressEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique identifier of the MCP tool call item being processed. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item in the response's output array. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always 'response.mcp_call.in_progress'. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when an MCP tool call is in progress. 
+ sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique identifier of the MCP tool call item being processed. + item_id:, + # The index of the output item in the response's output array. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always 'response.mcp_call.in_progress'. + type: :"response.mcp_call.in_progress" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_mcp_list_tools_completed_event.rbi b/rbi/openai/models/responses/response_mcp_list_tools_completed_event.rbi new file mode 100644 index 00000000..76c90724 --- /dev/null +++ b/rbi/openai/models/responses/response_mcp_list_tools_completed_event.rbi @@ -0,0 +1,67 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseMcpListToolsCompletedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseMcpListToolsCompletedEvent, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the MCP tool call item that produced this output. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item that was processed. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always 'response.mcp_list_tools.completed'. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when the list of available MCP tools has been successfully retrieved. + sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the MCP tool call item that produced this output. + item_id:, + # The index of the output item that was processed. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always 'response.mcp_list_tools.completed'. + type: :"response.mcp_list_tools.completed" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_mcp_list_tools_failed_event.rbi b/rbi/openai/models/responses/response_mcp_list_tools_failed_event.rbi new file mode 100644 index 00000000..1e4748f4 --- /dev/null +++ b/rbi/openai/models/responses/response_mcp_list_tools_failed_event.rbi @@ -0,0 +1,67 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseMcpListToolsFailedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseMcpListToolsFailedEvent, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the MCP tool call item that failed. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item that failed. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always 'response.mcp_list_tools.failed'. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when the attempt to list available MCP tools has failed. 
+ sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the MCP tool call item that failed. + item_id:, + # The index of the output item that failed. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always 'response.mcp_list_tools.failed'. + type: :"response.mcp_list_tools.failed" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_mcp_list_tools_in_progress_event.rbi b/rbi/openai/models/responses/response_mcp_list_tools_in_progress_event.rbi new file mode 100644 index 00000000..17686c8a --- /dev/null +++ b/rbi/openai/models/responses/response_mcp_list_tools_in_progress_event.rbi @@ -0,0 +1,68 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseMcpListToolsInProgressEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseMcpListToolsInProgressEvent, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the MCP tool call item that is being processed. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item that is being processed. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always 'response.mcp_list_tools.in_progress'. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when the system is in the process of retrieving the list of available + # MCP tools. + sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the MCP tool call item that is being processed. + item_id:, + # The index of the output item that is being processed. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always 'response.mcp_list_tools.in_progress'. + type: :"response.mcp_list_tools.in_progress" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_output_audio.rbi b/rbi/openai/models/responses/response_output_audio.rbi new file mode 100644 index 00000000..49472582 --- /dev/null +++ b/rbi/openai/models/responses/response_output_audio.rbi @@ -0,0 +1,51 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseOutputAudio < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputAudio, + OpenAI::Internal::AnyHash + ) + end + + # Base64-encoded audio data from the model. + sig { returns(String) } + attr_accessor :data + + # The transcript of the audio data from the model. + sig { returns(String) } + attr_accessor :transcript + + # The type of the output audio. Always `output_audio`. + sig { returns(Symbol) } + attr_accessor :type + + # An audio output from the model. + sig do + params(data: String, transcript: String, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # Base64-encoded audio data from the model. 
+ data:, + # The transcript of the audio data from the model. + transcript:, + # The type of the output audio. Always `output_audio`. + type: :output_audio + ) + end + + sig do + override.returns({ data: String, transcript: String, type: Symbol }) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_output_item.rbi b/rbi/openai/models/responses/response_output_item.rbi new file mode 100644 index 00000000..1f08b218 --- /dev/null +++ b/rbi/openai/models/responses/response_output_item.rbi @@ -0,0 +1,646 @@ +# typed: strong + +module OpenAI + module Models + module Responses + # An output message from the model. + module ResponseOutputItem + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputMessage, + OpenAI::Responses::ResponseFileSearchToolCall, + OpenAI::Responses::ResponseFunctionToolCall, + OpenAI::Responses::ResponseFunctionWebSearch, + OpenAI::Responses::ResponseComputerToolCall, + OpenAI::Responses::ResponseReasoningItem, + OpenAI::Responses::ResponseOutputItem::ImageGenerationCall, + OpenAI::Responses::ResponseCodeInterpreterToolCall, + OpenAI::Responses::ResponseOutputItem::LocalShellCall, + OpenAI::Responses::ResponseOutputItem::McpCall, + OpenAI::Responses::ResponseOutputItem::McpListTools, + OpenAI::Responses::ResponseOutputItem::McpApprovalRequest, + OpenAI::Responses::ResponseCustomToolCall + ) + end + + class ImageGenerationCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputItem::ImageGenerationCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the image generation call. + sig { returns(String) } + attr_accessor :id + + # The generated image encoded in base64. + sig { returns(T.nilable(String)) } + attr_accessor :result + + # The status of the image generation call. + sig do + returns( + OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::Status::TaggedSymbol + ) + end + attr_accessor :status + + # The type of the image generation call. Always `image_generation_call`. + sig { returns(Symbol) } + attr_accessor :type + + # An image generation request made by the model. + sig do + params( + id: String, + result: T.nilable(String), + status: + OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::Status::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the image generation call. + id:, + # The generated image encoded in base64. + result:, + # The status of the image generation call. + status:, + # The type of the image generation call. Always `image_generation_call`. + type: :image_generation_call + ) + end + + sig do + override.returns( + { + id: String, + result: T.nilable(String), + status: + OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::Status::TaggedSymbol, + type: Symbol + } + ) + end + def to_hash + end + + # The status of the image generation call. 
+ module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::Status::TaggedSymbol + ) + GENERATING = + T.let( + :generating, + OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::Status::TaggedSymbol + ) + FAILED = + T.let( + :failed, + OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class LocalShellCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputItem::LocalShellCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the local shell call. + sig { returns(String) } + attr_accessor :id + + # Execute a shell command on the server. + sig do + returns( + OpenAI::Responses::ResponseOutputItem::LocalShellCall::Action + ) + end + attr_reader :action + + sig do + params( + action: + OpenAI::Responses::ResponseOutputItem::LocalShellCall::Action::OrHash + ).void + end + attr_writer :action + + # The unique ID of the local shell tool call generated by the model. + sig { returns(String) } + attr_accessor :call_id + + # The status of the local shell call. + sig do + returns( + OpenAI::Responses::ResponseOutputItem::LocalShellCall::Status::TaggedSymbol + ) + end + attr_accessor :status + + # The type of the local shell call. Always `local_shell_call`. + sig { returns(Symbol) } + attr_accessor :type + + # A tool call to run a command on the local shell. + sig do + params( + id: String, + action: + OpenAI::Responses::ResponseOutputItem::LocalShellCall::Action::OrHash, + call_id: String, + status: + OpenAI::Responses::ResponseOutputItem::LocalShellCall::Status::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the local shell call. + id:, + # Execute a shell command on the server. + action:, + # The unique ID of the local shell tool call generated by the model. + call_id:, + # The status of the local shell call. + status:, + # The type of the local shell call. Always `local_shell_call`. + type: :local_shell_call + ) + end + + sig do + override.returns( + { + id: String, + action: + OpenAI::Responses::ResponseOutputItem::LocalShellCall::Action, + call_id: String, + status: + OpenAI::Responses::ResponseOutputItem::LocalShellCall::Status::TaggedSymbol, + type: Symbol + } + ) + end + def to_hash + end + + class Action < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputItem::LocalShellCall::Action, + OpenAI::Internal::AnyHash + ) + end + + # The command to run. + sig { returns(T::Array[String]) } + attr_accessor :command + + # Environment variables to set for the command. + sig { returns(T::Hash[Symbol, String]) } + attr_accessor :env + + # The type of the local shell action. Always `exec`. + sig { returns(Symbol) } + attr_accessor :type + + # Optional timeout in milliseconds for the command. + sig { returns(T.nilable(Integer)) } + attr_accessor :timeout_ms + + # Optional user to run the command as. 
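The `Status` enum above illustrates the SDK's tagged-symbol pattern: each member is an ordinary Ruby symbol wrapped in `T.let` against a `T.all` type alias, so runtime comparisons stay cheap while Sorbet tracks the narrower type. A small sketch of what that means in practice (assumed REPL usage, not part of the diff):

status = OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::Status
status::COMPLETED == :completed  # => true; tagged symbols are plain symbols at runtime
status.values                    # => the four members, typed as TaggedSymbol for Sorbet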
+ sig { returns(T.nilable(String)) } + attr_accessor :user + + # Optional working directory to run the command in. + sig { returns(T.nilable(String)) } + attr_accessor :working_directory + + # Execute a shell command on the server. + sig do + params( + command: T::Array[String], + env: T::Hash[Symbol, String], + timeout_ms: T.nilable(Integer), + user: T.nilable(String), + working_directory: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The command to run. + command:, + # Environment variables to set for the command. + env:, + # Optional timeout in milliseconds for the command. + timeout_ms: nil, + # Optional user to run the command as. + user: nil, + # Optional working directory to run the command in. + working_directory: nil, + # The type of the local shell action. Always `exec`. + type: :exec + ) + end + + sig do + override.returns( + { + command: T::Array[String], + env: T::Hash[Symbol, String], + type: Symbol, + timeout_ms: T.nilable(Integer), + user: T.nilable(String), + working_directory: T.nilable(String) + } + ) + end + def to_hash + end + end + + # The status of the local shell call. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::ResponseOutputItem::LocalShellCall::Status + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseOutputItem::LocalShellCall::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseOutputItem::LocalShellCall::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseOutputItem::LocalShellCall::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseOutputItem::LocalShellCall::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class McpCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputItem::McpCall, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the tool call. + sig { returns(String) } + attr_accessor :id + + # A JSON string of the arguments passed to the tool. + sig { returns(String) } + attr_accessor :arguments + + # The name of the tool that was run. + sig { returns(String) } + attr_accessor :name + + # The label of the MCP server running the tool. + sig { returns(String) } + attr_accessor :server_label + + # The type of the item. Always `mcp_call`. + sig { returns(Symbol) } + attr_accessor :type + + # The error from the tool call, if any. + sig { returns(T.nilable(String)) } + attr_accessor :error + + # The output from the tool call. + sig { returns(T.nilable(String)) } + attr_accessor :output + + # An invocation of a tool on an MCP server. + sig do + params( + id: String, + arguments: String, + name: String, + server_label: String, + error: T.nilable(String), + output: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the tool call. + id:, + # A JSON string of the arguments passed to the tool. + arguments:, + # The name of the tool that was run. + name:, + # The label of the MCP server running the tool. + server_label:, + # The error from the tool call, if any. + error: nil, + # The output from the tool call. + output: nil, + # The type of the item. Always `mcp_call`. 
+ type: :mcp_call + ) + end + + sig do + override.returns( + { + id: String, + arguments: String, + name: String, + server_label: String, + type: Symbol, + error: T.nilable(String), + output: T.nilable(String) + } + ) + end + def to_hash + end + end + + class McpListTools < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputItem::McpListTools, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the list. + sig { returns(String) } + attr_accessor :id + + # The label of the MCP server. + sig { returns(String) } + attr_accessor :server_label + + # The tools available on the server. + sig do + returns( + T::Array[ + OpenAI::Responses::ResponseOutputItem::McpListTools::Tool + ] + ) + end + attr_accessor :tools + + # The type of the item. Always `mcp_list_tools`. + sig { returns(Symbol) } + attr_accessor :type + + # Error message if the server could not list tools. + sig { returns(T.nilable(String)) } + attr_accessor :error + + # A list of tools available on an MCP server. + sig do + params( + id: String, + server_label: String, + tools: + T::Array[ + OpenAI::Responses::ResponseOutputItem::McpListTools::Tool::OrHash + ], + error: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the list. + id:, + # The label of the MCP server. + server_label:, + # The tools available on the server. + tools:, + # Error message if the server could not list tools. + error: nil, + # The type of the item. Always `mcp_list_tools`. + type: :mcp_list_tools + ) + end + + sig do + override.returns( + { + id: String, + server_label: String, + tools: + T::Array[ + OpenAI::Responses::ResponseOutputItem::McpListTools::Tool + ], + type: Symbol, + error: T.nilable(String) + } + ) + end + def to_hash + end + + class Tool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputItem::McpListTools::Tool, + OpenAI::Internal::AnyHash + ) + end + + # The JSON schema describing the tool's input. + sig { returns(T.anything) } + attr_accessor :input_schema + + # The name of the tool. + sig { returns(String) } + attr_accessor :name + + # Additional annotations about the tool. + sig { returns(T.nilable(T.anything)) } + attr_accessor :annotations + + # The description of the tool. + sig { returns(T.nilable(String)) } + attr_accessor :description + + # A tool available on an MCP server. + sig do + params( + input_schema: T.anything, + name: String, + annotations: T.nilable(T.anything), + description: T.nilable(String) + ).returns(T.attached_class) + end + def self.new( + # The JSON schema describing the tool's input. + input_schema:, + # The name of the tool. + name:, + # Additional annotations about the tool. + annotations: nil, + # The description of the tool. + description: nil + ) + end + + sig do + override.returns( + { + input_schema: T.anything, + name: String, + annotations: T.nilable(T.anything), + description: T.nilable(String) + } + ) + end + def to_hash + end + end + end + + class McpApprovalRequest < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputItem::McpApprovalRequest, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the approval request. + sig { returns(String) } + attr_accessor :id + + # A JSON string of arguments for the tool. + sig { returns(String) } + attr_accessor :arguments + + # The name of the tool to run. 
+ sig { returns(String) } + attr_accessor :name + + # The label of the MCP server making the request. + sig { returns(String) } + attr_accessor :server_label + + # The type of the item. Always `mcp_approval_request`. + sig { returns(Symbol) } + attr_accessor :type + + # A request for human approval of a tool invocation. + sig do + params( + id: String, + arguments: String, + name: String, + server_label: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the approval request. + id:, + # A JSON string of arguments for the tool. + arguments:, + # The name of the tool to run. + name:, + # The label of the MCP server making the request. + server_label:, + # The type of the item. Always `mcp_approval_request`. + type: :mcp_approval_request + ) + end + + sig do + override.returns( + { + id: String, + arguments: String, + name: String, + server_label: String, + type: Symbol + } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[OpenAI::Responses::ResponseOutputItem::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_output_item_added_event.rbi b/rbi/openai/models/responses/response_output_item_added_event.rbi new file mode 100644 index 00000000..6ac51356 --- /dev/null +++ b/rbi/openai/models/responses/response_output_item_added_event.rbi @@ -0,0 +1,82 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseOutputItemAddedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputItemAddedEvent, + OpenAI::Internal::AnyHash + ) + end + + # The output item that was added. + sig { returns(OpenAI::Responses::ResponseOutputItem::Variants) } + attr_accessor :item + + # The index of the output item that was added. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.output_item.added`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a new output item is added. + sig do + params( + item: + T.any( + OpenAI::Responses::ResponseOutputMessage::OrHash, + OpenAI::Responses::ResponseFileSearchToolCall::OrHash, + OpenAI::Responses::ResponseFunctionToolCall::OrHash, + OpenAI::Responses::ResponseFunctionWebSearch::OrHash, + OpenAI::Responses::ResponseComputerToolCall::OrHash, + OpenAI::Responses::ResponseReasoningItem::OrHash, + OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::OrHash, + OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, + OpenAI::Responses::ResponseOutputItem::LocalShellCall::OrHash, + OpenAI::Responses::ResponseOutputItem::McpCall::OrHash, + OpenAI::Responses::ResponseOutputItem::McpListTools::OrHash, + OpenAI::Responses::ResponseOutputItem::McpApprovalRequest::OrHash, + OpenAI::Responses::ResponseCustomToolCall::OrHash + ), + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The output item that was added. + item:, + # The index of the output item that was added. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always `response.output_item.added`. 
+ type: :"response.output_item.added" + ) + end + + sig do + override.returns( + { + item: OpenAI::Responses::ResponseOutputItem::Variants, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_output_item_done_event.rbi b/rbi/openai/models/responses/response_output_item_done_event.rbi new file mode 100644 index 00000000..ba0ecc6d --- /dev/null +++ b/rbi/openai/models/responses/response_output_item_done_event.rbi @@ -0,0 +1,82 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseOutputItemDoneEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputItemDoneEvent, + OpenAI::Internal::AnyHash + ) + end + + # The output item that was marked done. + sig { returns(OpenAI::Responses::ResponseOutputItem::Variants) } + attr_accessor :item + + # The index of the output item that was marked done. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.output_item.done`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when an output item is marked done. + sig do + params( + item: + T.any( + OpenAI::Responses::ResponseOutputMessage::OrHash, + OpenAI::Responses::ResponseFileSearchToolCall::OrHash, + OpenAI::Responses::ResponseFunctionToolCall::OrHash, + OpenAI::Responses::ResponseFunctionWebSearch::OrHash, + OpenAI::Responses::ResponseComputerToolCall::OrHash, + OpenAI::Responses::ResponseReasoningItem::OrHash, + OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::OrHash, + OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, + OpenAI::Responses::ResponseOutputItem::LocalShellCall::OrHash, + OpenAI::Responses::ResponseOutputItem::McpCall::OrHash, + OpenAI::Responses::ResponseOutputItem::McpListTools::OrHash, + OpenAI::Responses::ResponseOutputItem::McpApprovalRequest::OrHash, + OpenAI::Responses::ResponseCustomToolCall::OrHash + ), + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The output item that was marked done. + item:, + # The index of the output item that was marked done. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always `response.output_item.done`. + type: :"response.output_item.done" + ) + end + + sig do + override.returns( + { + item: OpenAI::Responses::ResponseOutputItem::Variants, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_output_message.rbi b/rbi/openai/models/responses/response_output_message.rbi new file mode 100644 index 00000000..8e057823 --- /dev/null +++ b/rbi/openai/models/responses/response_output_message.rbi @@ -0,0 +1,162 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseOutputMessage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputMessage, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the output message. + sig { returns(String) } + attr_accessor :id + + # The content of the output message. 
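Because `ResponseOutputItem` is a union, the `item` carried by the `output_item.added`/`output_item.done` events above is one of thirteen variant classes. A sketch of narrowing it with a case expression; `event` is assumed to be a `ResponseOutputItemAddedEvent` already received from a stream:

case (item = event.item)
when OpenAI::Responses::ResponseOutputMessage
  puts "assistant message #{item.id} with #{item.content.length} content part(s)"
when OpenAI::Responses::ResponseOutputItem::McpCall
  puts "MCP tool #{item.name} on #{item.server_label}: #{item.arguments}"
when OpenAI::Responses::ResponseOutputItem::ImageGenerationCall
  puts "image generation #{item.id} is #{item.status}"
else
  puts "other item type at output index #{event.output_index}"
end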
+ sig do + returns( + T::Array[ + T.any( + OpenAI::Responses::ResponseOutputText, + OpenAI::Responses::ResponseOutputRefusal + ) + ] + ) + end + attr_accessor :content + + # The role of the output message. Always `assistant`. + sig { returns(Symbol) } + attr_accessor :role + + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. + sig do + returns(OpenAI::Responses::ResponseOutputMessage::Status::OrSymbol) + end + attr_accessor :status + + # The type of the output message. Always `message`. + sig { returns(Symbol) } + attr_accessor :type + + # An output message from the model. + sig do + params( + id: String, + content: + T::Array[ + T.any( + OpenAI::Responses::ResponseOutputText::OrHash, + OpenAI::Responses::ResponseOutputRefusal::OrHash + ) + ], + status: OpenAI::Responses::ResponseOutputMessage::Status::OrSymbol, + role: Symbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the output message. + id:, + # The content of the output message. + content:, + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. + status:, + # The role of the output message. Always `assistant`. + role: :assistant, + # The type of the output message. Always `message`. + type: :message + ) + end + + sig do + override.returns( + { + id: String, + content: + T::Array[ + T.any( + OpenAI::Responses::ResponseOutputText, + OpenAI::Responses::ResponseOutputRefusal + ) + ], + role: Symbol, + status: + OpenAI::Responses::ResponseOutputMessage::Status::OrSymbol, + type: Symbol + } + ) + end + def to_hash + end + + # A text output from the model. + module Content + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputText, + OpenAI::Responses::ResponseOutputRefusal + ) + end + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseOutputMessage::Content::Variants + ] + ) + end + def self.variants + end + end + + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ResponseOutputMessage::Status) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseOutputMessage::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseOutputMessage::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseOutputMessage::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseOutputMessage::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_output_refusal.rbi b/rbi/openai/models/responses/response_output_refusal.rbi new file mode 100644 index 00000000..b206e330 --- /dev/null +++ b/rbi/openai/models/responses/response_output_refusal.rbi @@ -0,0 +1,39 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseOutputRefusal < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputRefusal, + OpenAI::Internal::AnyHash + ) + end + + # The refusal explanation from the model. 
+ sig { returns(String) } + attr_accessor :refusal + + # The type of the refusal. Always `refusal`. + sig { returns(Symbol) } + attr_accessor :type + + # A refusal from the model. + sig { params(refusal: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The refusal explanation from the model. + refusal:, + # The type of the refusal. Always `refusal`. + type: :refusal + ) + end + + sig { override.returns({ refusal: String, type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_output_text.rbi b/rbi/openai/models/responses/response_output_text.rbi new file mode 100644 index 00000000..ab31e485 --- /dev/null +++ b/rbi/openai/models/responses/response_output_text.rbi @@ -0,0 +1,477 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseOutputText < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputText, + OpenAI::Internal::AnyHash + ) + end + + # The annotations of the text output. + sig do + returns( + T::Array[ + T.any( + OpenAI::Responses::ResponseOutputText::Annotation::FileCitation, + OpenAI::Responses::ResponseOutputText::Annotation::URLCitation, + OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation, + OpenAI::Responses::ResponseOutputText::Annotation::FilePath + ) + ] + ) + end + attr_accessor :annotations + + # The text output from the model. + sig { returns(String) } + attr_accessor :text + + # The type of the output text. Always `output_text`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + returns( + T.nilable(T::Array[OpenAI::Responses::ResponseOutputText::Logprob]) + ) + end + attr_reader :logprobs + + sig do + params( + logprobs: + T::Array[OpenAI::Responses::ResponseOutputText::Logprob::OrHash] + ).void + end + attr_writer :logprobs + + # A text output from the model. + sig do + params( + annotations: + T::Array[ + T.any( + OpenAI::Responses::ResponseOutputText::Annotation::FileCitation::OrHash, + OpenAI::Responses::ResponseOutputText::Annotation::URLCitation::OrHash, + OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation::OrHash, + OpenAI::Responses::ResponseOutputText::Annotation::FilePath::OrHash + ) + ], + text: String, + logprobs: + T::Array[OpenAI::Responses::ResponseOutputText::Logprob::OrHash], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The annotations of the text output. + annotations:, + # The text output from the model. + text:, + logprobs: nil, + # The type of the output text. Always `output_text`. + type: :output_text + ) + end + + sig do + override.returns( + { + annotations: + T::Array[ + T.any( + OpenAI::Responses::ResponseOutputText::Annotation::FileCitation, + OpenAI::Responses::ResponseOutputText::Annotation::URLCitation, + OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation, + OpenAI::Responses::ResponseOutputText::Annotation::FilePath + ) + ], + text: String, + type: Symbol, + logprobs: T::Array[OpenAI::Responses::ResponseOutputText::Logprob] + } + ) + end + def to_hash + end + + # A citation to a file. 
+ module Annotation + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputText::Annotation::FileCitation, + OpenAI::Responses::ResponseOutputText::Annotation::URLCitation, + OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation, + OpenAI::Responses::ResponseOutputText::Annotation::FilePath + ) + end + + class FileCitation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputText::Annotation::FileCitation, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the file. + sig { returns(String) } + attr_accessor :file_id + + # The filename of the file cited. + sig { returns(String) } + attr_accessor :filename + + # The index of the file in the list of files. + sig { returns(Integer) } + attr_accessor :index + + # The type of the file citation. Always `file_citation`. + sig { returns(Symbol) } + attr_accessor :type + + # A citation to a file. + sig do + params( + file_id: String, + filename: String, + index: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the file. + file_id:, + # The filename of the file cited. + filename:, + # The index of the file in the list of files. + index:, + # The type of the file citation. Always `file_citation`. + type: :file_citation + ) + end + + sig do + override.returns( + { + file_id: String, + filename: String, + index: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + + class URLCitation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputText::Annotation::URLCitation, + OpenAI::Internal::AnyHash + ) + end + + # The index of the last character of the URL citation in the message. + sig { returns(Integer) } + attr_accessor :end_index + + # The index of the first character of the URL citation in the message. + sig { returns(Integer) } + attr_accessor :start_index + + # The title of the web resource. + sig { returns(String) } + attr_accessor :title + + # The type of the URL citation. Always `url_citation`. + sig { returns(Symbol) } + attr_accessor :type + + # The URL of the web resource. + sig { returns(String) } + attr_accessor :url + + # A citation for a web resource used to generate a model response. + sig do + params( + end_index: Integer, + start_index: Integer, + title: String, + url: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The index of the last character of the URL citation in the message. + end_index:, + # The index of the first character of the URL citation in the message. + start_index:, + # The title of the web resource. + title:, + # The URL of the web resource. + url:, + # The type of the URL citation. Always `url_citation`. + type: :url_citation + ) + end + + sig do + override.returns( + { + end_index: Integer, + start_index: Integer, + title: String, + type: Symbol, + url: String + } + ) + end + def to_hash + end + end + + class ContainerFileCitation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the container file. + sig { returns(String) } + attr_accessor :container_id + + # The index of the last character of the container file citation in the message. + sig { returns(Integer) } + attr_accessor :end_index + + # The ID of the file. 
+ sig { returns(String) } + attr_accessor :file_id + + # The filename of the container file cited. + sig { returns(String) } + attr_accessor :filename + + # The index of the first character of the container file citation in the message. + sig { returns(Integer) } + attr_accessor :start_index + + # The type of the container file citation. Always `container_file_citation`. + sig { returns(Symbol) } + attr_accessor :type + + # A citation for a container file used to generate a model response. + sig do + params( + container_id: String, + end_index: Integer, + file_id: String, + filename: String, + start_index: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the container file. + container_id:, + # The index of the last character of the container file citation in the message. + end_index:, + # The ID of the file. + file_id:, + # The filename of the container file cited. + filename:, + # The index of the first character of the container file citation in the message. + start_index:, + # The type of the container file citation. Always `container_file_citation`. + type: :container_file_citation + ) + end + + sig do + override.returns( + { + container_id: String, + end_index: Integer, + file_id: String, + filename: String, + start_index: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + + class FilePath < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputText::Annotation::FilePath, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the file. + sig { returns(String) } + attr_accessor :file_id + + # The index of the file in the list of files. + sig { returns(Integer) } + attr_accessor :index + + # The type of the file path. Always `file_path`. + sig { returns(Symbol) } + attr_accessor :type + + # A path to a file. + sig do + params(file_id: String, index: Integer, type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # The ID of the file. + file_id:, + # The index of the file in the list of files. + index:, + # The type of the file path. Always `file_path`. + type: :file_path + ) + end + + sig do + override.returns( + { file_id: String, index: Integer, type: Symbol } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseOutputText::Annotation::Variants + ] + ) + end + def self.variants + end + end + + class Logprob < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputText::Logprob, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :token + + sig { returns(T::Array[Integer]) } + attr_accessor :bytes + + sig { returns(Float) } + attr_accessor :logprob + + sig do + returns( + T::Array[ + OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob + ] + ) + end + attr_accessor :top_logprobs + + # The log probability of a token. 
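`Logprob` pairs each sampled token with its byte representation, its log probability, and the competing alternatives in `top_logprobs` (the `TopLogprob` class is defined just below). A sketch of flattening them for inspection, assuming `text_part` is a `ResponseOutputText` from a response that requested logprobs:

(text_part.logprobs || []).each do |lp|
  alts = lp.top_logprobs.map { |t| "#{t.token}=#{t.logprob.round(3)}" }.join(", ")
  puts "#{lp.token} (#{lp.logprob.round(3)}) vs #{alts}"
end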
+ sig do + params( + token: String, + bytes: T::Array[Integer], + logprob: Float, + top_logprobs: + T::Array[ + OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob::OrHash + ] + ).returns(T.attached_class) + end + def self.new(token:, bytes:, logprob:, top_logprobs:) + end + + sig do + override.returns( + { + token: String, + bytes: T::Array[Integer], + logprob: Float, + top_logprobs: + T::Array[ + OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob + ] + } + ) + end + def to_hash + end + + class TopLogprob < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :token + + sig { returns(T::Array[Integer]) } + attr_accessor :bytes + + sig { returns(Float) } + attr_accessor :logprob + + # The top log probability of a token. + sig do + params( + token: String, + bytes: T::Array[Integer], + logprob: Float + ).returns(T.attached_class) + end + def self.new(token:, bytes:, logprob:) + end + + sig do + override.returns( + { token: String, bytes: T::Array[Integer], logprob: Float } + ) + end + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_output_text_annotation_added_event.rbi b/rbi/openai/models/responses/response_output_text_annotation_added_event.rbi new file mode 100644 index 00000000..c26c48d6 --- /dev/null +++ b/rbi/openai/models/responses/response_output_text_annotation_added_event.rbi @@ -0,0 +1,91 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseOutputTextAnnotationAddedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseOutputTextAnnotationAddedEvent, + OpenAI::Internal::AnyHash + ) + end + + # The annotation object being added. (See annotation schema for details.) + sig { returns(T.anything) } + attr_accessor :annotation + + # The index of the annotation within the content part. + sig { returns(Integer) } + attr_accessor :annotation_index + + # The index of the content part within the output item. + sig { returns(Integer) } + attr_accessor :content_index + + # The unique identifier of the item to which the annotation is being added. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item in the response's output array. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always 'response.output_text.annotation.added'. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when an annotation is added to output text content. + sig do + params( + annotation: T.anything, + annotation_index: Integer, + content_index: Integer, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The annotation object being added. (See annotation schema for details.) + annotation:, + # The index of the annotation within the content part. + annotation_index:, + # The index of the content part within the output item. + content_index:, + # The unique identifier of the item to which the annotation is being added. + item_id:, + # The index of the output item in the response's output array. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. 
Always 'response.output_text.annotation.added'. + type: :"response.output_text.annotation.added" + ) + end + + sig do + override.returns( + { + annotation: T.anything, + annotation_index: Integer, + content_index: Integer, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_prompt.rbi b/rbi/openai/models/responses/response_prompt.rbi new file mode 100644 index 00000000..4a90fa5c --- /dev/null +++ b/rbi/openai/models/responses/response_prompt.rbi @@ -0,0 +1,120 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponsePrompt < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Responses::ResponsePrompt, OpenAI::Internal::AnyHash) + end + + # The unique identifier of the prompt template to use. + sig { returns(String) } + attr_accessor :id + + # Optional map of values to substitute in for variables in your prompt. The + # substitution values can either be strings, or other Response input types like + # images or files. + sig do + returns( + T.nilable( + T::Hash[ + Symbol, + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Responses::ResponseInputImage, + OpenAI::Responses::ResponseInputFile + ) + ] + ) + ) + end + attr_accessor :variables + + # Optional version of the prompt template. + sig { returns(T.nilable(String)) } + attr_accessor :version + + # Reference to a prompt template and its variables. + # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + sig do + params( + id: String, + variables: + T.nilable( + T::Hash[ + Symbol, + T.any( + String, + OpenAI::Responses::ResponseInputText::OrHash, + OpenAI::Responses::ResponseInputImage::OrHash, + OpenAI::Responses::ResponseInputFile::OrHash + ) + ] + ), + version: T.nilable(String) + ).returns(T.attached_class) + end + def self.new( + # The unique identifier of the prompt template to use. + id:, + # Optional map of values to substitute in for variables in your prompt. The + # substitution values can either be strings, or other Response input types like + # images or files. + variables: nil, + # Optional version of the prompt template. + version: nil + ) + end + + sig do + override.returns( + { + id: String, + variables: + T.nilable( + T::Hash[ + Symbol, + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Responses::ResponseInputImage, + OpenAI::Responses::ResponseInputFile + ) + ] + ), + version: T.nilable(String) + } + ) + end + def to_hash + end + + # A text input to the model. 
+ module Variable + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + OpenAI::Responses::ResponseInputText, + OpenAI::Responses::ResponseInputImage, + OpenAI::Responses::ResponseInputFile + ) + end + + sig do + override.returns( + T::Array[OpenAI::Responses::ResponsePrompt::Variable::Variants] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_queued_event.rbi b/rbi/openai/models/responses/response_queued_event.rbi new file mode 100644 index 00000000..015a597c --- /dev/null +++ b/rbi/openai/models/responses/response_queued_event.rbi @@ -0,0 +1,62 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseQueuedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseQueuedEvent, + OpenAI::Internal::AnyHash + ) + end + + # The full response object that is queued. + sig { returns(OpenAI::Responses::Response) } + attr_reader :response + + sig { params(response: OpenAI::Responses::Response::OrHash).void } + attr_writer :response + + # The sequence number for this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always 'response.queued'. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a response is queued and waiting to be processed. + sig do + params( + response: OpenAI::Responses::Response::OrHash, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The full response object that is queued. + response:, + # The sequence number for this event. + sequence_number:, + # The type of the event. Always 'response.queued'. + type: :"response.queued" + ) + end + + sig do + override.returns( + { + response: OpenAI::Responses::Response, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_reasoning_item.rbi b/rbi/openai/models/responses/response_reasoning_item.rbi new file mode 100644 index 00000000..328f2828 --- /dev/null +++ b/rbi/openai/models/responses/response_reasoning_item.rbi @@ -0,0 +1,229 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseReasoningItem < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseReasoningItem, + OpenAI::Internal::AnyHash + ) + end + + # The unique identifier of the reasoning content. + sig { returns(String) } + attr_accessor :id + + # Reasoning summary content. + sig do + returns(T::Array[OpenAI::Responses::ResponseReasoningItem::Summary]) + end + attr_accessor :summary + + # The type of the object. Always `reasoning`. + sig { returns(Symbol) } + attr_accessor :type + + # Reasoning text content. + sig do + returns( + T.nilable( + T::Array[OpenAI::Responses::ResponseReasoningItem::Content] + ) + ) + end + attr_reader :content + + sig do + params( + content: + T::Array[ + OpenAI::Responses::ResponseReasoningItem::Content::OrHash + ] + ).void + end + attr_writer :content + + # The encrypted content of the reasoning item - populated when a response is + # generated with `reasoning.encrypted_content` in the `include` parameter. + sig { returns(T.nilable(String)) } + attr_accessor :encrypted_content + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. 
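Returning to `ResponsePrompt` above: variable values may be plain strings or the richer input types in the `Variable` union. A minimal construction sketch; the prompt ID and variable values here are hypothetical:

prompt = OpenAI::Responses::ResponsePrompt.new(
  id: "pmpt_abc123",                    # hypothetical stored-template ID
  variables: { customer_name: "Ada" },  # string values; input images/files also allowed
  version: "2"
)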
+ sig do + returns( + T.nilable( + OpenAI::Responses::ResponseReasoningItem::Status::OrSymbol + ) + ) + end + attr_reader :status + + sig do + params( + status: OpenAI::Responses::ResponseReasoningItem::Status::OrSymbol + ).void + end + attr_writer :status + + # A description of the chain of thought used by a reasoning model while generating + # a response. Be sure to include these items in your `input` to the Responses API + # for subsequent turns of a conversation if you are manually + # [managing context](https://platform.openai.com/docs/guides/conversation-state). + sig do + params( + id: String, + summary: + T::Array[ + OpenAI::Responses::ResponseReasoningItem::Summary::OrHash + ], + content: + T::Array[ + OpenAI::Responses::ResponseReasoningItem::Content::OrHash + ], + encrypted_content: T.nilable(String), + status: OpenAI::Responses::ResponseReasoningItem::Status::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique identifier of the reasoning content. + id:, + # Reasoning summary content. + summary:, + # Reasoning text content. + content: nil, + # The encrypted content of the reasoning item - populated when a response is + # generated with `reasoning.encrypted_content` in the `include` parameter. + encrypted_content: nil, + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. + status: nil, + # The type of the object. Always `reasoning`. + type: :reasoning + ) + end + + sig do + override.returns( + { + id: String, + summary: + T::Array[OpenAI::Responses::ResponseReasoningItem::Summary], + type: Symbol, + content: + T::Array[OpenAI::Responses::ResponseReasoningItem::Content], + encrypted_content: T.nilable(String), + status: OpenAI::Responses::ResponseReasoningItem::Status::OrSymbol + } + ) + end + def to_hash + end + + class Summary < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseReasoningItem::Summary, + OpenAI::Internal::AnyHash + ) + end + + # A summary of the reasoning output from the model so far. + sig { returns(String) } + attr_accessor :text + + # The type of the object. Always `summary_text`. + sig { returns(Symbol) } + attr_accessor :type + + sig { params(text: String, type: Symbol).returns(T.attached_class) } + def self.new( + # A summary of the reasoning output from the model so far. + text:, + # The type of the object. Always `summary_text`. + type: :summary_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + + class Content < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseReasoningItem::Content, + OpenAI::Internal::AnyHash + ) + end + + # Reasoning text output from the model. + sig { returns(String) } + attr_accessor :text + + # The type of the object. Always `reasoning_text`. + sig { returns(Symbol) } + attr_accessor :type + + sig { params(text: String, type: Symbol).returns(T.attached_class) } + def self.new( + # Reasoning text output from the model. + text:, + # The type of the object. Always `reasoning_text`. + type: :reasoning_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. 
+ module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ResponseReasoningItem::Status) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::Responses::ResponseReasoningItem::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::Responses::ResponseReasoningItem::Status::TaggedSymbol + ) + INCOMPLETE = + T.let( + :incomplete, + OpenAI::Responses::ResponseReasoningItem::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseReasoningItem::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_reasoning_summary_part_added_event.rbi b/rbi/openai/models/responses/response_reasoning_summary_part_added_event.rbi new file mode 100644 index 00000000..400eaae7 --- /dev/null +++ b/rbi/openai/models/responses/response_reasoning_summary_part_added_event.rbi @@ -0,0 +1,129 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseReasoningSummaryPartAddedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the item this summary part is associated with. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item this summary part is associated with. + sig { returns(Integer) } + attr_accessor :output_index + + # The summary part that was added. + sig do + returns( + OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent::Part + ) + end + attr_reader :part + + sig do + params( + part: + OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent::Part::OrHash + ).void + end + attr_writer :part + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The index of the summary part within the reasoning summary. + sig { returns(Integer) } + attr_accessor :summary_index + + # The type of the event. Always `response.reasoning_summary_part.added`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a new reasoning summary part is added. + sig do + params( + item_id: String, + output_index: Integer, + part: + OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent::Part::OrHash, + sequence_number: Integer, + summary_index: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the item this summary part is associated with. + item_id:, + # The index of the output item this summary part is associated with. + output_index:, + # The summary part that was added. + part:, + # The sequence number of this event. + sequence_number:, + # The index of the summary part within the reasoning summary. + summary_index:, + # The type of the event. Always `response.reasoning_summary_part.added`. + type: :"response.reasoning_summary_part.added" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + part: + OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent::Part, + sequence_number: Integer, + summary_index: Integer, + type: Symbol + } + ) + end + def to_hash + end + + class Part < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent::Part, + OpenAI::Internal::AnyHash + ) + end + + # The text of the summary part. 
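Per the note on `ResponseReasoningItem` above, reasoning items must be passed back in `input` on later turns when conversation state is managed manually. A sketch of rebuilding one to replay, with every value hypothetical; only the model classes come from this diff:

replayed = OpenAI::Responses::ResponseReasoningItem.new(
  id: "rs_abc123",
  summary: [
    OpenAI::Responses::ResponseReasoningItem::Summary.new(text: "Compared two approaches.")
  ],
  encrypted_content: previous_turn_encrypted_content  # hypothetical: taken from a prior response
)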
+ sig { returns(String) } + attr_accessor :text + + # The type of the summary part. Always `summary_text`. + sig { returns(Symbol) } + attr_accessor :type + + # The summary part that was added. + sig { params(text: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The text of the summary part. + text:, + # The type of the summary part. Always `summary_text`. + type: :summary_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_reasoning_summary_part_done_event.rbi b/rbi/openai/models/responses/response_reasoning_summary_part_done_event.rbi new file mode 100644 index 00000000..40caaff1 --- /dev/null +++ b/rbi/openai/models/responses/response_reasoning_summary_part_done_event.rbi @@ -0,0 +1,129 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseReasoningSummaryPartDoneEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the item this summary part is associated with. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item this summary part is associated with. + sig { returns(Integer) } + attr_accessor :output_index + + # The completed summary part. + sig do + returns( + OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent::Part + ) + end + attr_reader :part + + sig do + params( + part: + OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent::Part::OrHash + ).void + end + attr_writer :part + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The index of the summary part within the reasoning summary. + sig { returns(Integer) } + attr_accessor :summary_index + + # The type of the event. Always `response.reasoning_summary_part.done`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a reasoning summary part is completed. + sig do + params( + item_id: String, + output_index: Integer, + part: + OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent::Part::OrHash, + sequence_number: Integer, + summary_index: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the item this summary part is associated with. + item_id:, + # The index of the output item this summary part is associated with. + output_index:, + # The completed summary part. + part:, + # The sequence number of this event. + sequence_number:, + # The index of the summary part within the reasoning summary. + summary_index:, + # The type of the event. Always `response.reasoning_summary_part.done`. + type: :"response.reasoning_summary_part.done" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + part: + OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent::Part, + sequence_number: Integer, + summary_index: Integer, + type: Symbol + } + ) + end + def to_hash + end + + class Part < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent::Part, + OpenAI::Internal::AnyHash + ) + end + + # The text of the summary part. + sig { returns(String) } + attr_accessor :text + + # The type of the summary part. Always `summary_text`. + sig { returns(Symbol) } + attr_accessor :type + + # The completed summary part. 
+ sig { params(text: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The text of the summary part. + text:, + # The type of the summary part. Always `summary_text`. + type: :summary_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_reasoning_summary_text_delta_event.rbi b/rbi/openai/models/responses/response_reasoning_summary_text_delta_event.rbi new file mode 100644 index 00000000..f4af0148 --- /dev/null +++ b/rbi/openai/models/responses/response_reasoning_summary_text_delta_event.rbi @@ -0,0 +1,83 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseReasoningSummaryTextDeltaEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseReasoningSummaryTextDeltaEvent, + OpenAI::Internal::AnyHash + ) + end + + # The text delta that was added to the summary. + sig { returns(String) } + attr_accessor :delta + + # The ID of the item this summary text delta is associated with. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item this summary text delta is associated with. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The index of the summary part within the reasoning summary. + sig { returns(Integer) } + attr_accessor :summary_index + + # The type of the event. Always `response.reasoning_summary_text.delta`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a delta is added to a reasoning summary text. + sig do + params( + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + summary_index: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The text delta that was added to the summary. + delta:, + # The ID of the item this summary text delta is associated with. + item_id:, + # The index of the output item this summary text delta is associated with. + output_index:, + # The sequence number of this event. + sequence_number:, + # The index of the summary part within the reasoning summary. + summary_index:, + # The type of the event. Always `response.reasoning_summary_text.delta`. + type: :"response.reasoning_summary_text.delta" + ) + end + + sig do + override.returns( + { + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + summary_index: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_reasoning_summary_text_done_event.rbi b/rbi/openai/models/responses/response_reasoning_summary_text_done_event.rbi new file mode 100644 index 00000000..95ab837f --- /dev/null +++ b/rbi/openai/models/responses/response_reasoning_summary_text_done_event.rbi @@ -0,0 +1,83 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseReasoningSummaryTextDoneEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseReasoningSummaryTextDoneEvent, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the item this summary text is associated with. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item this summary text is associated with. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. 
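The summary text deltas above are meant to be concatenated in arrival order, keyed by `summary_index`; the matching `response.reasoning_summary_text.done` event (defined just below) then carries the full text for cross-checking. A sketch, with the event source assumed:

summaries = Hash.new { |h, k| h[k] = +"" }
stream.each do |event|
  case event
  when OpenAI::Responses::ResponseReasoningSummaryTextDeltaEvent
    summaries[event.summary_index] << event.delta
  when OpenAI::Responses::ResponseReasoningSummaryTextDoneEvent
    warn "delta mismatch for summary #{event.summary_index}" unless
      summaries[event.summary_index] == event.text
  end
end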
+ sig { returns(Integer) } + attr_accessor :sequence_number + + # The index of the summary part within the reasoning summary. + sig { returns(Integer) } + attr_accessor :summary_index + + # The full text of the completed reasoning summary. + sig { returns(String) } + attr_accessor :text + + # The type of the event. Always `response.reasoning_summary_text.done`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a reasoning summary text is completed. + sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + summary_index: Integer, + text: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the item this summary text is associated with. + item_id:, + # The index of the output item this summary text is associated with. + output_index:, + # The sequence number of this event. + sequence_number:, + # The index of the summary part within the reasoning summary. + summary_index:, + # The full text of the completed reasoning summary. + text:, + # The type of the event. Always `response.reasoning_summary_text.done`. + type: :"response.reasoning_summary_text.done" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + summary_index: Integer, + text: String, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_reasoning_text_delta_event.rbi b/rbi/openai/models/responses/response_reasoning_text_delta_event.rbi new file mode 100644 index 00000000..54336041 --- /dev/null +++ b/rbi/openai/models/responses/response_reasoning_text_delta_event.rbi @@ -0,0 +1,83 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseReasoningTextDeltaEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseReasoningTextDeltaEvent, + OpenAI::Internal::AnyHash + ) + end + + # The index of the reasoning content part this delta is associated with. + sig { returns(Integer) } + attr_accessor :content_index + + # The text delta that was added to the reasoning content. + sig { returns(String) } + attr_accessor :delta + + # The ID of the item this reasoning text delta is associated with. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item this reasoning text delta is associated with. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.reasoning_text.delta`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a delta is added to a reasoning text. + sig do + params( + content_index: Integer, + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The index of the reasoning content part this delta is associated with. + content_index:, + # The text delta that was added to the reasoning content. + delta:, + # The ID of the item this reasoning text delta is associated with. + item_id:, + # The index of the output item this reasoning text delta is associated with. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always `response.reasoning_text.delta`. 
+ type: :"response.reasoning_text.delta" + ) + end + + sig do + override.returns( + { + content_index: Integer, + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_reasoning_text_done_event.rbi b/rbi/openai/models/responses/response_reasoning_text_done_event.rbi new file mode 100644 index 00000000..2561422b --- /dev/null +++ b/rbi/openai/models/responses/response_reasoning_text_done_event.rbi @@ -0,0 +1,83 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseReasoningTextDoneEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseReasoningTextDoneEvent, + OpenAI::Internal::AnyHash + ) + end + + # The index of the reasoning content part. + sig { returns(Integer) } + attr_accessor :content_index + + # The ID of the item this reasoning text is associated with. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item this reasoning text is associated with. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The full text of the completed reasoning content. + sig { returns(String) } + attr_accessor :text + + # The type of the event. Always `response.reasoning_text.done`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a reasoning text is completed. + sig do + params( + content_index: Integer, + item_id: String, + output_index: Integer, + sequence_number: Integer, + text: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The index of the reasoning content part. + content_index:, + # The ID of the item this reasoning text is associated with. + item_id:, + # The index of the output item this reasoning text is associated with. + output_index:, + # The sequence number of this event. + sequence_number:, + # The full text of the completed reasoning content. + text:, + # The type of the event. Always `response.reasoning_text.done`. + type: :"response.reasoning_text.done" + ) + end + + sig do + override.returns( + { + content_index: Integer, + item_id: String, + output_index: Integer, + sequence_number: Integer, + text: String, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_refusal_delta_event.rbi b/rbi/openai/models/responses/response_refusal_delta_event.rbi new file mode 100644 index 00000000..34271558 --- /dev/null +++ b/rbi/openai/models/responses/response_refusal_delta_event.rbi @@ -0,0 +1,83 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseRefusalDeltaEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseRefusalDeltaEvent, + OpenAI::Internal::AnyHash + ) + end + + # The index of the content part that the refusal text is added to. + sig { returns(Integer) } + attr_accessor :content_index + + # The refusal text that is added. + sig { returns(String) } + attr_accessor :delta + + # The ID of the output item that the refusal text is added to. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item that the refusal text is added to. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. 
+ sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.refusal.delta`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when there is a partial refusal text. + sig do + params( + content_index: Integer, + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The index of the content part that the refusal text is added to. + content_index:, + # The refusal text that is added. + delta:, + # The ID of the output item that the refusal text is added to. + item_id:, + # The index of the output item that the refusal text is added to. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always `response.refusal.delta`. + type: :"response.refusal.delta" + ) + end + + sig do + override.returns( + { + content_index: Integer, + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_refusal_done_event.rbi b/rbi/openai/models/responses/response_refusal_done_event.rbi new file mode 100644 index 00000000..3f7a62e8 --- /dev/null +++ b/rbi/openai/models/responses/response_refusal_done_event.rbi @@ -0,0 +1,83 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseRefusalDoneEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseRefusalDoneEvent, + OpenAI::Internal::AnyHash + ) + end + + # The index of the content part that the refusal text is finalized. + sig { returns(Integer) } + attr_accessor :content_index + + # The ID of the output item that the refusal text is finalized. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item that the refusal text is finalized. + sig { returns(Integer) } + attr_accessor :output_index + + # The refusal text that is finalized. + sig { returns(String) } + attr_accessor :refusal + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.refusal.done`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when refusal text is finalized. + sig do + params( + content_index: Integer, + item_id: String, + output_index: Integer, + refusal: String, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The index of the content part that the refusal text is finalized. + content_index:, + # The ID of the output item that the refusal text is finalized. + item_id:, + # The index of the output item that the refusal text is finalized. + output_index:, + # The refusal text that is finalized. + refusal:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always `response.refusal.done`. 
+ type: :"response.refusal.done" + ) + end + + sig do + override.returns( + { + content_index: Integer, + item_id: String, + output_index: Integer, + refusal: String, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_retrieve_params.rbi b/rbi/openai/models/responses/response_retrieve_params.rbi new file mode 100644 index 00000000..f4d1f80c --- /dev/null +++ b/rbi/openai/models/responses/response_retrieve_params.rbi @@ -0,0 +1,94 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseRetrieveParams, + OpenAI::Internal::AnyHash + ) + end + + # Additional fields to include in the response. See the `include` parameter for + # Response creation above for more information. + sig do + returns( + T.nilable(T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]) + ) + end + attr_reader :include + + sig do + params( + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol] + ).void + end + attr_writer :include + + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + sig { returns(T.nilable(T::Boolean)) } + attr_reader :include_obfuscation + + sig { params(include_obfuscation: T::Boolean).void } + attr_writer :include_obfuscation + + # The sequence number of the event after which to start streaming. + sig { returns(T.nilable(Integer)) } + attr_reader :starting_after + + sig { params(starting_after: Integer).void } + attr_writer :starting_after + + sig do + params( + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + include_obfuscation: T::Boolean, + starting_after: Integer, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # Additional fields to include in the response. See the `include` parameter for + # Response creation above for more information. + include: nil, + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + include_obfuscation: nil, + # The sequence number of the event after which to start streaming. 
+ starting_after: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + include: + T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + include_obfuscation: T::Boolean, + starting_after: Integer, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_status.rbi b/rbi/openai/models/responses/response_status.rbi new file mode 100644 index 00000000..5eb9802f --- /dev/null +++ b/rbi/openai/models/responses/response_status.rbi @@ -0,0 +1,36 @@ +# typed: strong + +module OpenAI + module Models + module Responses + # The status of the response generation. One of `completed`, `failed`, + # `in_progress`, `cancelled`, `queued`, or `incomplete`. + module ResponseStatus + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::Responses::ResponseStatus) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + COMPLETED = + T.let(:completed, OpenAI::Responses::ResponseStatus::TaggedSymbol) + FAILED = T.let(:failed, OpenAI::Responses::ResponseStatus::TaggedSymbol) + IN_PROGRESS = + T.let(:in_progress, OpenAI::Responses::ResponseStatus::TaggedSymbol) + CANCELLED = + T.let(:cancelled, OpenAI::Responses::ResponseStatus::TaggedSymbol) + QUEUED = T.let(:queued, OpenAI::Responses::ResponseStatus::TaggedSymbol) + INCOMPLETE = + T.let(:incomplete, OpenAI::Responses::ResponseStatus::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::Responses::ResponseStatus::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_stream_event.rbi b/rbi/openai/models/responses/response_stream_event.rbi new file mode 100644 index 00000000..0aba05b2 --- /dev/null +++ b/rbi/openai/models/responses/response_stream_event.rbi @@ -0,0 +1,79 @@ +# typed: strong + +module OpenAI + module Models + module Responses + # Emitted when there is a partial audio response. 
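+      # In practice this union is what a streaming call yields; a minimal
+      # dispatch sketch (assuming the SDK's `stream_raw` helper, which mirrors
+      # the create parameters):
+      #
+      #   client.responses.stream_raw(model: "gpt-4.1", input: "hi").each do |event|
+      #     case event
+      #     when OpenAI::Responses::ResponseTextDeltaEvent
+      #       print(event.delta)
+      #     when OpenAI::Responses::ResponseCompletedEvent
+      #       puts
+      #     end
+      #   end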
+ module ResponseStreamEvent + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::ResponseAudioDeltaEvent, + OpenAI::Responses::ResponseAudioDoneEvent, + OpenAI::Responses::ResponseAudioTranscriptDeltaEvent, + OpenAI::Responses::ResponseAudioTranscriptDoneEvent, + OpenAI::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, + OpenAI::Responses::ResponseCodeInterpreterCallCodeDoneEvent, + OpenAI::Responses::ResponseCodeInterpreterCallCompletedEvent, + OpenAI::Responses::ResponseCodeInterpreterCallInProgressEvent, + OpenAI::Responses::ResponseCodeInterpreterCallInterpretingEvent, + OpenAI::Responses::ResponseCompletedEvent, + OpenAI::Responses::ResponseContentPartAddedEvent, + OpenAI::Responses::ResponseContentPartDoneEvent, + OpenAI::Responses::ResponseCreatedEvent, + OpenAI::Responses::ResponseErrorEvent, + OpenAI::Responses::ResponseFileSearchCallCompletedEvent, + OpenAI::Responses::ResponseFileSearchCallInProgressEvent, + OpenAI::Responses::ResponseFileSearchCallSearchingEvent, + OpenAI::Responses::ResponseFunctionCallArgumentsDeltaEvent, + OpenAI::Responses::ResponseFunctionCallArgumentsDoneEvent, + OpenAI::Responses::ResponseInProgressEvent, + OpenAI::Responses::ResponseFailedEvent, + OpenAI::Responses::ResponseIncompleteEvent, + OpenAI::Responses::ResponseOutputItemAddedEvent, + OpenAI::Responses::ResponseOutputItemDoneEvent, + OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent, + OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent, + OpenAI::Responses::ResponseReasoningSummaryTextDeltaEvent, + OpenAI::Responses::ResponseReasoningSummaryTextDoneEvent, + OpenAI::Responses::ResponseReasoningTextDeltaEvent, + OpenAI::Responses::ResponseReasoningTextDoneEvent, + OpenAI::Responses::ResponseRefusalDeltaEvent, + OpenAI::Responses::ResponseRefusalDoneEvent, + OpenAI::Responses::ResponseTextDeltaEvent, + OpenAI::Responses::ResponseTextDoneEvent, + OpenAI::Responses::ResponseWebSearchCallCompletedEvent, + OpenAI::Responses::ResponseWebSearchCallInProgressEvent, + OpenAI::Responses::ResponseWebSearchCallSearchingEvent, + OpenAI::Responses::ResponseImageGenCallCompletedEvent, + OpenAI::Responses::ResponseImageGenCallGeneratingEvent, + OpenAI::Responses::ResponseImageGenCallInProgressEvent, + OpenAI::Responses::ResponseImageGenCallPartialImageEvent, + OpenAI::Responses::ResponseMcpCallArgumentsDeltaEvent, + OpenAI::Responses::ResponseMcpCallArgumentsDoneEvent, + OpenAI::Responses::ResponseMcpCallCompletedEvent, + OpenAI::Responses::ResponseMcpCallFailedEvent, + OpenAI::Responses::ResponseMcpCallInProgressEvent, + OpenAI::Responses::ResponseMcpListToolsCompletedEvent, + OpenAI::Responses::ResponseMcpListToolsFailedEvent, + OpenAI::Responses::ResponseMcpListToolsInProgressEvent, + OpenAI::Responses::ResponseOutputTextAnnotationAddedEvent, + OpenAI::Responses::ResponseQueuedEvent, + OpenAI::Responses::ResponseCustomToolCallInputDeltaEvent, + OpenAI::Responses::ResponseCustomToolCallInputDoneEvent + ) + end + + sig do + override.returns( + T::Array[OpenAI::Responses::ResponseStreamEvent::Variants] + ) + end + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_text_config.rbi b/rbi/openai/models/responses/response_text_config.rbi new file mode 100644 index 00000000..e36e1bac --- /dev/null +++ b/rbi/openai/models/responses/response_text_config.rbi @@ -0,0 +1,166 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseTextConfig < OpenAI::Internal::Type::BaseModel + 
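+        # Note: the Ruby-side name `format_` carries a trailing underscore to
+        # avoid shadowing `Kernel#format`; it corresponds to the API's `format`
+        # field. An illustrative construction using only the fields defined
+        # below:
+        #
+        #   OpenAI::Responses::ResponseTextConfig.new(verbosity: :low)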
OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseTextConfig, + OpenAI::Internal::AnyHash + ) + end + + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + sig do + returns( + T.nilable( + T.any( + OpenAI::ResponseFormatText, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig, + OpenAI::ResponseFormatJSONObject + ) + ) + ) + end + attr_reader :format_ + + sig do + params( + format_: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ) + ).void + end + attr_writer :format_ + + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + sig do + returns( + T.nilable( + OpenAI::Responses::ResponseTextConfig::Verbosity::OrSymbol + ) + ) + end + attr_accessor :verbosity + + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + sig do + params( + format_: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ), + verbosity: + T.nilable( + OpenAI::Responses::ResponseTextConfig::Verbosity::OrSymbol + ) + ).returns(T.attached_class) + end + def self.new( + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + format_: nil, + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + verbosity: nil + ) + end + + sig do + override.returns( + { + format_: + T.any( + OpenAI::ResponseFormatText, + OpenAI::Responses::ResponseFormatTextJSONSchemaConfig, + OpenAI::ResponseFormatJSONObject + ), + verbosity: + T.nilable( + OpenAI::Responses::ResponseTextConfig::Verbosity::OrSymbol + ) + } + ) + end + def to_hash + end + + # Constrains the verbosity of the model's response. 
Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + module Verbosity + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ResponseTextConfig::Verbosity) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = + T.let( + :low, + OpenAI::Responses::ResponseTextConfig::Verbosity::TaggedSymbol + ) + MEDIUM = + T.let( + :medium, + OpenAI::Responses::ResponseTextConfig::Verbosity::TaggedSymbol + ) + HIGH = + T.let( + :high, + OpenAI::Responses::ResponseTextConfig::Verbosity::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseTextConfig::Verbosity::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_text_delta_event.rbi b/rbi/openai/models/responses/response_text_delta_event.rbi new file mode 100644 index 00000000..bd438476 --- /dev/null +++ b/rbi/openai/models/responses/response_text_delta_event.rbi @@ -0,0 +1,214 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseTextDeltaEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseTextDeltaEvent, + OpenAI::Internal::AnyHash + ) + end + + # The index of the content part that the text delta was added to. + sig { returns(Integer) } + attr_accessor :content_index + + # The text delta that was added. + sig { returns(String) } + attr_accessor :delta + + # The ID of the output item that the text delta was added to. + sig { returns(String) } + attr_accessor :item_id + + # The log probabilities of the tokens in the delta. + sig do + returns(T::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob]) + end + attr_accessor :logprobs + + # The index of the output item that the text delta was added to. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number for this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.output_text.delta`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when there is an additional text delta. + sig do + params( + content_index: Integer, + delta: String, + item_id: String, + logprobs: + T::Array[ + OpenAI::Responses::ResponseTextDeltaEvent::Logprob::OrHash + ], + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The index of the content part that the text delta was added to. + content_index:, + # The text delta that was added. + delta:, + # The ID of the output item that the text delta was added to. + item_id:, + # The log probabilities of the tokens in the delta. + logprobs:, + # The index of the output item that the text delta was added to. + output_index:, + # The sequence number for this event. + sequence_number:, + # The type of the event. Always `response.output_text.delta`. 
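+          # `type` also serves as the discriminator when the stream is parsed
+          # into the `ResponseStreamEvent` union, so it is defaulted below and
+          # rarely passed explicitly.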
+ type: :"response.output_text.delta" + ) + end + + sig do + override.returns( + { + content_index: Integer, + delta: String, + item_id: String, + logprobs: + T::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob], + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + + class Logprob < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseTextDeltaEvent::Logprob, + OpenAI::Internal::AnyHash + ) + end + + # A possible text token. + sig { returns(String) } + attr_accessor :token + + # The log probability of this token. + sig { returns(Float) } + attr_accessor :logprob + + # The log probability of the top 20 most likely tokens. + sig do + returns( + T.nilable( + T::Array[ + OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob + ] + ) + ) + end + attr_reader :top_logprobs + + sig do + params( + top_logprobs: + T::Array[ + OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob::OrHash + ] + ).void + end + attr_writer :top_logprobs + + # A logprob is the logarithmic probability that the model assigns to producing a + # particular token at a given position in the sequence. Less-negative (higher) + # logprob values indicate greater model confidence in that token choice. + sig do + params( + token: String, + logprob: Float, + top_logprobs: + T::Array[ + OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob::OrHash + ] + ).returns(T.attached_class) + end + def self.new( + # A possible text token. + token:, + # The log probability of this token. + logprob:, + # The log probability of the top 20 most likely tokens. + top_logprobs: nil + ) + end + + sig do + override.returns( + { + token: String, + logprob: Float, + top_logprobs: + T::Array[ + OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob + ] + } + ) + end + def to_hash + end + + class TopLogprob < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob, + OpenAI::Internal::AnyHash + ) + end + + # A possible text token. + sig { returns(T.nilable(String)) } + attr_reader :token + + sig { params(token: String).void } + attr_writer :token + + # The log probability of this token. + sig { returns(T.nilable(Float)) } + attr_reader :logprob + + sig { params(logprob: Float).void } + attr_writer :logprob + + sig do + params(token: String, logprob: Float).returns(T.attached_class) + end + def self.new( + # A possible text token. + token: nil, + # The log probability of this token. + logprob: nil + ) + end + + sig { override.returns({ token: String, logprob: Float }) } + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_text_done_event.rbi b/rbi/openai/models/responses/response_text_done_event.rbi new file mode 100644 index 00000000..2fe91762 --- /dev/null +++ b/rbi/openai/models/responses/response_text_done_event.rbi @@ -0,0 +1,214 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseTextDoneEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseTextDoneEvent, + OpenAI::Internal::AnyHash + ) + end + + # The index of the content part that the text content is finalized. + sig { returns(Integer) } + attr_accessor :content_index + + # The ID of the output item that the text content is finalized. + sig { returns(String) } + attr_accessor :item_id + + # The log probabilities of the tokens in the delta. 
+ sig do + returns(T::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob]) + end + attr_accessor :logprobs + + # The index of the output item that the text content is finalized. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number for this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The text content that is finalized. + sig { returns(String) } + attr_accessor :text + + # The type of the event. Always `response.output_text.done`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when text content is finalized. + sig do + params( + content_index: Integer, + item_id: String, + logprobs: + T::Array[ + OpenAI::Responses::ResponseTextDoneEvent::Logprob::OrHash + ], + output_index: Integer, + sequence_number: Integer, + text: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The index of the content part that the text content is finalized. + content_index:, + # The ID of the output item that the text content is finalized. + item_id:, + # The log probabilities of the tokens in the delta. + logprobs:, + # The index of the output item that the text content is finalized. + output_index:, + # The sequence number for this event. + sequence_number:, + # The text content that is finalized. + text:, + # The type of the event. Always `response.output_text.done`. + type: :"response.output_text.done" + ) + end + + sig do + override.returns( + { + content_index: Integer, + item_id: String, + logprobs: + T::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob], + output_index: Integer, + sequence_number: Integer, + text: String, + type: Symbol + } + ) + end + def to_hash + end + + class Logprob < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseTextDoneEvent::Logprob, + OpenAI::Internal::AnyHash + ) + end + + # A possible text token. + sig { returns(String) } + attr_accessor :token + + # The log probability of this token. + sig { returns(Float) } + attr_accessor :logprob + + # The log probability of the top 20 most likely tokens. + sig do + returns( + T.nilable( + T::Array[ + OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob + ] + ) + ) + end + attr_reader :top_logprobs + + sig do + params( + top_logprobs: + T::Array[ + OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob::OrHash + ] + ).void + end + attr_writer :top_logprobs + + # A logprob is the logarithmic probability that the model assigns to producing a + # particular token at a given position in the sequence. Less-negative (higher) + # logprob values indicate greater model confidence in that token choice. + sig do + params( + token: String, + logprob: Float, + top_logprobs: + T::Array[ + OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob::OrHash + ] + ).returns(T.attached_class) + end + def self.new( + # A possible text token. + token:, + # The log probability of this token. + logprob:, + # The log probability of the top 20 most likely tokens. + top_logprobs: nil + ) + end + + sig do + override.returns( + { + token: String, + logprob: Float, + top_logprobs: + T::Array[ + OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob + ] + } + ) + end + def to_hash + end + + class TopLogprob < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob, + OpenAI::Internal::AnyHash + ) + end + + # A possible text token. 
+ sig { returns(T.nilable(String)) } + attr_reader :token + + sig { params(token: String).void } + attr_writer :token + + # The log probability of this token. + sig { returns(T.nilable(Float)) } + attr_reader :logprob + + sig { params(logprob: Float).void } + attr_writer :logprob + + sig do + params(token: String, logprob: Float).returns(T.attached_class) + end + def self.new( + # A possible text token. + token: nil, + # The log probability of this token. + logprob: nil + ) + end + + sig { override.returns({ token: String, logprob: Float }) } + def to_hash + end + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_usage.rbi b/rbi/openai/models/responses/response_usage.rbi new file mode 100644 index 00000000..34f8734a --- /dev/null +++ b/rbi/openai/models/responses/response_usage.rbi @@ -0,0 +1,147 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseUsage < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Responses::ResponseUsage, OpenAI::Internal::AnyHash) + end + + # The number of input tokens. + sig { returns(Integer) } + attr_accessor :input_tokens + + # A detailed breakdown of the input tokens. + sig { returns(OpenAI::Responses::ResponseUsage::InputTokensDetails) } + attr_reader :input_tokens_details + + sig do + params( + input_tokens_details: + OpenAI::Responses::ResponseUsage::InputTokensDetails::OrHash + ).void + end + attr_writer :input_tokens_details + + # The number of output tokens. + sig { returns(Integer) } + attr_accessor :output_tokens + + # A detailed breakdown of the output tokens. + sig { returns(OpenAI::Responses::ResponseUsage::OutputTokensDetails) } + attr_reader :output_tokens_details + + sig do + params( + output_tokens_details: + OpenAI::Responses::ResponseUsage::OutputTokensDetails::OrHash + ).void + end + attr_writer :output_tokens_details + + # The total number of tokens used. + sig { returns(Integer) } + attr_accessor :total_tokens + + # Represents token usage details including input tokens, output tokens, a + # breakdown of output tokens, and the total tokens used. + sig do + params( + input_tokens: Integer, + input_tokens_details: + OpenAI::Responses::ResponseUsage::InputTokensDetails::OrHash, + output_tokens: Integer, + output_tokens_details: + OpenAI::Responses::ResponseUsage::OutputTokensDetails::OrHash, + total_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # The number of input tokens. + input_tokens:, + # A detailed breakdown of the input tokens. + input_tokens_details:, + # The number of output tokens. + output_tokens:, + # A detailed breakdown of the output tokens. + output_tokens_details:, + # The total number of tokens used. + total_tokens: + ) + end + + sig do + override.returns( + { + input_tokens: Integer, + input_tokens_details: + OpenAI::Responses::ResponseUsage::InputTokensDetails, + output_tokens: Integer, + output_tokens_details: + OpenAI::Responses::ResponseUsage::OutputTokensDetails, + total_tokens: Integer + } + ) + end + def to_hash + end + + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseUsage::InputTokensDetails, + OpenAI::Internal::AnyHash + ) + end + + # The number of tokens that were retrieved from the cache. + # [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). + sig { returns(Integer) } + attr_accessor :cached_tokens + + # A detailed breakdown of the input tokens. 
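+          # Tokens not served from the prompt cache can be derived as
+          # `usage.input_tokens - usage.input_tokens_details.cached_tokens`.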
+ sig { params(cached_tokens: Integer).returns(T.attached_class) } + def self.new( + # The number of tokens that were retrieved from the cache. + # [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). + cached_tokens: + ) + end + + sig { override.returns({ cached_tokens: Integer }) } + def to_hash + end + end + + class OutputTokensDetails < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseUsage::OutputTokensDetails, + OpenAI::Internal::AnyHash + ) + end + + # The number of reasoning tokens. + sig { returns(Integer) } + attr_accessor :reasoning_tokens + + # A detailed breakdown of the output tokens. + sig { params(reasoning_tokens: Integer).returns(T.attached_class) } + def self.new( + # The number of reasoning tokens. + reasoning_tokens: + ) + end + + sig { override.returns({ reasoning_tokens: Integer }) } + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_web_search_call_completed_event.rbi b/rbi/openai/models/responses/response_web_search_call_completed_event.rbi new file mode 100644 index 00000000..78f9a4e9 --- /dev/null +++ b/rbi/openai/models/responses/response_web_search_call_completed_event.rbi @@ -0,0 +1,67 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseWebSearchCallCompletedEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseWebSearchCallCompletedEvent, + OpenAI::Internal::AnyHash + ) + end + + # Unique ID for the output item associated with the web search call. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item that the web search call is associated with. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of the web search call being processed. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.web_search_call.completed`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a web search call is completed. + sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique ID for the output item associated with the web search call. + item_id:, + # The index of the output item that the web search call is associated with. + output_index:, + # The sequence number of the web search call being processed. + sequence_number:, + # The type of the event. Always `response.web_search_call.completed`. + type: :"response.web_search_call.completed" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_web_search_call_in_progress_event.rbi b/rbi/openai/models/responses/response_web_search_call_in_progress_event.rbi new file mode 100644 index 00000000..8fc0415b --- /dev/null +++ b/rbi/openai/models/responses/response_web_search_call_in_progress_event.rbi @@ -0,0 +1,67 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseWebSearchCallInProgressEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseWebSearchCallInProgressEvent, + OpenAI::Internal::AnyHash + ) + end + + # Unique ID for the output item associated with the web search call. 
+ sig { returns(String) } + attr_accessor :item_id + + # The index of the output item that the web search call is associated with. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of the web search call being processed. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.web_search_call.in_progress`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a web search call is initiated. + sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique ID for the output item associated with the web search call. + item_id:, + # The index of the output item that the web search call is associated with. + output_index:, + # The sequence number of the web search call being processed. + sequence_number:, + # The type of the event. Always `response.web_search_call.in_progress`. + type: :"response.web_search_call.in_progress" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_web_search_call_searching_event.rbi b/rbi/openai/models/responses/response_web_search_call_searching_event.rbi new file mode 100644 index 00000000..15ce4ac3 --- /dev/null +++ b/rbi/openai/models/responses/response_web_search_call_searching_event.rbi @@ -0,0 +1,67 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseWebSearchCallSearchingEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseWebSearchCallSearchingEvent, + OpenAI::Internal::AnyHash + ) + end + + # Unique ID for the output item associated with the web search call. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item that the web search call is associated with. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of the web search call being processed. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.web_search_call.searching`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a web search call is executing. + sig do + params( + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Unique ID for the output item associated with the web search call. + item_id:, + # The index of the output item that the web search call is associated with. + output_index:, + # The sequence number of the web search call being processed. + sequence_number:, + # The type of the event. Always `response.web_search_call.searching`. + type: :"response.web_search_call.searching" + ) + end + + sig do + override.returns( + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/tool.rbi b/rbi/openai/models/responses/tool.rbi new file mode 100644 index 00000000..7764d3a5 --- /dev/null +++ b/rbi/openai/models/responses/tool.rbi @@ -0,0 +1,1348 @@ +# typed: strong + +module OpenAI + module Models + module Responses + # A tool that can be used to generate a response. 
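+      # Tools are passed as an array when creating a Response; built-in tools
+      # can be supplied as hashes keyed by their `type` discriminator. An
+      # illustrative sketch using the code interpreter variant defined below:
+      #
+      #   client.responses.create(
+      #     model: "gpt-4.1",
+      #     input: "Plot y = x**2 for x in 0..10",
+      #     tools: [{ type: :code_interpreter, container: { type: :auto } }]
+      #   )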
+ module Tool + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::FunctionTool, + OpenAI::Responses::FileSearchTool, + OpenAI::Responses::ComputerTool, + OpenAI::Responses::Tool::Mcp, + OpenAI::Responses::Tool::CodeInterpreter, + OpenAI::Responses::Tool::ImageGeneration, + OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::CustomTool, + OpenAI::Responses::WebSearchTool + ) + end + + class Mcp < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Responses::Tool::Mcp, OpenAI::Internal::AnyHash) + end + + # A label for this MCP server, used to identify it in tool calls. + sig { returns(String) } + attr_accessor :server_label + + # The type of the MCP tool. Always `mcp`. + sig { returns(Symbol) } + attr_accessor :type + + # List of allowed tool names or a filter object. + sig do + returns( + T.nilable( + T.any( + T::Array[String], + OpenAI::Responses::Tool::Mcp::AllowedTools::McpToolFilter + ) + ) + ) + end + attr_accessor :allowed_tools + + # An OAuth access token that can be used with a remote MCP server, either with a + # custom MCP server URL or a service connector. Your application must handle the + # OAuth authorization flow and provide the token here. + sig { returns(T.nilable(String)) } + attr_reader :authorization + + sig { params(authorization: String).void } + attr_writer :authorization + + # Identifier for service connectors, like those available in ChatGPT. One of + # `server_url` or `connector_id` must be provided. Learn more about service + # connectors + # [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). + # + # Currently supported `connector_id` values are: + # + # - Dropbox: `connector_dropbox` + # - Gmail: `connector_gmail` + # - Google Calendar: `connector_googlecalendar` + # - Google Drive: `connector_googledrive` + # - Microsoft Teams: `connector_microsoftteams` + # - Outlook Calendar: `connector_outlookcalendar` + # - Outlook Email: `connector_outlookemail` + # - SharePoint: `connector_sharepoint` + sig do + returns( + T.nilable(OpenAI::Responses::Tool::Mcp::ConnectorID::OrSymbol) + ) + end + attr_reader :connector_id + + sig do + params( + connector_id: OpenAI::Responses::Tool::Mcp::ConnectorID::OrSymbol + ).void + end + attr_writer :connector_id + + # Optional HTTP headers to send to the MCP server. Use for authentication or other + # purposes. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :headers + + # Specify which of the MCP server's tools require approval. + sig do + returns( + T.nilable( + T.any( + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter, + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting::OrSymbol + ) + ) + ) + end + attr_accessor :require_approval + + # Optional description of the MCP server, used to provide more context. + sig { returns(T.nilable(String)) } + attr_reader :server_description + + sig { params(server_description: String).void } + attr_writer :server_description + + # The URL for the MCP server. One of `server_url` or `connector_id` must be + # provided. + sig { returns(T.nilable(String)) } + attr_reader :server_url + + sig { params(server_url: String).void } + attr_writer :server_url + + # Give the model access to additional tools via remote Model Context Protocol + # (MCP) servers. + # [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). 
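+          # An illustrative remote-server configuration (placeholder URL and
+          # label):
+          #
+          #   OpenAI::Responses::Tool::Mcp.new(
+          #     server_label: "docs",
+          #     server_url: "https://mcp.example.com/sse",
+          #     require_approval: :never
+          #   )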
+ sig do + params( + server_label: String, + allowed_tools: + T.nilable( + T.any( + T::Array[String], + OpenAI::Responses::Tool::Mcp::AllowedTools::McpToolFilter::OrHash + ) + ), + authorization: String, + connector_id: OpenAI::Responses::Tool::Mcp::ConnectorID::OrSymbol, + headers: T.nilable(T::Hash[Symbol, String]), + require_approval: + T.nilable( + T.any( + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::OrHash, + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting::OrSymbol + ) + ), + server_description: String, + server_url: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # A label for this MCP server, used to identify it in tool calls. + server_label:, + # List of allowed tool names or a filter object. + allowed_tools: nil, + # An OAuth access token that can be used with a remote MCP server, either with a + # custom MCP server URL or a service connector. Your application must handle the + # OAuth authorization flow and provide the token here. + authorization: nil, + # Identifier for service connectors, like those available in ChatGPT. One of + # `server_url` or `connector_id` must be provided. Learn more about service + # connectors + # [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). + # + # Currently supported `connector_id` values are: + # + # - Dropbox: `connector_dropbox` + # - Gmail: `connector_gmail` + # - Google Calendar: `connector_googlecalendar` + # - Google Drive: `connector_googledrive` + # - Microsoft Teams: `connector_microsoftteams` + # - Outlook Calendar: `connector_outlookcalendar` + # - Outlook Email: `connector_outlookemail` + # - SharePoint: `connector_sharepoint` + connector_id: nil, + # Optional HTTP headers to send to the MCP server. Use for authentication or other + # purposes. + headers: nil, + # Specify which of the MCP server's tools require approval. + require_approval: nil, + # Optional description of the MCP server, used to provide more context. + server_description: nil, + # The URL for the MCP server. One of `server_url` or `connector_id` must be + # provided. + server_url: nil, + # The type of the MCP tool. Always `mcp`. + type: :mcp + ) + end + + sig do + override.returns( + { + server_label: String, + type: Symbol, + allowed_tools: + T.nilable( + T.any( + T::Array[String], + OpenAI::Responses::Tool::Mcp::AllowedTools::McpToolFilter + ) + ), + authorization: String, + connector_id: + OpenAI::Responses::Tool::Mcp::ConnectorID::OrSymbol, + headers: T.nilable(T::Hash[Symbol, String]), + require_approval: + T.nilable( + T.any( + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter, + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting::OrSymbol + ) + ), + server_description: String, + server_url: String + } + ) + end + def to_hash + end + + # List of allowed tool names or a filter object. + module AllowedTools + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + T::Array[String], + OpenAI::Responses::Tool::Mcp::AllowedTools::McpToolFilter + ) + end + + class McpToolFilter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::Tool::Mcp::AllowedTools::McpToolFilter, + OpenAI::Internal::AnyHash + ) + end + + # Indicates whether or not a tool modifies data or is read-only. If an MCP server + # is + # [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + # it will match this filter. 
+ sig { returns(T.nilable(T::Boolean)) } + attr_reader :read_only + + sig { params(read_only: T::Boolean).void } + attr_writer :read_only + + # List of allowed tool names. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :tool_names + + sig { params(tool_names: T::Array[String]).void } + attr_writer :tool_names + + # A filter object to specify which tools are allowed. + sig do + params( + read_only: T::Boolean, + tool_names: T::Array[String] + ).returns(T.attached_class) + end + def self.new( + # Indicates whether or not a tool modifies data or is read-only. If an MCP server + # is + # [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + # it will match this filter. + read_only: nil, + # List of allowed tool names. + tool_names: nil + ) + end + + sig do + override.returns( + { read_only: T::Boolean, tool_names: T::Array[String] } + ) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[OpenAI::Responses::Tool::Mcp::AllowedTools::Variants] + ) + end + def self.variants + end + + StringArray = + T.let( + OpenAI::Internal::Type::ArrayOf[String], + OpenAI::Internal::Type::Converter + ) + end + + # Identifier for service connectors, like those available in ChatGPT. One of + # `server_url` or `connector_id` must be provided. Learn more about service + # connectors + # [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). + # + # Currently supported `connector_id` values are: + # + # - Dropbox: `connector_dropbox` + # - Gmail: `connector_gmail` + # - Google Calendar: `connector_googlecalendar` + # - Google Drive: `connector_googledrive` + # - Microsoft Teams: `connector_microsoftteams` + # - Outlook Calendar: `connector_outlookcalendar` + # - Outlook Email: `connector_outlookemail` + # - SharePoint: `connector_sharepoint` + module ConnectorID + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::Tool::Mcp::ConnectorID) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + CONNECTOR_DROPBOX = + T.let( + :connector_dropbox, + OpenAI::Responses::Tool::Mcp::ConnectorID::TaggedSymbol + ) + CONNECTOR_GMAIL = + T.let( + :connector_gmail, + OpenAI::Responses::Tool::Mcp::ConnectorID::TaggedSymbol + ) + CONNECTOR_GOOGLECALENDAR = + T.let( + :connector_googlecalendar, + OpenAI::Responses::Tool::Mcp::ConnectorID::TaggedSymbol + ) + CONNECTOR_GOOGLEDRIVE = + T.let( + :connector_googledrive, + OpenAI::Responses::Tool::Mcp::ConnectorID::TaggedSymbol + ) + CONNECTOR_MICROSOFTTEAMS = + T.let( + :connector_microsoftteams, + OpenAI::Responses::Tool::Mcp::ConnectorID::TaggedSymbol + ) + CONNECTOR_OUTLOOKCALENDAR = + T.let( + :connector_outlookcalendar, + OpenAI::Responses::Tool::Mcp::ConnectorID::TaggedSymbol + ) + CONNECTOR_OUTLOOKEMAIL = + T.let( + :connector_outlookemail, + OpenAI::Responses::Tool::Mcp::ConnectorID::TaggedSymbol + ) + CONNECTOR_SHAREPOINT = + T.let( + :connector_sharepoint, + OpenAI::Responses::Tool::Mcp::ConnectorID::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::Tool::Mcp::ConnectorID::TaggedSymbol + ] + ) + end + def self.values + end + end + + # Specify which of the MCP server's tools require approval. 
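+          # Accepts either a blanket `:always`/`:never` setting or a filter
+          # hash such as `{ never: { tool_names: ["search"] } }` (illustrative
+          # tool name) that exempts specific tools from approval.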
+ module RequireApproval + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter, + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting::TaggedSymbol + ) + end + + class McpToolApprovalFilter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter, + OpenAI::Internal::AnyHash + ) + end + + # A filter object to specify which tools are allowed. + sig do + returns( + T.nilable( + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always + ) + ) + end + attr_reader :always + + sig do + params( + always: + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always::OrHash + ).void + end + attr_writer :always + + # A filter object to specify which tools are allowed. + sig do + returns( + T.nilable( + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never + ) + ) + end + attr_reader :never + + sig do + params( + never: + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never::OrHash + ).void + end + attr_writer :never + + # Specify which of the MCP server's tools require approval. Can be `always`, + # `never`, or a filter object associated with tools that require approval. + sig do + params( + always: + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always::OrHash, + never: + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never::OrHash + ).returns(T.attached_class) + end + def self.new( + # A filter object to specify which tools are allowed. + always: nil, + # A filter object to specify which tools are allowed. + never: nil + ) + end + + sig do + override.returns( + { + always: + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always, + never: + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never + } + ) + end + def to_hash + end + + class Always < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always, + OpenAI::Internal::AnyHash + ) + end + + # Indicates whether or not a tool modifies data or is read-only. If an MCP server + # is + # [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + # it will match this filter. + sig { returns(T.nilable(T::Boolean)) } + attr_reader :read_only + + sig { params(read_only: T::Boolean).void } + attr_writer :read_only + + # List of allowed tool names. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :tool_names + + sig { params(tool_names: T::Array[String]).void } + attr_writer :tool_names + + # A filter object to specify which tools are allowed. + sig do + params( + read_only: T::Boolean, + tool_names: T::Array[String] + ).returns(T.attached_class) + end + def self.new( + # Indicates whether or not a tool modifies data or is read-only. If an MCP server + # is + # [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + # it will match this filter. + read_only: nil, + # List of allowed tool names. 
+ tool_names: nil + ) + end + + sig do + override.returns( + { read_only: T::Boolean, tool_names: T::Array[String] } + ) + end + def to_hash + end + end + + class Never < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never, + OpenAI::Internal::AnyHash + ) + end + + # Indicates whether or not a tool modifies data or is read-only. If an MCP server + # is + # [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + # it will match this filter. + sig { returns(T.nilable(T::Boolean)) } + attr_reader :read_only + + sig { params(read_only: T::Boolean).void } + attr_writer :read_only + + # List of allowed tool names. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :tool_names + + sig { params(tool_names: T::Array[String]).void } + attr_writer :tool_names + + # A filter object to specify which tools are allowed. + sig do + params( + read_only: T::Boolean, + tool_names: T::Array[String] + ).returns(T.attached_class) + end + def self.new( + # Indicates whether or not a tool modifies data or is read-only. If an MCP server + # is + # [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + # it will match this filter. + read_only: nil, + # List of allowed tool names. + tool_names: nil + ) + end + + sig do + override.returns( + { read_only: T::Boolean, tool_names: T::Array[String] } + ) + end + def to_hash + end + end + end + + # Specify a single approval policy for all tools. One of `always` or `never`. When + # set to `always`, all tools will require approval. When set to `never`, all tools + # will not require approval. + module McpToolApprovalSetting + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ALWAYS = + T.let( + :always, + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting::TaggedSymbol + ) + NEVER = + T.let( + :never, + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting::TaggedSymbol + ] + ) + end + def self.values + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Responses::Tool::Mcp::RequireApproval::Variants + ] + ) + end + def self.variants + end + end + end + + class CodeInterpreter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::Tool::CodeInterpreter, + OpenAI::Internal::AnyHash + ) + end + + # The code interpreter container. Can be a container ID or an object that + # specifies uploaded file IDs to make available to your code. + sig do + returns( + T.any( + String, + OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto + ) + ) + end + attr_accessor :container + + # The type of the code interpreter tool. Always `code_interpreter`. + sig { returns(Symbol) } + attr_accessor :type + + # A tool that runs Python code to help generate a response to a prompt. + sig do + params( + container: + T.any( + String, + OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::OrHash + ), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The code interpreter container. 
Can be a container ID or an object that + # specifies uploaded file IDs to make available to your code. + container:, + # The type of the code interpreter tool. Always `code_interpreter`. + type: :code_interpreter + ) + end + + sig do + override.returns( + { + container: + T.any( + String, + OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto + ), + type: Symbol + } + ) + end + def to_hash + end + + # The code interpreter container. Can be a container ID or an object that + # specifies uploaded file IDs to make available to your code. + module Container + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto + ) + end + + class CodeInterpreterToolAuto < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto, + OpenAI::Internal::AnyHash + ) + end + + # Always `auto`. + sig { returns(Symbol) } + attr_accessor :type + + # An optional list of uploaded files to make available to your code. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :file_ids + + sig { params(file_ids: T::Array[String]).void } + attr_writer :file_ids + + # Configuration for a code interpreter container. Optionally specify the IDs of + # the files to run the code on. + sig do + params(file_ids: T::Array[String], type: Symbol).returns( + T.attached_class + ) + end + def self.new( + # An optional list of uploaded files to make available to your code. + file_ids: nil, + # Always `auto`. + type: :auto + ) + end + + sig do + override.returns({ type: Symbol, file_ids: T::Array[String] }) + end + def to_hash + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Responses::Tool::CodeInterpreter::Container::Variants + ] + ) + end + def self.variants + end + end + end + + class ImageGeneration < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::Tool::ImageGeneration, + OpenAI::Internal::AnyHash + ) + end + + # The type of the image generation tool. Always `image_generation`. + sig { returns(Symbol) } + attr_accessor :type + + # Background type for the generated image. One of `transparent`, `opaque`, or + # `auto`. Default: `auto`. + sig do + returns( + T.nilable( + OpenAI::Responses::Tool::ImageGeneration::Background::OrSymbol + ) + ) + end + attr_reader :background + + sig do + params( + background: + OpenAI::Responses::Tool::ImageGeneration::Background::OrSymbol + ).void + end + attr_writer :background + + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + sig do + returns( + T.nilable( + OpenAI::Responses::Tool::ImageGeneration::InputFidelity::OrSymbol + ) + ) + end + attr_accessor :input_fidelity + + # Optional mask for inpainting. Contains `image_url` (string, optional) and + # `file_id` (string, optional). + sig do + returns( + T.nilable( + OpenAI::Responses::Tool::ImageGeneration::InputImageMask + ) + ) + end + attr_reader :input_image_mask + + sig do + params( + input_image_mask: + OpenAI::Responses::Tool::ImageGeneration::InputImageMask::OrHash + ).void + end + attr_writer :input_image_mask + + # The image generation model to use. Default: `gpt-image-1`. 
+ sig do + returns( + T.nilable( + OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol + ) + ) + end + attr_reader :model + + sig do + params( + model: OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol + ).void + end + attr_writer :model + + # Moderation level for the generated image. Default: `auto`. + sig do + returns( + T.nilable( + OpenAI::Responses::Tool::ImageGeneration::Moderation::OrSymbol + ) + ) + end + attr_reader :moderation + + sig do + params( + moderation: + OpenAI::Responses::Tool::ImageGeneration::Moderation::OrSymbol + ).void + end + attr_writer :moderation + + # Compression level for the output image. Default: 100. + sig { returns(T.nilable(Integer)) } + attr_reader :output_compression + + sig { params(output_compression: Integer).void } + attr_writer :output_compression + + # The output format of the generated image. One of `png`, `webp`, or `jpeg`. + # Default: `png`. + sig do + returns( + T.nilable( + OpenAI::Responses::Tool::ImageGeneration::OutputFormat::OrSymbol + ) + ) + end + attr_reader :output_format + + sig do + params( + output_format: + OpenAI::Responses::Tool::ImageGeneration::OutputFormat::OrSymbol + ).void + end + attr_writer :output_format + + # Number of partial images to generate in streaming mode, from 0 (default value) + # to 3. + sig { returns(T.nilable(Integer)) } + attr_reader :partial_images + + sig { params(partial_images: Integer).void } + attr_writer :partial_images + + # The quality of the generated image. One of `low`, `medium`, `high`, or `auto`. + # Default: `auto`. + sig do + returns( + T.nilable( + OpenAI::Responses::Tool::ImageGeneration::Quality::OrSymbol + ) + ) + end + attr_reader :quality + + sig do + params( + quality: + OpenAI::Responses::Tool::ImageGeneration::Quality::OrSymbol + ).void + end + attr_writer :quality + + # The size of the generated image. One of `1024x1024`, `1024x1536`, `1536x1024`, + # or `auto`. Default: `auto`. + sig do + returns( + T.nilable( + OpenAI::Responses::Tool::ImageGeneration::Size::OrSymbol + ) + ) + end + attr_reader :size + + sig do + params( + size: OpenAI::Responses::Tool::ImageGeneration::Size::OrSymbol + ).void + end + attr_writer :size + + # A tool that generates images using a model like `gpt-image-1`. + sig do + params( + background: + OpenAI::Responses::Tool::ImageGeneration::Background::OrSymbol, + input_fidelity: + T.nilable( + OpenAI::Responses::Tool::ImageGeneration::InputFidelity::OrSymbol + ), + input_image_mask: + OpenAI::Responses::Tool::ImageGeneration::InputImageMask::OrHash, + model: OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol, + moderation: + OpenAI::Responses::Tool::ImageGeneration::Moderation::OrSymbol, + output_compression: Integer, + output_format: + OpenAI::Responses::Tool::ImageGeneration::OutputFormat::OrSymbol, + partial_images: Integer, + quality: + OpenAI::Responses::Tool::ImageGeneration::Quality::OrSymbol, + size: OpenAI::Responses::Tool::ImageGeneration::Size::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Background type for the generated image. One of `transparent`, `opaque`, or + # `auto`. Default: `auto`. + background: nil, + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + input_fidelity: nil, + # Optional mask for inpainting. Contains `image_url` (string, optional) and + # `file_id` (string, optional). 
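+            # (Illustrative shape only, based on the `InputImageMask` model below:
+            # e.g. `input_image_mask: { file_id: "file-123" }`, where `file-123`
+            # stands in for a real uploaded file ID.)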
+ input_image_mask: nil, + # The image generation model to use. Default: `gpt-image-1`. + model: nil, + # Moderation level for the generated image. Default: `auto`. + moderation: nil, + # Compression level for the output image. Default: 100. + output_compression: nil, + # The output format of the generated image. One of `png`, `webp`, or `jpeg`. + # Default: `png`. + output_format: nil, + # Number of partial images to generate in streaming mode, from 0 (default value) + # to 3. + partial_images: nil, + # The quality of the generated image. One of `low`, `medium`, `high`, or `auto`. + # Default: `auto`. + quality: nil, + # The size of the generated image. One of `1024x1024`, `1024x1536`, `1536x1024`, + # or `auto`. Default: `auto`. + size: nil, + # The type of the image generation tool. Always `image_generation`. + type: :image_generation + ) + end + + sig do + override.returns( + { + type: Symbol, + background: + OpenAI::Responses::Tool::ImageGeneration::Background::OrSymbol, + input_fidelity: + T.nilable( + OpenAI::Responses::Tool::ImageGeneration::InputFidelity::OrSymbol + ), + input_image_mask: + OpenAI::Responses::Tool::ImageGeneration::InputImageMask, + model: + OpenAI::Responses::Tool::ImageGeneration::Model::OrSymbol, + moderation: + OpenAI::Responses::Tool::ImageGeneration::Moderation::OrSymbol, + output_compression: Integer, + output_format: + OpenAI::Responses::Tool::ImageGeneration::OutputFormat::OrSymbol, + partial_images: Integer, + quality: + OpenAI::Responses::Tool::ImageGeneration::Quality::OrSymbol, + size: OpenAI::Responses::Tool::ImageGeneration::Size::OrSymbol + } + ) + end + def to_hash + end + + # Background type for the generated image. One of `transparent`, `opaque`, or + # `auto`. Default: `auto`. + module Background + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::Tool::ImageGeneration::Background + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TRANSPARENT = + T.let( + :transparent, + OpenAI::Responses::Tool::ImageGeneration::Background::TaggedSymbol + ) + OPAQUE = + T.let( + :opaque, + OpenAI::Responses::Tool::ImageGeneration::Background::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::Responses::Tool::ImageGeneration::Background::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::Tool::ImageGeneration::Background::TaggedSymbol + ] + ) + end + def self.values + end + end + + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + module InputFidelity + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::Tool::ImageGeneration::InputFidelity + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + HIGH = + T.let( + :high, + OpenAI::Responses::Tool::ImageGeneration::InputFidelity::TaggedSymbol + ) + LOW = + T.let( + :low, + OpenAI::Responses::Tool::ImageGeneration::InputFidelity::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::Tool::ImageGeneration::InputFidelity::TaggedSymbol + ] + ) + end + def self.values + end + end + + class InputImageMask < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::Tool::ImageGeneration::InputImageMask, + OpenAI::Internal::AnyHash + ) + end + + # File ID for the mask image. 
+ sig { returns(T.nilable(String)) } + attr_reader :file_id + + sig { params(file_id: String).void } + attr_writer :file_id + + # Base64-encoded mask image. + sig { returns(T.nilable(String)) } + attr_reader :image_url + + sig { params(image_url: String).void } + attr_writer :image_url + + # Optional mask for inpainting. Contains `image_url` (string, optional) and + # `file_id` (string, optional). + sig do + params(file_id: String, image_url: String).returns( + T.attached_class + ) + end + def self.new( + # File ID for the mask image. + file_id: nil, + # Base64-encoded mask image. + image_url: nil + ) + end + + sig { override.returns({ file_id: String, image_url: String }) } + def to_hash + end + end + + # The image generation model to use. Default: `gpt-image-1`. + module Model + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::Tool::ImageGeneration::Model) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + GPT_IMAGE_1 = + T.let( + :"gpt-image-1", + OpenAI::Responses::Tool::ImageGeneration::Model::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::Tool::ImageGeneration::Model::TaggedSymbol + ] + ) + end + def self.values + end + end + + # Moderation level for the generated image. Default: `auto`. + module Moderation + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::Tool::ImageGeneration::Moderation + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Responses::Tool::ImageGeneration::Moderation::TaggedSymbol + ) + LOW = + T.let( + :low, + OpenAI::Responses::Tool::ImageGeneration::Moderation::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::Tool::ImageGeneration::Moderation::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The output format of the generated image. One of `png`, `webp`, or `jpeg`. + # Default: `png`. + module OutputFormat + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Responses::Tool::ImageGeneration::OutputFormat + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + PNG = + T.let( + :png, + OpenAI::Responses::Tool::ImageGeneration::OutputFormat::TaggedSymbol + ) + WEBP = + T.let( + :webp, + OpenAI::Responses::Tool::ImageGeneration::OutputFormat::TaggedSymbol + ) + JPEG = + T.let( + :jpeg, + OpenAI::Responses::Tool::ImageGeneration::OutputFormat::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::Tool::ImageGeneration::OutputFormat::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The quality of the generated image. One of `low`, `medium`, `high`, or `auto`. + # Default: `auto`. 
+ module Quality + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::Tool::ImageGeneration::Quality) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = + T.let( + :low, + OpenAI::Responses::Tool::ImageGeneration::Quality::TaggedSymbol + ) + MEDIUM = + T.let( + :medium, + OpenAI::Responses::Tool::ImageGeneration::Quality::TaggedSymbol + ) + HIGH = + T.let( + :high, + OpenAI::Responses::Tool::ImageGeneration::Quality::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::Responses::Tool::ImageGeneration::Quality::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::Tool::ImageGeneration::Quality::TaggedSymbol + ] + ) + end + def self.values + end + end + + # The size of the generated image. One of `1024x1024`, `1024x1536`, `1536x1024`, + # or `auto`. Default: `auto`. + module Size + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::Tool::ImageGeneration::Size) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SIZE_1024X1024 = + T.let( + :"1024x1024", + OpenAI::Responses::Tool::ImageGeneration::Size::TaggedSymbol + ) + SIZE_1024X1536 = + T.let( + :"1024x1536", + OpenAI::Responses::Tool::ImageGeneration::Size::TaggedSymbol + ) + SIZE_1536X1024 = + T.let( + :"1536x1024", + OpenAI::Responses::Tool::ImageGeneration::Size::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::Responses::Tool::ImageGeneration::Size::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::Tool::ImageGeneration::Size::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + class LocalShell < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::Tool::LocalShell, + OpenAI::Internal::AnyHash + ) + end + + # The type of the local shell tool. Always `local_shell`. + sig { returns(Symbol) } + attr_accessor :type + + # A tool that allows the model to execute shell commands in a local environment. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The type of the local shell tool. Always `local_shell`. + type: :local_shell + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + sig { override.returns(T::Array[OpenAI::Responses::Tool::Variants]) } + def self.variants + end + end + end + end +end diff --git a/rbi/openai/models/responses/tool_choice_allowed.rbi b/rbi/openai/models/responses/tool_choice_allowed.rbi new file mode 100644 index 00000000..47ee4cca --- /dev/null +++ b/rbi/openai/models/responses/tool_choice_allowed.rbi @@ -0,0 +1,124 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ToolChoiceAllowed < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ToolChoiceAllowed, + OpenAI::Internal::AnyHash + ) + end + + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + sig { returns(OpenAI::Responses::ToolChoiceAllowed::Mode::OrSymbol) } + attr_accessor :mode + + # A list of tool definitions that the model should be allowed to call. 
+ # + # For the Responses API, the list of tool definitions might look like: + # + # ```json + # [ + # { "type": "function", "name": "get_weather" }, + # { "type": "mcp", "server_label": "deepwiki" }, + # { "type": "image_generation" } + # ] + # ``` + sig { returns(T::Array[T::Hash[Symbol, T.anything]]) } + attr_accessor :tools + + # Allowed tool configuration type. Always `allowed_tools`. + sig { returns(Symbol) } + attr_accessor :type + + # Constrains the tools available to the model to a pre-defined set. + sig do + params( + mode: OpenAI::Responses::ToolChoiceAllowed::Mode::OrSymbol, + tools: T::Array[T::Hash[Symbol, T.anything]], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + mode:, + # A list of tool definitions that the model should be allowed to call. + # + # For the Responses API, the list of tool definitions might look like: + # + # ```json + # [ + # { "type": "function", "name": "get_weather" }, + # { "type": "mcp", "server_label": "deepwiki" }, + # { "type": "image_generation" } + # ] + # ``` + tools:, + # Allowed tool configuration type. Always `allowed_tools`. + type: :allowed_tools + ) + end + + sig do + override.returns( + { + mode: OpenAI::Responses::ToolChoiceAllowed::Mode::OrSymbol, + tools: T::Array[T::Hash[Symbol, T.anything]], + type: Symbol + } + ) + end + def to_hash + end + + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + module Mode + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ToolChoiceAllowed::Mode) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Responses::ToolChoiceAllowed::Mode::TaggedSymbol + ) + REQUIRED = + T.let( + :required, + OpenAI::Responses::ToolChoiceAllowed::Mode::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Responses::ToolChoiceAllowed::Mode::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/tool_choice_custom.rbi b/rbi/openai/models/responses/tool_choice_custom.rbi new file mode 100644 index 00000000..d3944c11 --- /dev/null +++ b/rbi/openai/models/responses/tool_choice_custom.rbi @@ -0,0 +1,39 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ToolChoiceCustom < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ToolChoiceCustom, + OpenAI::Internal::AnyHash + ) + end + + # The name of the custom tool to call. + sig { returns(String) } + attr_accessor :name + + # For custom tool calling, the type is always `custom`. + sig { returns(Symbol) } + attr_accessor :type + + # Use this option to force the model to call a specific custom tool. + sig { params(name: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The name of the custom tool to call. + name:, + # For custom tool calling, the type is always `custom`. 
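+          # (A hedged sketch, not part of the generated docs: a caller would
+          # typically write `OpenAI::Responses::ToolChoiceCustom.new(name: "my_tool")`
+          # and let this default apply; `my_tool` is a hypothetical tool name.)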
+ type: :custom + ) + end + + sig { override.returns({ name: String, type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/tool_choice_function.rbi b/rbi/openai/models/responses/tool_choice_function.rbi new file mode 100644 index 00000000..e74411c8 --- /dev/null +++ b/rbi/openai/models/responses/tool_choice_function.rbi @@ -0,0 +1,39 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ToolChoiceFunction < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ToolChoiceFunction, + OpenAI::Internal::AnyHash + ) + end + + # The name of the function to call. + sig { returns(String) } + attr_accessor :name + + # For function calling, the type is always `function`. + sig { returns(Symbol) } + attr_accessor :type + + # Use this option to force the model to call a specific function. + sig { params(name: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The name of the function to call. + name:, + # For function calling, the type is always `function`. + type: :function + ) + end + + sig { override.returns({ name: String, type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/tool_choice_mcp.rbi b/rbi/openai/models/responses/tool_choice_mcp.rbi new file mode 100644 index 00000000..7b2c782f --- /dev/null +++ b/rbi/openai/models/responses/tool_choice_mcp.rbi @@ -0,0 +1,53 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ToolChoiceMcp < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Responses::ToolChoiceMcp, OpenAI::Internal::AnyHash) + end + + # The label of the MCP server to use. + sig { returns(String) } + attr_accessor :server_label + + # For MCP tools, the type is always `mcp`. + sig { returns(Symbol) } + attr_accessor :type + + # The name of the tool to call on the server. + sig { returns(T.nilable(String)) } + attr_accessor :name + + # Use this option to force the model to call a specific tool on a remote MCP + # server. + sig do + params( + server_label: String, + name: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The label of the MCP server to use. + server_label:, + # The name of the tool to call on the server. + name: nil, + # For MCP tools, the type is always `mcp`. + type: :mcp + ) + end + + sig do + override.returns( + { server_label: String, type: Symbol, name: T.nilable(String) } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/tool_choice_options.rbi b/rbi/openai/models/responses/tool_choice_options.rbi new file mode 100644 index 00000000..01f24950 --- /dev/null +++ b/rbi/openai/models/responses/tool_choice_options.rbi @@ -0,0 +1,36 @@ +# typed: strong + +module OpenAI + module Models + module Responses + # Controls which (if any) tool is called by the model. + # + # `none` means the model will not call any tool and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling one or + # more tools. + # + # `required` means the model must call one or more tools. 
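+      #
+      # (A hedged usage sketch, not part of the generated docs: these symbols are
+      # what a caller passes as `tool_choice` when creating a response, e.g.
+      # `client.responses.create(model: "gpt-4.1", input: "...", tool_choice: :required)`,
+      # assuming `client` is an `OpenAI::Client` and the model name is illustrative.)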
+      module ToolChoiceOptions
+        extend OpenAI::Internal::Type::Enum
+
+        TaggedSymbol =
+          T.type_alias { T.all(Symbol, OpenAI::Responses::ToolChoiceOptions) }
+        OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+        NONE = T.let(:none, OpenAI::Responses::ToolChoiceOptions::TaggedSymbol)
+        AUTO = T.let(:auto, OpenAI::Responses::ToolChoiceOptions::TaggedSymbol)
+        REQUIRED =
+          T.let(:required, OpenAI::Responses::ToolChoiceOptions::TaggedSymbol)
+
+        sig do
+          override.returns(
+            T::Array[OpenAI::Responses::ToolChoiceOptions::TaggedSymbol]
+          )
+        end
+        def self.values
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/responses/tool_choice_types.rbi b/rbi/openai/models/responses/tool_choice_types.rbi
new file mode 100644
index 00000000..d56cb2eb
--- /dev/null
+++ b/rbi/openai/models/responses/tool_choice_types.rbi
@@ -0,0 +1,116 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Responses
+      class ToolChoiceTypes < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(OpenAI::Responses::ToolChoiceTypes, OpenAI::Internal::AnyHash)
+          end
+
+        # The type of hosted tool the model should use. Learn more about
+        # [built-in tools](https://platform.openai.com/docs/guides/tools).
+        #
+        # Allowed values are:
+        #
+        # - `file_search`
+        # - `web_search_preview`
+        # - `computer_use_preview`
+        # - `code_interpreter`
+        # - `image_generation`
+        sig { returns(OpenAI::Responses::ToolChoiceTypes::Type::OrSymbol) }
+        attr_accessor :type
+
+        # Indicates that the model should use a built-in tool to generate a response.
+        # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
+        sig do
+          params(
+            type: OpenAI::Responses::ToolChoiceTypes::Type::OrSymbol
+          ).returns(T.attached_class)
+        end
+        def self.new(
+          # The type of hosted tool the model should use. Learn more about
+          # [built-in tools](https://platform.openai.com/docs/guides/tools).
+          #
+          # Allowed values are:
+          #
+          # - `file_search`
+          # - `web_search_preview`
+          # - `computer_use_preview`
+          # - `code_interpreter`
+          # - `image_generation`
+          type:
+        )
+        end
+
+        sig do
+          override.returns(
+            { type: OpenAI::Responses::ToolChoiceTypes::Type::OrSymbol }
+          )
+        end
+        def to_hash
+        end
+
+        # The type of hosted tool the model should use. Learn more about
+        # [built-in tools](https://platform.openai.com/docs/guides/tools).
+ # + # Allowed values are: + # + # - `file_search` + # - `web_search_preview` + # - `computer_use_preview` + # - `code_interpreter` + # - `image_generation` + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ToolChoiceTypes::Type) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + FILE_SEARCH = + T.let( + :file_search, + OpenAI::Responses::ToolChoiceTypes::Type::TaggedSymbol + ) + WEB_SEARCH_PREVIEW = + T.let( + :web_search_preview, + OpenAI::Responses::ToolChoiceTypes::Type::TaggedSymbol + ) + COMPUTER_USE_PREVIEW = + T.let( + :computer_use_preview, + OpenAI::Responses::ToolChoiceTypes::Type::TaggedSymbol + ) + WEB_SEARCH_PREVIEW_2025_03_11 = + T.let( + :web_search_preview_2025_03_11, + OpenAI::Responses::ToolChoiceTypes::Type::TaggedSymbol + ) + IMAGE_GENERATION = + T.let( + :image_generation, + OpenAI::Responses::ToolChoiceTypes::Type::TaggedSymbol + ) + CODE_INTERPRETER = + T.let( + :code_interpreter, + OpenAI::Responses::ToolChoiceTypes::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Responses::ToolChoiceTypes::Type::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/web_search_tool.rbi b/rbi/openai/models/responses/web_search_tool.rbi new file mode 100644 index 00000000..5d05f938 --- /dev/null +++ b/rbi/openai/models/responses/web_search_tool.rbi @@ -0,0 +1,231 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class WebSearchTool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Responses::WebSearchTool, OpenAI::Internal::AnyHash) + end + + # The type of the web search tool. One of `web_search_preview` or + # `web_search_preview_2025_03_11`. + sig { returns(OpenAI::Responses::WebSearchTool::Type::OrSymbol) } + attr_accessor :type + + # High level guidance for the amount of context window space to use for the + # search. One of `low`, `medium`, or `high`. `medium` is the default. + sig do + returns( + T.nilable( + OpenAI::Responses::WebSearchTool::SearchContextSize::OrSymbol + ) + ) + end + attr_reader :search_context_size + + sig do + params( + search_context_size: + OpenAI::Responses::WebSearchTool::SearchContextSize::OrSymbol + ).void + end + attr_writer :search_context_size + + # The user's location. + sig do + returns(T.nilable(OpenAI::Responses::WebSearchTool::UserLocation)) + end + attr_reader :user_location + + sig do + params( + user_location: + T.nilable(OpenAI::Responses::WebSearchTool::UserLocation::OrHash) + ).void + end + attr_writer :user_location + + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search). + sig do + params( + type: OpenAI::Responses::WebSearchTool::Type::OrSymbol, + search_context_size: + OpenAI::Responses::WebSearchTool::SearchContextSize::OrSymbol, + user_location: + T.nilable(OpenAI::Responses::WebSearchTool::UserLocation::OrHash) + ).returns(T.attached_class) + end + def self.new( + # The type of the web search tool. One of `web_search_preview` or + # `web_search_preview_2025_03_11`. + type:, + # High level guidance for the amount of context window space to use for the + # search. One of `low`, `medium`, or `high`. `medium` is the default. + search_context_size: nil, + # The user's location. 
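+          # (Illustrative shape only, based on the `UserLocation` model below: e.g.
+          # `user_location: { type: :approximate, city: "San Francisco", country: "US" }`;
+          # the city and country values are placeholders.)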
+ user_location: nil + ) + end + + sig do + override.returns( + { + type: OpenAI::Responses::WebSearchTool::Type::OrSymbol, + search_context_size: + OpenAI::Responses::WebSearchTool::SearchContextSize::OrSymbol, + user_location: + T.nilable(OpenAI::Responses::WebSearchTool::UserLocation) + } + ) + end + def to_hash + end + + # The type of the web search tool. One of `web_search_preview` or + # `web_search_preview_2025_03_11`. + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::WebSearchTool::Type) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + WEB_SEARCH_PREVIEW = + T.let( + :web_search_preview, + OpenAI::Responses::WebSearchTool::Type::TaggedSymbol + ) + WEB_SEARCH_PREVIEW_2025_03_11 = + T.let( + :web_search_preview_2025_03_11, + OpenAI::Responses::WebSearchTool::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Responses::WebSearchTool::Type::TaggedSymbol] + ) + end + def self.values + end + end + + # High level guidance for the amount of context window space to use for the + # search. One of `low`, `medium`, or `high`. `medium` is the default. + module SearchContextSize + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::WebSearchTool::SearchContextSize) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = + T.let( + :low, + OpenAI::Responses::WebSearchTool::SearchContextSize::TaggedSymbol + ) + MEDIUM = + T.let( + :medium, + OpenAI::Responses::WebSearchTool::SearchContextSize::TaggedSymbol + ) + HIGH = + T.let( + :high, + OpenAI::Responses::WebSearchTool::SearchContextSize::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::WebSearchTool::SearchContextSize::TaggedSymbol + ] + ) + end + def self.values + end + end + + class UserLocation < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::WebSearchTool::UserLocation, + OpenAI::Internal::AnyHash + ) + end + + # The type of location approximation. Always `approximate`. + sig { returns(Symbol) } + attr_accessor :type + + # Free text input for the city of the user, e.g. `San Francisco`. + sig { returns(T.nilable(String)) } + attr_accessor :city + + # The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + # the user, e.g. `US`. + sig { returns(T.nilable(String)) } + attr_accessor :country + + # Free text input for the region of the user, e.g. `California`. + sig { returns(T.nilable(String)) } + attr_accessor :region + + # The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + # user, e.g. `America/Los_Angeles`. + sig { returns(T.nilable(String)) } + attr_accessor :timezone + + # The user's location. + sig do + params( + city: T.nilable(String), + country: T.nilable(String), + region: T.nilable(String), + timezone: T.nilable(String), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Free text input for the city of the user, e.g. `San Francisco`. + city: nil, + # The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + # the user, e.g. `US`. + country: nil, + # Free text input for the region of the user, e.g. `California`. + region: nil, + # The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + # user, e.g. `America/Los_Angeles`. + timezone: nil, + # The type of location approximation. Always `approximate`. 
+ type: :approximate + ) + end + + sig do + override.returns( + { + type: Symbol, + city: T.nilable(String), + country: T.nilable(String), + region: T.nilable(String), + timezone: T.nilable(String) + } + ) + end + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/responses_model.rbi b/rbi/openai/models/responses_model.rbi new file mode 100644 index 00000000..b7ec74bf --- /dev/null +++ b/rbi/openai/models/responses_model.rbi @@ -0,0 +1,91 @@ +# typed: strong + +module OpenAI + module Models + module ResponsesModel + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + String, + OpenAI::ChatModel::TaggedSymbol, + OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol + ) + end + + module ResponsesOnlyModel + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::ResponsesModel::ResponsesOnlyModel) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + O1_PRO = + T.let( + :"o1-pro", + OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol + ) + O1_PRO_2025_03_19 = + T.let( + :"o1-pro-2025-03-19", + OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol + ) + O3_PRO = + T.let( + :"o3-pro", + OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol + ) + O3_PRO_2025_06_10 = + T.let( + :"o3-pro-2025-06-10", + OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol + ) + O3_DEEP_RESEARCH = + T.let( + :"o3-deep-research", + OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol + ) + O3_DEEP_RESEARCH_2025_06_26 = + T.let( + :"o3-deep-research-2025-06-26", + OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol + ) + O4_MINI_DEEP_RESEARCH = + T.let( + :"o4-mini-deep-research", + OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol + ) + O4_MINI_DEEP_RESEARCH_2025_06_26 = + T.let( + :"o4-mini-deep-research-2025-06-26", + OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol + ) + COMPUTER_USE_PREVIEW = + T.let( + :"computer-use-preview", + OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol + ) + COMPUTER_USE_PREVIEW_2025_03_11 = + T.let( + :"computer-use-preview-2025-03-11", + OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol] + ) + end + def self.values + end + end + + sig { override.returns(T::Array[OpenAI::ResponsesModel::Variants]) } + def self.variants + end + end + end +end diff --git a/rbi/openai/models/static_file_chunking_strategy.rbi b/rbi/openai/models/static_file_chunking_strategy.rbi new file mode 100644 index 00000000..a75f0b65 --- /dev/null +++ b/rbi/openai/models/static_file_chunking_strategy.rbi @@ -0,0 +1,48 @@ +# typed: strong + +module OpenAI + module Models + class StaticFileChunkingStrategy < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::StaticFileChunkingStrategy, OpenAI::Internal::AnyHash) + end + + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + sig { returns(Integer) } + attr_accessor :chunk_overlap_tokens + + # The maximum number of tokens in each chunk. The default value is `800`. The + # minimum value is `100` and the maximum value is `4096`. 
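+      #
+      # (Worked example, illustrative only: the stated defaults,
+      # `{ chunk_overlap_tokens: 400, max_chunk_size_tokens: 800 }`, satisfy the
+      # constraint above, since 400 is exactly half of 800.)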
+ sig { returns(Integer) } + attr_accessor :max_chunk_size_tokens + + sig do + params( + chunk_overlap_tokens: Integer, + max_chunk_size_tokens: Integer + ).returns(T.attached_class) + end + def self.new( + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. + chunk_overlap_tokens:, + # The maximum number of tokens in each chunk. The default value is `800`. The + # minimum value is `100` and the maximum value is `4096`. + max_chunk_size_tokens: + ) + end + + sig do + override.returns( + { chunk_overlap_tokens: Integer, max_chunk_size_tokens: Integer } + ) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/static_file_chunking_strategy_object.rbi b/rbi/openai/models/static_file_chunking_strategy_object.rbi new file mode 100644 index 00000000..7dfe6e52 --- /dev/null +++ b/rbi/openai/models/static_file_chunking_strategy_object.rbi @@ -0,0 +1,46 @@ +# typed: strong + +module OpenAI + module Models + class StaticFileChunkingStrategyObject < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::StaticFileChunkingStrategyObject, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(OpenAI::StaticFileChunkingStrategy) } + attr_reader :static + + sig { params(static: OpenAI::StaticFileChunkingStrategy::OrHash).void } + attr_writer :static + + # Always `static`. + sig { returns(Symbol) } + attr_accessor :type + + sig do + params( + static: OpenAI::StaticFileChunkingStrategy::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + static:, + # Always `static`. + type: :static + ) + end + + sig do + override.returns( + { static: OpenAI::StaticFileChunkingStrategy, type: Symbol } + ) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/static_file_chunking_strategy_object_param.rbi b/rbi/openai/models/static_file_chunking_strategy_object_param.rbi new file mode 100644 index 00000000..0197459c --- /dev/null +++ b/rbi/openai/models/static_file_chunking_strategy_object_param.rbi @@ -0,0 +1,47 @@ +# typed: strong + +module OpenAI + module Models + class StaticFileChunkingStrategyObjectParam < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::StaticFileChunkingStrategyObjectParam, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(OpenAI::StaticFileChunkingStrategy) } + attr_reader :static + + sig { params(static: OpenAI::StaticFileChunkingStrategy::OrHash).void } + attr_writer :static + + # Always `static`. + sig { returns(Symbol) } + attr_accessor :type + + # Customize your own chunking strategy by setting chunk size and chunk overlap. + sig do + params( + static: OpenAI::StaticFileChunkingStrategy::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + static:, + # Always `static`. + type: :static + ) + end + + sig do + override.returns( + { static: OpenAI::StaticFileChunkingStrategy, type: Symbol } + ) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/upload.rbi b/rbi/openai/models/upload.rbi new file mode 100644 index 00000000..1c57dd2b --- /dev/null +++ b/rbi/openai/models/upload.rbi @@ -0,0 +1,123 @@ +# typed: strong + +module OpenAI + module Models + class Upload < OpenAI::Internal::Type::BaseModel + OrHash = T.type_alias { T.any(OpenAI::Upload, OpenAI::Internal::AnyHash) } + + # The Upload unique identifier, which can be referenced in API endpoints. 
+ sig { returns(String) } + attr_accessor :id + + # The intended number of bytes to be uploaded. + sig { returns(Integer) } + attr_accessor :bytes + + # The Unix timestamp (in seconds) for when the Upload was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The Unix timestamp (in seconds) for when the Upload will expire. + sig { returns(Integer) } + attr_accessor :expires_at + + # The name of the file to be uploaded. + sig { returns(String) } + attr_accessor :filename + + # The object type, which is always "upload". + sig { returns(Symbol) } + attr_accessor :object + + # The intended purpose of the file. + # [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose) + # for acceptable values. + sig { returns(String) } + attr_accessor :purpose + + # The status of the Upload. + sig { returns(OpenAI::Upload::Status::TaggedSymbol) } + attr_accessor :status + + # The `File` object represents a document that has been uploaded to OpenAI. + sig { returns(T.nilable(OpenAI::FileObject)) } + attr_reader :file + + sig { params(file: T.nilable(OpenAI::FileObject::OrHash)).void } + attr_writer :file + + # The Upload object can accept byte chunks in the form of Parts. + sig do + params( + id: String, + bytes: Integer, + created_at: Integer, + expires_at: Integer, + filename: String, + purpose: String, + status: OpenAI::Upload::Status::OrSymbol, + file: T.nilable(OpenAI::FileObject::OrHash), + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The Upload unique identifier, which can be referenced in API endpoints. + id:, + # The intended number of bytes to be uploaded. + bytes:, + # The Unix timestamp (in seconds) for when the Upload was created. + created_at:, + # The Unix timestamp (in seconds) for when the Upload will expire. + expires_at:, + # The name of the file to be uploaded. + filename:, + # The intended purpose of the file. + # [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose) + # for acceptable values. + purpose:, + # The status of the Upload. + status:, + # The `File` object represents a document that has been uploaded to OpenAI. + file: nil, + # The object type, which is always "upload". + object: :upload + ) + end + + sig do + override.returns( + { + id: String, + bytes: Integer, + created_at: Integer, + expires_at: Integer, + filename: String, + object: Symbol, + purpose: String, + status: OpenAI::Upload::Status::TaggedSymbol, + file: T.nilable(OpenAI::FileObject) + } + ) + end + def to_hash + end + + # The status of the Upload. 
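+      #
+      # (An illustrative lifecycle, inferred rather than stated in this diff: an
+      # Upload starts out `pending`, becomes `completed` once its Parts are uploaded
+      # and the Upload is completed, and otherwise ends `cancelled` or `expired`.)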
+      module Status
+        extend OpenAI::Internal::Type::Enum
+
+        TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::Upload::Status) }
+        OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+        PENDING = T.let(:pending, OpenAI::Upload::Status::TaggedSymbol)
+        COMPLETED = T.let(:completed, OpenAI::Upload::Status::TaggedSymbol)
+        CANCELLED = T.let(:cancelled, OpenAI::Upload::Status::TaggedSymbol)
+        EXPIRED = T.let(:expired, OpenAI::Upload::Status::TaggedSymbol)
+
+        sig { override.returns(T::Array[OpenAI::Upload::Status::TaggedSymbol]) }
+        def self.values
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/upload_cancel_params.rbi b/rbi/openai/models/upload_cancel_params.rbi
new file mode 100644
index 00000000..7f48aa91
--- /dev/null
+++ b/rbi/openai/models/upload_cancel_params.rbi
@@ -0,0 +1,27 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    class UploadCancelParams < OpenAI::Internal::Type::BaseModel
+      extend OpenAI::Internal::Type::RequestParameters::Converter
+      include OpenAI::Internal::Type::RequestParameters
+
+      OrHash =
+        T.type_alias do
+          T.any(OpenAI::UploadCancelParams, OpenAI::Internal::AnyHash)
+        end
+
+      sig do
+        params(request_options: OpenAI::RequestOptions::OrHash).returns(
+          T.attached_class
+        )
+      end
+      def self.new(request_options: {})
+      end
+
+      sig { override.returns({ request_options: OpenAI::RequestOptions }) }
+      def to_hash
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/upload_complete_params.rbi b/rbi/openai/models/upload_complete_params.rbi
new file mode 100644
index 00000000..fc57184f
--- /dev/null
+++ b/rbi/openai/models/upload_complete_params.rbi
@@ -0,0 +1,56 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    class UploadCompleteParams < OpenAI::Internal::Type::BaseModel
+      extend OpenAI::Internal::Type::RequestParameters::Converter
+      include OpenAI::Internal::Type::RequestParameters
+
+      OrHash =
+        T.type_alias do
+          T.any(OpenAI::UploadCompleteParams, OpenAI::Internal::AnyHash)
+        end
+
+      # The ordered list of Part IDs.
+      sig { returns(T::Array[String]) }
+      attr_accessor :part_ids
+
+      # The optional md5 checksum for the file contents to verify if the bytes uploaded
+      # match what you expect.
+      sig { returns(T.nilable(String)) }
+      attr_reader :md5
+
+      sig { params(md5: String).void }
+      attr_writer :md5
+
+      sig do
+        params(
+          part_ids: T::Array[String],
+          md5: String,
+          request_options: OpenAI::RequestOptions::OrHash
+        ).returns(T.attached_class)
+      end
+      def self.new(
+        # The ordered list of Part IDs.
+        part_ids:,
+        # The optional md5 checksum for the file contents to verify if the bytes uploaded
+        # match what you expect.
+        md5: nil,
+        request_options: {}
+      )
+      end
+
+      sig do
+        override.returns(
+          {
+            part_ids: T::Array[String],
+            md5: String,
+            request_options: OpenAI::RequestOptions
+          }
+        )
+      end
+      def to_hash
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/upload_create_params.rbi b/rbi/openai/models/upload_create_params.rbi
new file mode 100644
index 00000000..63349340
--- /dev/null
+++ b/rbi/openai/models/upload_create_params.rbi
@@ -0,0 +1,135 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    class UploadCreateParams < OpenAI::Internal::Type::BaseModel
+      extend OpenAI::Internal::Type::RequestParameters::Converter
+      include OpenAI::Internal::Type::RequestParameters
+
+      OrHash =
+        T.type_alias do
+          T.any(OpenAI::UploadCreateParams, OpenAI::Internal::AnyHash)
+        end
+
+      # The number of bytes in the file you are uploading.
+      sig { returns(Integer) }
+      attr_accessor :bytes
+
+      # The name of the file to upload.
+ sig { returns(String) } + attr_accessor :filename + + # The MIME type of the file. + # + # This must fall within the supported MIME types for your file purpose. See the + # supported MIME types for assistants and vision. + sig { returns(String) } + attr_accessor :mime_type + + # The intended purpose of the uploaded file. + # + # See the + # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + sig { returns(OpenAI::FilePurpose::OrSymbol) } + attr_accessor :purpose + + # The expiration policy for a file. By default, files with `purpose=batch` expire + # after 30 days and all other files are persisted until they are manually deleted. + sig { returns(T.nilable(OpenAI::UploadCreateParams::ExpiresAfter)) } + attr_reader :expires_after + + sig do + params( + expires_after: OpenAI::UploadCreateParams::ExpiresAfter::OrHash + ).void + end + attr_writer :expires_after + + sig do + params( + bytes: Integer, + filename: String, + mime_type: String, + purpose: OpenAI::FilePurpose::OrSymbol, + expires_after: OpenAI::UploadCreateParams::ExpiresAfter::OrHash, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The number of bytes in the file you are uploading. + bytes:, + # The name of the file to upload. + filename:, + # The MIME type of the file. + # + # This must fall within the supported MIME types for your file purpose. See the + # supported MIME types for assistants and vision. + mime_type:, + # The intended purpose of the uploaded file. + # + # See the + # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + purpose:, + # The expiration policy for a file. By default, files with `purpose=batch` expire + # after 30 days and all other files are persisted until they are manually deleted. + expires_after: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + bytes: Integer, + filename: String, + mime_type: String, + purpose: OpenAI::FilePurpose::OrSymbol, + expires_after: OpenAI::UploadCreateParams::ExpiresAfter, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::UploadCreateParams::ExpiresAfter, + OpenAI::Internal::AnyHash + ) + end + + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `created_at`. + sig { returns(Symbol) } + attr_accessor :anchor + + # The number of seconds after the anchor time that the file will expire. Must be + # between 3600 (1 hour) and 2592000 (30 days). + sig { returns(Integer) } + attr_accessor :seconds + + # The expiration policy for a file. By default, files with `purpose=batch` expire + # after 30 days and all other files are persisted until they are manually deleted. + sig do + params(seconds: Integer, anchor: Symbol).returns(T.attached_class) + end + def self.new( + # The number of seconds after the anchor time that the file will expire. Must be + # between 3600 (1 hour) and 2592000 (30 days). + seconds:, + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `created_at`. 
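+          # (Illustrative only: `{ anchor: :created_at, seconds: 3600 }` would expire
+          # the file one hour after creation, the minimum window allowed above.)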
+ anchor: :created_at + ) + end + + sig { override.returns({ anchor: Symbol, seconds: Integer }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/uploads/part_create_params.rbi b/rbi/openai/models/uploads/part_create_params.rbi new file mode 100644 index 00000000..031b224d --- /dev/null +++ b/rbi/openai/models/uploads/part_create_params.rbi @@ -0,0 +1,45 @@ +# typed: strong + +module OpenAI + module Models + module Uploads + class PartCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::Uploads::PartCreateParams, OpenAI::Internal::AnyHash) + end + + # The chunk of bytes for this Part. + sig { returns(OpenAI::Internal::FileInput) } + attr_accessor :data + + sig do + params( + data: OpenAI::Internal::FileInput, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The chunk of bytes for this Part. + data:, + request_options: {} + ) + end + + sig do + override.returns( + { + data: OpenAI::Internal::FileInput, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/uploads/upload_part.rbi b/rbi/openai/models/uploads/upload_part.rbi new file mode 100644 index 00000000..71d2d997 --- /dev/null +++ b/rbi/openai/models/uploads/upload_part.rbi @@ -0,0 +1,66 @@ +# typed: strong + +module OpenAI + module Models + UploadPart = Uploads::UploadPart + + module Uploads + class UploadPart < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Uploads::UploadPart, OpenAI::Internal::AnyHash) + end + + # The upload Part unique identifier, which can be referenced in API endpoints. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) for when the Part was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The object type, which is always `upload.part`. + sig { returns(Symbol) } + attr_accessor :object + + # The ID of the Upload object that this Part was added to. + sig { returns(String) } + attr_accessor :upload_id + + # The upload Part represents a chunk of bytes we can add to an Upload object. + sig do + params( + id: String, + created_at: Integer, + upload_id: String, + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The upload Part unique identifier, which can be referenced in API endpoints. + id:, + # The Unix timestamp (in seconds) for when the Part was created. + created_at:, + # The ID of the Upload object that this Part was added to. + upload_id:, + # The object type, which is always `upload.part`. + object: :"upload.part" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + object: Symbol, + upload_id: String + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/vector_store.rbi b/rbi/openai/models/vector_store.rbi new file mode 100644 index 00000000..1e5ec9ca --- /dev/null +++ b/rbi/openai/models/vector_store.rbi @@ -0,0 +1,254 @@ +# typed: strong + +module OpenAI + module Models + class VectorStore < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias { T.any(OpenAI::VectorStore, OpenAI::Internal::AnyHash) } + + # The identifier, which can be referenced in API endpoints. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) for when the vector store was created. 
+      sig { returns(Integer) }
+      attr_accessor :created_at
+
+      sig { returns(OpenAI::VectorStore::FileCounts) }
+      attr_reader :file_counts
+
+      sig { params(file_counts: OpenAI::VectorStore::FileCounts::OrHash).void }
+      attr_writer :file_counts
+
+      # The Unix timestamp (in seconds) for when the vector store was last active.
+      sig { returns(T.nilable(Integer)) }
+      attr_accessor :last_active_at
+
+      # Set of 16 key-value pairs that can be attached to an object. This can be useful
+      # for storing additional information about the object in a structured format, and
+      # querying for objects via API or the dashboard.
+      #
+      # Keys are strings with a maximum length of 64 characters. Values are strings with
+      # a maximum length of 512 characters.
+      sig { returns(T.nilable(T::Hash[Symbol, String])) }
+      attr_accessor :metadata
+
+      # The name of the vector store.
+      sig { returns(String) }
+      attr_accessor :name
+
+      # The object type, which is always `vector_store`.
+      sig { returns(Symbol) }
+      attr_accessor :object
+
+      # The status of the vector store, which can be either `expired`, `in_progress`, or
+      # `completed`. A status of `completed` indicates that the vector store is ready
+      # for use.
+      sig { returns(OpenAI::VectorStore::Status::TaggedSymbol) }
+      attr_accessor :status
+
+      # The total number of bytes used by the files in the vector store.
+      sig { returns(Integer) }
+      attr_accessor :usage_bytes
+
+      # The expiration policy for a vector store.
+      sig { returns(T.nilable(OpenAI::VectorStore::ExpiresAfter)) }
+      attr_reader :expires_after
+
+      sig do
+        params(expires_after: OpenAI::VectorStore::ExpiresAfter::OrHash).void
+      end
+      attr_writer :expires_after
+
+      # The Unix timestamp (in seconds) for when the vector store will expire.
+      sig { returns(T.nilable(Integer)) }
+      attr_accessor :expires_at
+
+      # A vector store is a collection of processed files that can be used by the
+      # `file_search` tool.
+      sig do
+        params(
+          id: String,
+          created_at: Integer,
+          file_counts: OpenAI::VectorStore::FileCounts::OrHash,
+          last_active_at: T.nilable(Integer),
+          metadata: T.nilable(T::Hash[Symbol, String]),
+          name: String,
+          status: OpenAI::VectorStore::Status::OrSymbol,
+          usage_bytes: Integer,
+          expires_after: OpenAI::VectorStore::ExpiresAfter::OrHash,
+          expires_at: T.nilable(Integer),
+          object: Symbol
+        ).returns(T.attached_class)
+      end
+      def self.new(
+        # The identifier, which can be referenced in API endpoints.
+        id:,
+        # The Unix timestamp (in seconds) for when the vector store was created.
+        created_at:,
+        file_counts:,
+        # The Unix timestamp (in seconds) for when the vector store was last active.
+        last_active_at:,
+        # Set of 16 key-value pairs that can be attached to an object. This can be useful
+        # for storing additional information about the object in a structured format, and
+        # querying for objects via API or the dashboard.
+        #
+        # Keys are strings with a maximum length of 64 characters. Values are strings with
+        # a maximum length of 512 characters.
+        metadata:,
+        # The name of the vector store.
+        name:,
+        # The status of the vector store, which can be either `expired`, `in_progress`, or
+        # `completed`. A status of `completed` indicates that the vector store is ready
+        # for use.
+        status:,
+        # The total number of bytes used by the files in the vector store.
+        usage_bytes:,
+        # The expiration policy for a vector store.
+        expires_after: nil,
+        # The Unix timestamp (in seconds) for when the vector store will expire.
+        expires_at: nil,
+        # The object type, which is always `vector_store`.
+ object: :vector_store + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + file_counts: OpenAI::VectorStore::FileCounts, + last_active_at: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + object: Symbol, + status: OpenAI::VectorStore::Status::TaggedSymbol, + usage_bytes: Integer, + expires_after: OpenAI::VectorStore::ExpiresAfter, + expires_at: T.nilable(Integer) + } + ) + end + def to_hash + end + + class FileCounts < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::VectorStore::FileCounts, OpenAI::Internal::AnyHash) + end + + # The number of files that were cancelled. + sig { returns(Integer) } + attr_accessor :cancelled + + # The number of files that have been successfully processed. + sig { returns(Integer) } + attr_accessor :completed + + # The number of files that have failed to process. + sig { returns(Integer) } + attr_accessor :failed + + # The number of files that are currently being processed. + sig { returns(Integer) } + attr_accessor :in_progress + + # The total number of files. + sig { returns(Integer) } + attr_accessor :total + + sig do + params( + cancelled: Integer, + completed: Integer, + failed: Integer, + in_progress: Integer, + total: Integer + ).returns(T.attached_class) + end + def self.new( + # The number of files that were cancelled. + cancelled:, + # The number of files that have been successfully processed. + completed:, + # The number of files that have failed to process. + failed:, + # The number of files that are currently being processed. + in_progress:, + # The total number of files. + total: + ) + end + + sig do + override.returns( + { + cancelled: Integer, + completed: Integer, + failed: Integer, + in_progress: Integer, + total: Integer + } + ) + end + def to_hash + end + end + + # The status of the vector store, which can be either `expired`, `in_progress`, or + # `completed`. A status of `completed` indicates that the vector store is ready + # for use. + module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::VectorStore::Status) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EXPIRED = T.let(:expired, OpenAI::VectorStore::Status::TaggedSymbol) + IN_PROGRESS = + T.let(:in_progress, OpenAI::VectorStore::Status::TaggedSymbol) + COMPLETED = T.let(:completed, OpenAI::VectorStore::Status::TaggedSymbol) + + sig do + override.returns(T::Array[OpenAI::VectorStore::Status::TaggedSymbol]) + end + def self.values + end + end + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::VectorStore::ExpiresAfter, OpenAI::Internal::AnyHash) + end + + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `last_active_at`. + sig { returns(Symbol) } + attr_accessor :anchor + + # The number of days after the anchor time that the vector store will expire. + sig { returns(Integer) } + attr_accessor :days + + # The expiration policy for a vector store. + sig { params(days: Integer, anchor: Symbol).returns(T.attached_class) } + def self.new( + # The number of days after the anchor time that the vector store will expire. + days:, + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `last_active_at`. 
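+          # (Illustrative only: `{ anchor: :last_active_at, days: 7 }` would expire
+          # the vector store one week after it was last active.)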
+ anchor: :last_active_at + ) + end + + sig { override.returns({ anchor: Symbol, days: Integer }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/vector_store_create_params.rbi b/rbi/openai/models/vector_store_create_params.rbi new file mode 100644 index 00000000..674fc93d --- /dev/null +++ b/rbi/openai/models/vector_store_create_params.rbi @@ -0,0 +1,166 @@ +# typed: strong + +module OpenAI + module Models + class VectorStoreCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::VectorStoreCreateParams, OpenAI::Internal::AnyHash) + end + + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. + sig do + returns( + T.nilable( + T.any( + OpenAI::AutoFileChunkingStrategyParam, + OpenAI::StaticFileChunkingStrategyObjectParam + ) + ) + ) + end + attr_reader :chunking_strategy + + sig do + params( + chunking_strategy: + T.any( + OpenAI::AutoFileChunkingStrategyParam::OrHash, + OpenAI::StaticFileChunkingStrategyObjectParam::OrHash + ) + ).void + end + attr_writer :chunking_strategy + + # The expiration policy for a vector store. + sig { returns(T.nilable(OpenAI::VectorStoreCreateParams::ExpiresAfter)) } + attr_reader :expires_after + + sig do + params( + expires_after: OpenAI::VectorStoreCreateParams::ExpiresAfter::OrHash + ).void + end + attr_writer :expires_after + + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. + sig { returns(T.nilable(T::Array[String])) } + attr_reader :file_ids + + sig { params(file_ids: T::Array[String]).void } + attr_writer :file_ids + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The name of the vector store. + sig { returns(T.nilable(String)) } + attr_reader :name + + sig { params(name: String).void } + attr_writer :name + + sig do + params( + chunking_strategy: + T.any( + OpenAI::AutoFileChunkingStrategyParam::OrHash, + OpenAI::StaticFileChunkingStrategyObjectParam::OrHash + ), + expires_after: OpenAI::VectorStoreCreateParams::ExpiresAfter::OrHash, + file_ids: T::Array[String], + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. + chunking_strategy: nil, + # The expiration policy for a vector store. + expires_after: nil, + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. + file_ids: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. 
+ # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The name of the vector store. + name: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + chunking_strategy: + T.any( + OpenAI::AutoFileChunkingStrategyParam, + OpenAI::StaticFileChunkingStrategyObjectParam + ), + expires_after: OpenAI::VectorStoreCreateParams::ExpiresAfter, + file_ids: T::Array[String], + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStoreCreateParams::ExpiresAfter, + OpenAI::Internal::AnyHash + ) + end + + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `last_active_at`. + sig { returns(Symbol) } + attr_accessor :anchor + + # The number of days after the anchor time that the vector store will expire. + sig { returns(Integer) } + attr_accessor :days + + # The expiration policy for a vector store. + sig { params(days: Integer, anchor: Symbol).returns(T.attached_class) } + def self.new( + # The number of days after the anchor time that the vector store will expire. + days:, + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `last_active_at`. + anchor: :last_active_at + ) + end + + sig { override.returns({ anchor: Symbol, days: Integer }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/vector_store_delete_params.rbi b/rbi/openai/models/vector_store_delete_params.rbi new file mode 100644 index 00000000..e7cf7db1 --- /dev/null +++ b/rbi/openai/models/vector_store_delete_params.rbi @@ -0,0 +1,27 @@ +# typed: strong + +module OpenAI + module Models + class VectorStoreDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::VectorStoreDeleteParams, OpenAI::Internal::AnyHash) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/vector_store_deleted.rbi b/rbi/openai/models/vector_store_deleted.rbi new file mode 100644 index 00000000..4967947f --- /dev/null +++ b/rbi/openai/models/vector_store_deleted.rbi @@ -0,0 +1,35 @@ +# typed: strong + +module OpenAI + module Models + class VectorStoreDeleted < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::VectorStoreDeleted, OpenAI::Internal::AnyHash) + end + + sig { returns(String) } + attr_accessor :id + + sig { returns(T::Boolean) } + attr_accessor :deleted + + sig { returns(Symbol) } + attr_accessor :object + + sig do + params(id: String, deleted: T::Boolean, object: Symbol).returns( + T.attached_class + ) + end + def self.new(id:, deleted:, object: :"vector_store.deleted") + end + + sig do + override.returns({ id: String, deleted: T::Boolean, object: Symbol }) + end + def to_hash + end + end + end +end diff --git a/rbi/openai/models/vector_store_list_params.rbi b/rbi/openai/models/vector_store_list_params.rbi new file mode 100644 index 00000000..4ce80ba7 --- /dev/null +++ b/rbi/openai/models/vector_store_list_params.rbi @@ -0,0 +1,116 @@ +# typed: 
strong + +module OpenAI + module Models + class VectorStoreListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::VectorStoreListParams, OpenAI::Internal::AnyHash) + end + + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + sig { returns(T.nilable(String)) } + attr_reader :before + + sig { params(before: String).void } + attr_writer :before + + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + sig { returns(T.nilable(OpenAI::VectorStoreListParams::Order::OrSymbol)) } + attr_reader :order + + sig { params(order: OpenAI::VectorStoreListParams::Order::OrSymbol).void } + attr_writer :order + + sig do + params( + after: String, + before: String, + limit: Integer, + order: OpenAI::VectorStoreListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + before: String, + limit: Integer, + order: OpenAI::VectorStoreListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. 
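+      #
+      # An illustrative sketch (editorial addition, not generated from the API
+      # spec): these params can be built directly, e.g.
+      #
+      #   OpenAI::VectorStoreListParams.new(
+      #     limit: 20,
+      #     order: OpenAI::VectorStoreListParams::Order::DESC
+      #   )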
+ module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias { T.all(Symbol, OpenAI::VectorStoreListParams::Order) } + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = T.let(:asc, OpenAI::VectorStoreListParams::Order::TaggedSymbol) + DESC = T.let(:desc, OpenAI::VectorStoreListParams::Order::TaggedSymbol) + + sig do + override.returns( + T::Array[OpenAI::VectorStoreListParams::Order::TaggedSymbol] + ) + end + def self.values + end + end + end + end +end diff --git a/rbi/openai/models/vector_store_retrieve_params.rbi b/rbi/openai/models/vector_store_retrieve_params.rbi new file mode 100644 index 00000000..337cd6e5 --- /dev/null +++ b/rbi/openai/models/vector_store_retrieve_params.rbi @@ -0,0 +1,27 @@ +# typed: strong + +module OpenAI + module Models + class VectorStoreRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::VectorStoreRetrieveParams, OpenAI::Internal::AnyHash) + end + + sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + T.attached_class + ) + end + def self.new(request_options: {}) + end + + sig { override.returns({ request_options: OpenAI::RequestOptions }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/vector_store_search_params.rbi b/rbi/openai/models/vector_store_search_params.rbi new file mode 100644 index 00000000..74b8d1bd --- /dev/null +++ b/rbi/openai/models/vector_store_search_params.rbi @@ -0,0 +1,253 @@ +# typed: strong + +module OpenAI + module Models + class VectorStoreSearchParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::VectorStoreSearchParams, OpenAI::Internal::AnyHash) + end + + # A query string for a search + sig { returns(OpenAI::VectorStoreSearchParams::Query::Variants) } + attr_accessor :query + + # A filter to apply based on file attributes. + sig do + returns( + T.nilable(T.any(OpenAI::ComparisonFilter, OpenAI::CompoundFilter)) + ) + end + attr_reader :filters + + sig do + params( + filters: + T.any( + OpenAI::ComparisonFilter::OrHash, + OpenAI::CompoundFilter::OrHash + ) + ).void + end + attr_writer :filters + + # The maximum number of results to return. This number should be between 1 and 50 + # inclusive. + sig { returns(T.nilable(Integer)) } + attr_reader :max_num_results + + sig { params(max_num_results: Integer).void } + attr_writer :max_num_results + + # Ranking options for search. + sig do + returns(T.nilable(OpenAI::VectorStoreSearchParams::RankingOptions)) + end + attr_reader :ranking_options + + sig do + params( + ranking_options: + OpenAI::VectorStoreSearchParams::RankingOptions::OrHash + ).void + end + attr_writer :ranking_options + + # Whether to rewrite the natural language query for vector search. 
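+      #
+      # An illustrative sketch (editorial addition, not generated from the API
+      # spec): a minimal search request body could be built as
+      #
+      #   OpenAI::VectorStoreSearchParams.new(
+      #     query: "refund policy",
+      #     rewrite_query: true
+      #   )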
+ sig { returns(T.nilable(T::Boolean)) } + attr_reader :rewrite_query + + sig { params(rewrite_query: T::Boolean).void } + attr_writer :rewrite_query + + sig do + params( + query: OpenAI::VectorStoreSearchParams::Query::Variants, + filters: + T.any( + OpenAI::ComparisonFilter::OrHash, + OpenAI::CompoundFilter::OrHash + ), + max_num_results: Integer, + ranking_options: + OpenAI::VectorStoreSearchParams::RankingOptions::OrHash, + rewrite_query: T::Boolean, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # A query string for a search + query:, + # A filter to apply based on file attributes. + filters: nil, + # The maximum number of results to return. This number should be between 1 and 50 + # inclusive. + max_num_results: nil, + # Ranking options for search. + ranking_options: nil, + # Whether to rewrite the natural language query for vector search. + rewrite_query: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + query: OpenAI::VectorStoreSearchParams::Query::Variants, + filters: T.any(OpenAI::ComparisonFilter, OpenAI::CompoundFilter), + max_num_results: Integer, + ranking_options: OpenAI::VectorStoreSearchParams::RankingOptions, + rewrite_query: T::Boolean, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # A query string for a search + module Query + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(String, T::Array[String]) } + + sig do + override.returns( + T::Array[OpenAI::VectorStoreSearchParams::Query::Variants] + ) + end + def self.variants + end + + StringArray = + T.let( + OpenAI::Internal::Type::ArrayOf[String], + OpenAI::Internal::Type::Converter + ) + end + + # A filter to apply based on file attributes. + module Filters + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any(OpenAI::ComparisonFilter, OpenAI::CompoundFilter) + end + + sig do + override.returns( + T::Array[OpenAI::VectorStoreSearchParams::Filters::Variants] + ) + end + def self.variants + end + end + + class RankingOptions < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStoreSearchParams::RankingOptions, + OpenAI::Internal::AnyHash + ) + end + + # Enable re-ranking; set to `none` to disable, which can help reduce latency. + sig do + returns( + T.nilable( + OpenAI::VectorStoreSearchParams::RankingOptions::Ranker::OrSymbol + ) + ) + end + attr_reader :ranker + + sig do + params( + ranker: + OpenAI::VectorStoreSearchParams::RankingOptions::Ranker::OrSymbol + ).void + end + attr_writer :ranker + + sig { returns(T.nilable(Float)) } + attr_reader :score_threshold + + sig { params(score_threshold: Float).void } + attr_writer :score_threshold + + # Ranking options for search. + sig do + params( + ranker: + OpenAI::VectorStoreSearchParams::RankingOptions::Ranker::OrSymbol, + score_threshold: Float + ).returns(T.attached_class) + end + def self.new( + # Enable re-ranking; set to `none` to disable, which can help reduce latency. + ranker: nil, + score_threshold: nil + ) + end + + sig do + override.returns( + { + ranker: + OpenAI::VectorStoreSearchParams::RankingOptions::Ranker::OrSymbol, + score_threshold: Float + } + ) + end + def to_hash + end + + # Enable re-ranking; set to `none` to disable, which can help reduce latency. 
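+        #
+        # An illustrative sketch (editorial addition, not generated from the
+        # API spec):
+        #
+        #   OpenAI::VectorStoreSearchParams::RankingOptions.new(
+        #     ranker: :auto,
+        #     score_threshold: 0.5
+        #   )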
+ module Ranker + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::VectorStoreSearchParams::RankingOptions::Ranker + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + NONE = + T.let( + :none, + OpenAI::VectorStoreSearchParams::RankingOptions::Ranker::TaggedSymbol + ) + AUTO = + T.let( + :auto, + OpenAI::VectorStoreSearchParams::RankingOptions::Ranker::TaggedSymbol + ) + DEFAULT_2024_11_15 = + T.let( + :"default-2024-11-15", + OpenAI::VectorStoreSearchParams::RankingOptions::Ranker::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::VectorStoreSearchParams::RankingOptions::Ranker::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/vector_store_search_response.rbi b/rbi/openai/models/vector_store_search_response.rbi new file mode 100644 index 00000000..eb825847 --- /dev/null +++ b/rbi/openai/models/vector_store_search_response.rbi @@ -0,0 +1,202 @@ +# typed: strong + +module OpenAI + module Models + class VectorStoreSearchResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::VectorStoreSearchResponse, + OpenAI::Internal::AnyHash + ) + end + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + sig do + returns( + T.nilable( + T::Hash[ + Symbol, + OpenAI::Models::VectorStoreSearchResponse::Attribute::Variants + ] + ) + ) + end + attr_accessor :attributes + + # Content chunks from the file. + sig do + returns(T::Array[OpenAI::Models::VectorStoreSearchResponse::Content]) + end + attr_accessor :content + + # The ID of the vector store file. + sig { returns(String) } + attr_accessor :file_id + + # The name of the vector store file. + sig { returns(String) } + attr_accessor :filename + + # The similarity score for the result. + sig { returns(Float) } + attr_accessor :score + + sig do + params( + attributes: + T.nilable( + T::Hash[ + Symbol, + OpenAI::Models::VectorStoreSearchResponse::Attribute::Variants + ] + ), + content: + T::Array[ + OpenAI::Models::VectorStoreSearchResponse::Content::OrHash + ], + file_id: String, + filename: String, + score: Float + ).returns(T.attached_class) + end + def self.new( + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + attributes:, + # Content chunks from the file. + content:, + # The ID of the vector store file. + file_id:, + # The name of the vector store file. + filename:, + # The similarity score for the result. 
+ score: + ) + end + + sig do + override.returns( + { + attributes: + T.nilable( + T::Hash[ + Symbol, + OpenAI::Models::VectorStoreSearchResponse::Attribute::Variants + ] + ), + content: + T::Array[OpenAI::Models::VectorStoreSearchResponse::Content], + file_id: String, + filename: String, + score: Float + } + ) + end + def to_hash + end + + module Attribute + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(String, Float, T::Boolean) } + + sig do + override.returns( + T::Array[ + OpenAI::Models::VectorStoreSearchResponse::Attribute::Variants + ] + ) + end + def self.variants + end + end + + class Content < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::VectorStoreSearchResponse::Content, + OpenAI::Internal::AnyHash + ) + end + + # The text content returned from search. + sig { returns(String) } + attr_accessor :text + + # The type of content. + sig do + returns( + OpenAI::Models::VectorStoreSearchResponse::Content::Type::TaggedSymbol + ) + end + attr_accessor :type + + sig do + params( + text: String, + type: + OpenAI::Models::VectorStoreSearchResponse::Content::Type::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # The text content returned from search. + text:, + # The type of content. + type: + ) + end + + sig do + override.returns( + { + text: String, + type: + OpenAI::Models::VectorStoreSearchResponse::Content::Type::TaggedSymbol + } + ) + end + def to_hash + end + + # The type of content. + module Type + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Models::VectorStoreSearchResponse::Content::Type + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + TEXT = + T.let( + :text, + OpenAI::Models::VectorStoreSearchResponse::Content::Type::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Models::VectorStoreSearchResponse::Content::Type::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/vector_store_update_params.rbi b/rbi/openai/models/vector_store_update_params.rbi new file mode 100644 index 00000000..1d755b92 --- /dev/null +++ b/rbi/openai/models/vector_store_update_params.rbi @@ -0,0 +1,113 @@ +# typed: strong + +module OpenAI + module Models + class VectorStoreUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any(OpenAI::VectorStoreUpdateParams, OpenAI::Internal::AnyHash) + end + + # The expiration policy for a vector store. + sig { returns(T.nilable(OpenAI::VectorStoreUpdateParams::ExpiresAfter)) } + attr_reader :expires_after + + sig do + params( + expires_after: + T.nilable(OpenAI::VectorStoreUpdateParams::ExpiresAfter::OrHash) + ).void + end + attr_writer :expires_after + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + sig { returns(T.nilable(T::Hash[Symbol, String])) } + attr_accessor :metadata + + # The name of the vector store. 
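+      #
+      # An illustrative sketch (editorial addition, not generated from the API
+      # spec): renaming a store while replacing its metadata could look like
+      #
+      #   OpenAI::VectorStoreUpdateParams.new(
+      #     name: "research-notes",
+      #     metadata: {project: "q3-report"}
+      #   )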
+ sig { returns(T.nilable(String)) } + attr_accessor :name + + sig do + params( + expires_after: + T.nilable(OpenAI::VectorStoreUpdateParams::ExpiresAfter::OrHash), + metadata: T.nilable(T::Hash[Symbol, String]), + name: T.nilable(String), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # The expiration policy for a vector store. + expires_after: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The name of the vector store. + name: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + expires_after: + T.nilable(OpenAI::VectorStoreUpdateParams::ExpiresAfter), + metadata: T.nilable(T::Hash[Symbol, String]), + name: T.nilable(String), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStoreUpdateParams::ExpiresAfter, + OpenAI::Internal::AnyHash + ) + end + + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `last_active_at`. + sig { returns(Symbol) } + attr_accessor :anchor + + # The number of days after the anchor time that the vector store will expire. + sig { returns(Integer) } + attr_accessor :days + + # The expiration policy for a vector store. + sig { params(days: Integer, anchor: Symbol).returns(T.attached_class) } + def self.new( + # The number of days after the anchor time that the vector store will expire. + days:, + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `last_active_at`. 
+ anchor: :last_active_at + ) + end + + sig { override.returns({ anchor: Symbol, days: Integer }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/vector_stores/file_batch_cancel_params.rbi b/rbi/openai/models/vector_stores/file_batch_cancel_params.rbi new file mode 100644 index 00000000..dbee23e0 --- /dev/null +++ b/rbi/openai/models/vector_stores/file_batch_cancel_params.rbi @@ -0,0 +1,40 @@ +# typed: strong + +module OpenAI + module Models + module VectorStores + class FileBatchCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStores::FileBatchCancelParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :vector_store_id + + sig do + params( + vector_store_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(vector_store_id:, request_options: {}) + end + + sig do + override.returns( + { vector_store_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/vector_stores/file_batch_create_params.rbi b/rbi/openai/models/vector_stores/file_batch_create_params.rbi new file mode 100644 index 00000000..c4e42f6b --- /dev/null +++ b/rbi/openai/models/vector_stores/file_batch_create_params.rbi @@ -0,0 +1,143 @@ +# typed: strong + +module OpenAI + module Models + module VectorStores + class FileBatchCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStores::FileBatchCreateParams, + OpenAI::Internal::AnyHash + ) + end + + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. + sig { returns(T::Array[String]) } + attr_accessor :file_ids + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + sig do + returns( + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::FileBatchCreateParams::Attribute::Variants + ] + ) + ) + end + attr_accessor :attributes + + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. 
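+        #
+        # An illustrative sketch (editorial addition, not generated from the
+        # API spec): batching two files with the default `auto` strategy could
+        # look like
+        #
+        #   OpenAI::VectorStores::FileBatchCreateParams.new(
+        #     file_ids: ["file_abc123", "file_def456"]
+        #   )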
+ sig do + returns( + T.nilable( + T.any( + OpenAI::AutoFileChunkingStrategyParam, + OpenAI::StaticFileChunkingStrategyObjectParam + ) + ) + ) + end + attr_reader :chunking_strategy + + sig do + params( + chunking_strategy: + T.any( + OpenAI::AutoFileChunkingStrategyParam::OrHash, + OpenAI::StaticFileChunkingStrategyObjectParam::OrHash + ) + ).void + end + attr_writer :chunking_strategy + + sig do + params( + file_ids: T::Array[String], + attributes: + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::FileBatchCreateParams::Attribute::Variants + ] + ), + chunking_strategy: + T.any( + OpenAI::AutoFileChunkingStrategyParam::OrHash, + OpenAI::StaticFileChunkingStrategyObjectParam::OrHash + ), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. + file_ids:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + attributes: nil, + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. + chunking_strategy: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + file_ids: T::Array[String], + attributes: + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::FileBatchCreateParams::Attribute::Variants + ] + ), + chunking_strategy: + T.any( + OpenAI::AutoFileChunkingStrategyParam, + OpenAI::StaticFileChunkingStrategyObjectParam + ), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + module Attribute + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(String, Float, T::Boolean) } + + sig do + override.returns( + T::Array[ + OpenAI::VectorStores::FileBatchCreateParams::Attribute::Variants + ] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/vector_stores/file_batch_list_files_params.rbi b/rbi/openai/models/vector_stores/file_batch_list_files_params.rbi new file mode 100644 index 00000000..036aa011 --- /dev/null +++ b/rbi/openai/models/vector_stores/file_batch_list_files_params.rbi @@ -0,0 +1,224 @@ +# typed: strong + +module OpenAI + module Models + module VectorStores + class FileBatchListFilesParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStores::FileBatchListFilesParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :vector_store_id + + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. 
For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + sig { returns(T.nilable(String)) } + attr_reader :before + + sig { params(before: String).void } + attr_writer :before + + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. + sig do + returns( + T.nilable( + OpenAI::VectorStores::FileBatchListFilesParams::Filter::OrSymbol + ) + ) + end + attr_reader :filter + + sig do + params( + filter: + OpenAI::VectorStores::FileBatchListFilesParams::Filter::OrSymbol + ).void + end + attr_writer :filter + + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + sig do + returns( + T.nilable( + OpenAI::VectorStores::FileBatchListFilesParams::Order::OrSymbol + ) + ) + end + attr_reader :order + + sig do + params( + order: + OpenAI::VectorStores::FileBatchListFilesParams::Order::OrSymbol + ).void + end + attr_writer :order + + sig do + params( + vector_store_id: String, + after: String, + before: String, + filter: + OpenAI::VectorStores::FileBatchListFilesParams::Filter::OrSymbol, + limit: Integer, + order: + OpenAI::VectorStores::FileBatchListFilesParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + vector_store_id:, + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. + filter: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + vector_store_id: String, + after: String, + before: String, + filter: + OpenAI::VectorStores::FileBatchListFilesParams::Filter::OrSymbol, + limit: Integer, + order: + OpenAI::VectorStores::FileBatchListFilesParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. 
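+        #
+        # An illustrative sketch (editorial addition, not generated from the
+        # API spec): listing only failed files in a batch could look like
+        #
+        #   OpenAI::VectorStores::FileBatchListFilesParams.new(
+        #     vector_store_id: "vs_abc123",
+        #     filter: :failed
+        #   )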
+ module Filter + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::VectorStores::FileBatchListFilesParams::Filter + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::VectorStores::FileBatchListFilesParams::Filter::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::VectorStores::FileBatchListFilesParams::Filter::TaggedSymbol + ) + FAILED = + T.let( + :failed, + OpenAI::VectorStores::FileBatchListFilesParams::Filter::TaggedSymbol + ) + CANCELLED = + T.let( + :cancelled, + OpenAI::VectorStores::FileBatchListFilesParams::Filter::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::VectorStores::FileBatchListFilesParams::Filter::TaggedSymbol + ] + ) + end + def self.values + end + end + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::VectorStores::FileBatchListFilesParams::Order + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = + T.let( + :asc, + OpenAI::VectorStores::FileBatchListFilesParams::Order::TaggedSymbol + ) + DESC = + T.let( + :desc, + OpenAI::VectorStores::FileBatchListFilesParams::Order::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::VectorStores::FileBatchListFilesParams::Order::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/vector_stores/file_batch_retrieve_params.rbi b/rbi/openai/models/vector_stores/file_batch_retrieve_params.rbi new file mode 100644 index 00000000..19120d57 --- /dev/null +++ b/rbi/openai/models/vector_stores/file_batch_retrieve_params.rbi @@ -0,0 +1,40 @@ +# typed: strong + +module OpenAI + module Models + module VectorStores + class FileBatchRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStores::FileBatchRetrieveParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :vector_store_id + + sig do + params( + vector_store_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(vector_store_id:, request_options: {}) + end + + sig do + override.returns( + { vector_store_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/vector_stores/file_content_params.rbi b/rbi/openai/models/vector_stores/file_content_params.rbi new file mode 100644 index 00000000..91dccd3c --- /dev/null +++ b/rbi/openai/models/vector_stores/file_content_params.rbi @@ -0,0 +1,40 @@ +# typed: strong + +module OpenAI + module Models + module VectorStores + class FileContentParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStores::FileContentParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :vector_store_id + + sig do + params( + vector_store_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(vector_store_id:, request_options: {}) + end + + sig do + 
override.returns( + { vector_store_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/vector_stores/file_content_response.rbi b/rbi/openai/models/vector_stores/file_content_response.rbi new file mode 100644 index 00000000..1a363bd6 --- /dev/null +++ b/rbi/openai/models/vector_stores/file_content_response.rbi @@ -0,0 +1,44 @@ +# typed: strong + +module OpenAI + module Models + module VectorStores + class FileContentResponse < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Models::VectorStores::FileContentResponse, + OpenAI::Internal::AnyHash + ) + end + + # The text content + sig { returns(T.nilable(String)) } + attr_reader :text + + sig { params(text: String).void } + attr_writer :text + + # The content type (currently only `"text"`) + sig { returns(T.nilable(String)) } + attr_reader :type + + sig { params(type: String).void } + attr_writer :type + + sig { params(text: String, type: String).returns(T.attached_class) } + def self.new( + # The text content + text: nil, + # The content type (currently only `"text"`) + type: nil + ) + end + + sig { override.returns({ text: String, type: String }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/vector_stores/file_create_params.rbi b/rbi/openai/models/vector_stores/file_create_params.rbi new file mode 100644 index 00000000..a335e71c --- /dev/null +++ b/rbi/openai/models/vector_stores/file_create_params.rbi @@ -0,0 +1,143 @@ +# typed: strong + +module OpenAI + module Models + module VectorStores + class FileCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStores::FileCreateParams, + OpenAI::Internal::AnyHash + ) + end + + # A [File](https://platform.openai.com/docs/api-reference/files) ID that the + # vector store should use. Useful for tools like `file_search` that can access + # files. + sig { returns(String) } + attr_accessor :file_id + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + sig do + returns( + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::FileCreateParams::Attribute::Variants + ] + ) + ) + end + attr_accessor :attributes + + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. 
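+        #
+        # An illustrative sketch (editorial addition, not generated from the
+        # API spec): attaching a single file with custom attributes could look
+        # like
+        #
+        #   OpenAI::VectorStores::FileCreateParams.new(
+        #     file_id: "file_abc123",
+        #     attributes: {author: "jane", page_count: 12.0}
+        #   )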
+ sig do + returns( + T.nilable( + T.any( + OpenAI::AutoFileChunkingStrategyParam, + OpenAI::StaticFileChunkingStrategyObjectParam + ) + ) + ) + end + attr_reader :chunking_strategy + + sig do + params( + chunking_strategy: + T.any( + OpenAI::AutoFileChunkingStrategyParam::OrHash, + OpenAI::StaticFileChunkingStrategyObjectParam::OrHash + ) + ).void + end + attr_writer :chunking_strategy + + sig do + params( + file_id: String, + attributes: + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::FileCreateParams::Attribute::Variants + ] + ), + chunking_strategy: + T.any( + OpenAI::AutoFileChunkingStrategyParam::OrHash, + OpenAI::StaticFileChunkingStrategyObjectParam::OrHash + ), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # A [File](https://platform.openai.com/docs/api-reference/files) ID that the + # vector store should use. Useful for tools like `file_search` that can access + # files. + file_id:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + attributes: nil, + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. + chunking_strategy: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + file_id: String, + attributes: + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::FileCreateParams::Attribute::Variants + ] + ), + chunking_strategy: + T.any( + OpenAI::AutoFileChunkingStrategyParam, + OpenAI::StaticFileChunkingStrategyObjectParam + ), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + module Attribute + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(String, Float, T::Boolean) } + + sig do + override.returns( + T::Array[ + OpenAI::VectorStores::FileCreateParams::Attribute::Variants + ] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/vector_stores/file_delete_params.rbi b/rbi/openai/models/vector_stores/file_delete_params.rbi new file mode 100644 index 00000000..28f556ed --- /dev/null +++ b/rbi/openai/models/vector_stores/file_delete_params.rbi @@ -0,0 +1,40 @@ +# typed: strong + +module OpenAI + module Models + module VectorStores + class FileDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStores::FileDeleteParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :vector_store_id + + sig do + params( + vector_store_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(vector_store_id:, request_options: {}) + end + + sig do + override.returns( + { vector_store_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/vector_stores/file_list_params.rbi b/rbi/openai/models/vector_stores/file_list_params.rbi new file mode 100644 index 00000000..87c16199 --- /dev/null +++ b/rbi/openai/models/vector_stores/file_list_params.rbi @@ -0,0 +1,202 @@ +# typed: strong + 
+module OpenAI + module Models + module VectorStores + class FileListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStores::FileListParams, + OpenAI::Internal::AnyHash + ) + end + + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + sig { returns(T.nilable(String)) } + attr_reader :after + + sig { params(after: String).void } + attr_writer :after + + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + sig { returns(T.nilable(String)) } + attr_reader :before + + sig { params(before: String).void } + attr_writer :before + + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. + sig do + returns( + T.nilable(OpenAI::VectorStores::FileListParams::Filter::OrSymbol) + ) + end + attr_reader :filter + + sig do + params( + filter: OpenAI::VectorStores::FileListParams::Filter::OrSymbol + ).void + end + attr_writer :filter + + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + sig { returns(T.nilable(Integer)) } + attr_reader :limit + + sig { params(limit: Integer).void } + attr_writer :limit + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + sig do + returns( + T.nilable(OpenAI::VectorStores::FileListParams::Order::OrSymbol) + ) + end + attr_reader :order + + sig do + params( + order: OpenAI::VectorStores::FileListParams::Order::OrSymbol + ).void + end + attr_writer :order + + sig do + params( + after: String, + before: String, + filter: OpenAI::VectorStores::FileListParams::Filter::OrSymbol, + limit: Integer, + order: OpenAI::VectorStores::FileListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. + filter: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. 
+ order: nil, + request_options: {} + ) + end + + sig do + override.returns( + { + after: String, + before: String, + filter: OpenAI::VectorStores::FileListParams::Filter::OrSymbol, + limit: Integer, + order: OpenAI::VectorStores::FileListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. + module Filter + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::VectorStores::FileListParams::Filter) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::VectorStores::FileListParams::Filter::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::VectorStores::FileListParams::Filter::TaggedSymbol + ) + FAILED = + T.let( + :failed, + OpenAI::VectorStores::FileListParams::Filter::TaggedSymbol + ) + CANCELLED = + T.let( + :cancelled, + OpenAI::VectorStores::FileListParams::Filter::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::VectorStores::FileListParams::Filter::TaggedSymbol + ] + ) + end + def self.values + end + end + + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + module Order + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::VectorStores::FileListParams::Order) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + ASC = + T.let( + :asc, + OpenAI::VectorStores::FileListParams::Order::TaggedSymbol + ) + DESC = + T.let( + :desc, + OpenAI::VectorStores::FileListParams::Order::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::VectorStores::FileListParams::Order::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/vector_stores/file_retrieve_params.rbi b/rbi/openai/models/vector_stores/file_retrieve_params.rbi new file mode 100644 index 00000000..02669095 --- /dev/null +++ b/rbi/openai/models/vector_stores/file_retrieve_params.rbi @@ -0,0 +1,40 @@ +# typed: strong + +module OpenAI + module Models + module VectorStores + class FileRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStores::FileRetrieveParams, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :vector_store_id + + sig do + params( + vector_store_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new(vector_store_id:, request_options: {}) + end + + sig do + override.returns( + { vector_store_id: String, request_options: OpenAI::RequestOptions } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/vector_stores/file_update_params.rbi b/rbi/openai/models/vector_stores/file_update_params.rbi new file mode 100644 index 00000000..da5190dc --- /dev/null +++ b/rbi/openai/models/vector_stores/file_update_params.rbi @@ -0,0 +1,99 @@ +# typed: strong + +module OpenAI + module Models + module VectorStores + class FileUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStores::FileUpdateParams, + OpenAI::Internal::AnyHash + ) 
+ end + + sig { returns(String) } + attr_accessor :vector_store_id + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + sig do + returns( + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::FileUpdateParams::Attribute::Variants + ] + ) + ) + end + attr_accessor :attributes + + sig do + params( + vector_store_id: String, + attributes: + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::FileUpdateParams::Attribute::Variants + ] + ), + request_options: OpenAI::RequestOptions::OrHash + ).returns(T.attached_class) + end + def self.new( + vector_store_id:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + attributes:, + request_options: {} + ) + end + + sig do + override.returns( + { + vector_store_id: String, + attributes: + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::FileUpdateParams::Attribute::Variants + ] + ), + request_options: OpenAI::RequestOptions + } + ) + end + def to_hash + end + + module Attribute + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias { T.any(String, Float, T::Boolean) } + + sig do + override.returns( + T::Array[ + OpenAI::VectorStores::FileUpdateParams::Attribute::Variants + ] + ) + end + def self.variants + end + end + end + end + end +end diff --git a/rbi/openai/models/vector_stores/vector_store_file.rbi b/rbi/openai/models/vector_stores/vector_store_file.rbi new file mode 100644 index 00000000..9207da17 --- /dev/null +++ b/rbi/openai/models/vector_stores/vector_store_file.rbi @@ -0,0 +1,337 @@ +# typed: strong + +module OpenAI + module Models + VectorStoreFile = VectorStores::VectorStoreFile + + module VectorStores + class VectorStoreFile < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStores::VectorStoreFile, + OpenAI::Internal::AnyHash + ) + end + + # The identifier, which can be referenced in API endpoints. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) for when the vector store file was created. + sig { returns(Integer) } + attr_accessor :created_at + + # The last error associated with this vector store file. Will be `null` if there + # are no errors. + sig do + returns(T.nilable(OpenAI::VectorStores::VectorStoreFile::LastError)) + end + attr_reader :last_error + + sig do + params( + last_error: + T.nilable( + OpenAI::VectorStores::VectorStoreFile::LastError::OrHash + ) + ).void + end + attr_writer :last_error + + # The object type, which is always `vector_store.file`. + sig { returns(Symbol) } + attr_accessor :object + + # The status of the vector store file, which can be either `in_progress`, + # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the + # vector store file is ready for use. + sig do + returns(OpenAI::VectorStores::VectorStoreFile::Status::TaggedSymbol) + end + attr_accessor :status + + # The total vector store usage in bytes. 
Note that this may be different from the + # original file size. + sig { returns(Integer) } + attr_accessor :usage_bytes + + # The ID of the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # that the [File](https://platform.openai.com/docs/api-reference/files) is + # attached to. + sig { returns(String) } + attr_accessor :vector_store_id + + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + sig do + returns( + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::VectorStoreFile::Attribute::Variants + ] + ) + ) + end + attr_accessor :attributes + + # The strategy used to chunk the file. + sig { returns(T.nilable(OpenAI::FileChunkingStrategy::Variants)) } + attr_reader :chunking_strategy + + sig do + params( + chunking_strategy: + T.any( + OpenAI::StaticFileChunkingStrategyObject::OrHash, + OpenAI::OtherFileChunkingStrategyObject::OrHash + ) + ).void + end + attr_writer :chunking_strategy + + # A list of files attached to a vector store. + sig do + params( + id: String, + created_at: Integer, + last_error: + T.nilable( + OpenAI::VectorStores::VectorStoreFile::LastError::OrHash + ), + status: OpenAI::VectorStores::VectorStoreFile::Status::OrSymbol, + usage_bytes: Integer, + vector_store_id: String, + attributes: + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::VectorStoreFile::Attribute::Variants + ] + ), + chunking_strategy: + T.any( + OpenAI::StaticFileChunkingStrategyObject::OrHash, + OpenAI::OtherFileChunkingStrategyObject::OrHash + ), + object: Symbol + ).returns(T.attached_class) + end + def self.new( + # The identifier, which can be referenced in API endpoints. + id:, + # The Unix timestamp (in seconds) for when the vector store file was created. + created_at:, + # The last error associated with this vector store file. Will be `null` if there + # are no errors. + last_error:, + # The status of the vector store file, which can be either `in_progress`, + # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the + # vector store file is ready for use. + status:, + # The total vector store usage in bytes. Note that this may be different from the + # original file size. + usage_bytes:, + # The ID of the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # that the [File](https://platform.openai.com/docs/api-reference/files) is + # attached to. + vector_store_id:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + attributes: nil, + # The strategy used to chunk the file. + chunking_strategy: nil, + # The object type, which is always `vector_store.file`. 
+ object: :"vector_store.file" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + last_error: + T.nilable(OpenAI::VectorStores::VectorStoreFile::LastError), + object: Symbol, + status: + OpenAI::VectorStores::VectorStoreFile::Status::TaggedSymbol, + usage_bytes: Integer, + vector_store_id: String, + attributes: + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::VectorStoreFile::Attribute::Variants + ] + ), + chunking_strategy: OpenAI::FileChunkingStrategy::Variants + } + ) + end + def to_hash + end + + class LastError < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStores::VectorStoreFile::LastError, + OpenAI::Internal::AnyHash + ) + end + + # One of `server_error` or `rate_limit_exceeded`. + sig do + returns( + OpenAI::VectorStores::VectorStoreFile::LastError::Code::TaggedSymbol + ) + end + attr_accessor :code + + # A human-readable description of the error. + sig { returns(String) } + attr_accessor :message + + # The last error associated with this vector store file. Will be `null` if there + # are no errors. + sig do + params( + code: + OpenAI::VectorStores::VectorStoreFile::LastError::Code::OrSymbol, + message: String + ).returns(T.attached_class) + end + def self.new( + # One of `server_error` or `rate_limit_exceeded`. + code:, + # A human-readable description of the error. + message: + ) + end + + sig do + override.returns( + { + code: + OpenAI::VectorStores::VectorStoreFile::LastError::Code::TaggedSymbol, + message: String + } + ) + end + def to_hash + end + + # One of `server_error` or `rate_limit_exceeded`. + module Code + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::VectorStores::VectorStoreFile::LastError::Code + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + SERVER_ERROR = + T.let( + :server_error, + OpenAI::VectorStores::VectorStoreFile::LastError::Code::TaggedSymbol + ) + UNSUPPORTED_FILE = + T.let( + :unsupported_file, + OpenAI::VectorStores::VectorStoreFile::LastError::Code::TaggedSymbol + ) + INVALID_FILE = + T.let( + :invalid_file, + OpenAI::VectorStores::VectorStoreFile::LastError::Code::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::VectorStores::VectorStoreFile::LastError::Code::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + # The status of the vector store file, which can be either `in_progress`, + # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the + # vector store file is ready for use. 
+ module Status
+ extend OpenAI::Internal::Type::Enum
+
+ TaggedSymbol =
+ T.type_alias do
+ T.all(Symbol, OpenAI::VectorStores::VectorStoreFile::Status)
+ end
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ IN_PROGRESS =
+ T.let(
+ :in_progress,
+ OpenAI::VectorStores::VectorStoreFile::Status::TaggedSymbol
+ )
+ COMPLETED =
+ T.let(
+ :completed,
+ OpenAI::VectorStores::VectorStoreFile::Status::TaggedSymbol
+ )
+ CANCELLED =
+ T.let(
+ :cancelled,
+ OpenAI::VectorStores::VectorStoreFile::Status::TaggedSymbol
+ )
+ FAILED =
+ T.let(
+ :failed,
+ OpenAI::VectorStores::VectorStoreFile::Status::TaggedSymbol
+ )
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::VectorStores::VectorStoreFile::Status::TaggedSymbol
+ ]
+ )
+ end
+ def self.values
+ end
+ end
+
+ module Attribute
+ extend OpenAI::Internal::Type::Union
+
+ Variants = T.type_alias { T.any(String, Float, T::Boolean) }
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::VectorStores::VectorStoreFile::Attribute::Variants
+ ]
+ )
+ end
+ def self.variants
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/rbi/openai/models/vector_stores/vector_store_file_batch.rbi b/rbi/openai/models/vector_stores/vector_store_file_batch.rbi
new file mode 100644
index 00000000..e464e984
--- /dev/null
+++ b/rbi/openai/models/vector_stores/vector_store_file_batch.rbi
@@ -0,0 +1,219 @@
+# typed: strong
+
+module OpenAI
+ module Models
+ VectorStoreFileBatch = VectorStores::VectorStoreFileBatch
+
+ module VectorStores
+ class VectorStoreFileBatch < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::VectorStores::VectorStoreFileBatch,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # The identifier, which can be referenced in API endpoints.
+ sig { returns(String) }
+ attr_accessor :id
+
+ # The Unix timestamp (in seconds) for when the vector store files batch was
+ # created.
+ sig { returns(Integer) }
+ attr_accessor :created_at
+
+ sig { returns(OpenAI::VectorStores::VectorStoreFileBatch::FileCounts) }
+ attr_reader :file_counts
+
+ sig do
+ params(
+ file_counts:
+ OpenAI::VectorStores::VectorStoreFileBatch::FileCounts::OrHash
+ ).void
+ end
+ attr_writer :file_counts
+
+ # The object type, which is always `vector_store.files_batch`.
+ sig { returns(Symbol) }
+ attr_accessor :object
+
+ # The status of the vector store files batch, which can be either `in_progress`,
+ # `completed`, `cancelled` or `failed`.
+ sig do
+ returns(
+ OpenAI::VectorStores::VectorStoreFileBatch::Status::TaggedSymbol
+ )
+ end
+ attr_accessor :status
+
+ # The ID of the
+ # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ # that the [File](https://platform.openai.com/docs/api-reference/files) is
+ # attached to.
+ sig { returns(String) }
+ attr_accessor :vector_store_id
+
+ # A batch of files attached to a vector store.
+ sig do
+ params(
+ id: String,
+ created_at: Integer,
+ file_counts:
+ OpenAI::VectorStores::VectorStoreFileBatch::FileCounts::OrHash,
+ status:
+ OpenAI::VectorStores::VectorStoreFileBatch::Status::OrSymbol,
+ vector_store_id: String,
+ object: Symbol
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # The identifier, which can be referenced in API endpoints.
+ id:,
+ # The Unix timestamp (in seconds) for when the vector store files batch was
+ # created.
+ created_at:,
+ file_counts:,
+ # The status of the vector store files batch, which can be either `in_progress`,
+ # `completed`, `cancelled` or `failed`.
+ status:,
+ # The ID of the
+ # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ # that the [File](https://platform.openai.com/docs/api-reference/files) is
+ # attached to.
+ vector_store_id:,
+ # The object type, which is always `vector_store.files_batch`.
+ object: :"vector_store.files_batch"
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ id: String,
+ created_at: Integer,
+ file_counts:
+ OpenAI::VectorStores::VectorStoreFileBatch::FileCounts,
+ object: Symbol,
+ status:
+ OpenAI::VectorStores::VectorStoreFileBatch::Status::TaggedSymbol,
+ vector_store_id: String
+ }
+ )
+ end
+ def to_hash
+ end
+
+ class FileCounts < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::VectorStores::VectorStoreFileBatch::FileCounts,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # The number of files that were cancelled.
+ sig { returns(Integer) }
+ attr_accessor :cancelled
+
+ # The number of files that have been processed.
+ sig { returns(Integer) }
+ attr_accessor :completed
+
+ # The number of files that have failed to process.
+ sig { returns(Integer) }
+ attr_accessor :failed
+
+ # The number of files that are currently being processed.
+ sig { returns(Integer) }
+ attr_accessor :in_progress
+
+ # The total number of files.
+ sig { returns(Integer) }
+ attr_accessor :total
+
+ sig do
+ params(
+ cancelled: Integer,
+ completed: Integer,
+ failed: Integer,
+ in_progress: Integer,
+ total: Integer
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # The number of files that were cancelled.
+ cancelled:,
+ # The number of files that have been processed.
+ completed:,
+ # The number of files that have failed to process.
+ failed:,
+ # The number of files that are currently being processed.
+ in_progress:,
+ # The total number of files.
+ total:
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ cancelled: Integer,
+ completed: Integer,
+ failed: Integer,
+ in_progress: Integer,
+ total: Integer
+ }
+ )
+ end
+ def to_hash
+ end
+ end
+
+ # The status of the vector store files batch, which can be either `in_progress`,
+ # `completed`, `cancelled` or `failed`.
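+ #
+ # A hypothetical progress readout assembled from the `file_counts` and
+ # `status` fields declared above:
+ #
+ #   counts = batch.file_counts
+ #   done = counts.completed + counts.failed + counts.cancelled
+ #   puts("#{done} of #{counts.total} files processed (#{batch.status})")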
+ module Status + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::VectorStores::VectorStoreFileBatch::Status) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + IN_PROGRESS = + T.let( + :in_progress, + OpenAI::VectorStores::VectorStoreFileBatch::Status::TaggedSymbol + ) + COMPLETED = + T.let( + :completed, + OpenAI::VectorStores::VectorStoreFileBatch::Status::TaggedSymbol + ) + CANCELLED = + T.let( + :cancelled, + OpenAI::VectorStores::VectorStoreFileBatch::Status::TaggedSymbol + ) + FAILED = + T.let( + :failed, + OpenAI::VectorStores::VectorStoreFileBatch::Status::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::VectorStores::VectorStoreFileBatch::Status::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/vector_stores/vector_store_file_deleted.rbi b/rbi/openai/models/vector_stores/vector_store_file_deleted.rbi new file mode 100644 index 00000000..a8813c5b --- /dev/null +++ b/rbi/openai/models/vector_stores/vector_store_file_deleted.rbi @@ -0,0 +1,42 @@ +# typed: strong + +module OpenAI + module Models + VectorStoreFileDeleted = VectorStores::VectorStoreFileDeleted + + module VectorStores + class VectorStoreFileDeleted < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::VectorStores::VectorStoreFileDeleted, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(String) } + attr_accessor :id + + sig { returns(T::Boolean) } + attr_accessor :deleted + + sig { returns(Symbol) } + attr_accessor :object + + sig do + params(id: String, deleted: T::Boolean, object: Symbol).returns( + T.attached_class + ) + end + def self.new(id:, deleted:, object: :"vector_store.file.deleted") + end + + sig do + override.returns({ id: String, deleted: T::Boolean, object: Symbol }) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi b/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi new file mode 100644 index 00000000..b400f160 --- /dev/null +++ b/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi @@ -0,0 +1,154 @@ +# typed: strong + +module OpenAI + module Models + module Webhooks + class BatchCancelledWebhookEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::BatchCancelledWebhookEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the event. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) of when the batch API request was cancelled. + sig { returns(Integer) } + attr_accessor :created_at + + # Event data payload. + sig { returns(OpenAI::Webhooks::BatchCancelledWebhookEvent::Data) } + attr_reader :data + + sig do + params( + data: OpenAI::Webhooks::BatchCancelledWebhookEvent::Data::OrHash + ).void + end + attr_writer :data + + # The type of the event. Always `batch.cancelled`. + sig { returns(Symbol) } + attr_accessor :type + + # The object of the event. Always `event`. + sig do + returns( + T.nilable( + OpenAI::Webhooks::BatchCancelledWebhookEvent::Object::TaggedSymbol + ) + ) + end + attr_reader :object + + sig do + params( + object: + OpenAI::Webhooks::BatchCancelledWebhookEvent::Object::OrSymbol + ).void + end + attr_writer :object + + # Sent when a batch API request has been cancelled. 
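+ #
+ # For example (field values are illustrative; a plain hash is accepted
+ # wherever an `OrHash` type appears in the sig below):
+ #
+ #   event = OpenAI::Webhooks::BatchCancelledWebhookEvent.new(
+ #     id: "evt_123",
+ #     created_at: 1_700_000_000,
+ #     data: {id: "batch_abc"}
+ #   )
+ #   event.type # => :"batch.cancelled"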
+ sig do + params( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchCancelledWebhookEvent::Data::OrHash, + object: + OpenAI::Webhooks::BatchCancelledWebhookEvent::Object::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the event. + id:, + # The Unix timestamp (in seconds) of when the batch API request was cancelled. + created_at:, + # Event data payload. + data:, + # The object of the event. Always `event`. + object: nil, + # The type of the event. Always `batch.cancelled`. + type: :"batch.cancelled" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchCancelledWebhookEvent::Data, + type: Symbol, + object: + OpenAI::Webhooks::BatchCancelledWebhookEvent::Object::TaggedSymbol + } + ) + end + def to_hash + end + + class Data < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::BatchCancelledWebhookEvent::Data, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the batch API request. + sig { returns(String) } + attr_accessor :id + + # Event data payload. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the batch API request. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + + # The object of the event. Always `event`. + module Object + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Webhooks::BatchCancelledWebhookEvent::Object + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EVENT = + T.let( + :event, + OpenAI::Webhooks::BatchCancelledWebhookEvent::Object::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Webhooks::BatchCancelledWebhookEvent::Object::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi b/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi new file mode 100644 index 00000000..eb2777c0 --- /dev/null +++ b/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi @@ -0,0 +1,154 @@ +# typed: strong + +module OpenAI + module Models + module Webhooks + class BatchCompletedWebhookEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::BatchCompletedWebhookEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the event. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) of when the batch API request was completed. + sig { returns(Integer) } + attr_accessor :created_at + + # Event data payload. + sig { returns(OpenAI::Webhooks::BatchCompletedWebhookEvent::Data) } + attr_reader :data + + sig do + params( + data: OpenAI::Webhooks::BatchCompletedWebhookEvent::Data::OrHash + ).void + end + attr_writer :data + + # The type of the event. Always `batch.completed`. + sig { returns(Symbol) } + attr_accessor :type + + # The object of the event. Always `event`. + sig do + returns( + T.nilable( + OpenAI::Webhooks::BatchCompletedWebhookEvent::Object::TaggedSymbol + ) + ) + end + attr_reader :object + + sig do + params( + object: + OpenAI::Webhooks::BatchCompletedWebhookEvent::Object::OrSymbol + ).void + end + attr_writer :object + + # Sent when a batch API request has been completed. 
+ sig do + params( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchCompletedWebhookEvent::Data::OrHash, + object: + OpenAI::Webhooks::BatchCompletedWebhookEvent::Object::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the event. + id:, + # The Unix timestamp (in seconds) of when the batch API request was completed. + created_at:, + # Event data payload. + data:, + # The object of the event. Always `event`. + object: nil, + # The type of the event. Always `batch.completed`. + type: :"batch.completed" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchCompletedWebhookEvent::Data, + type: Symbol, + object: + OpenAI::Webhooks::BatchCompletedWebhookEvent::Object::TaggedSymbol + } + ) + end + def to_hash + end + + class Data < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::BatchCompletedWebhookEvent::Data, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the batch API request. + sig { returns(String) } + attr_accessor :id + + # Event data payload. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the batch API request. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + + # The object of the event. Always `event`. + module Object + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Webhooks::BatchCompletedWebhookEvent::Object + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EVENT = + T.let( + :event, + OpenAI::Webhooks::BatchCompletedWebhookEvent::Object::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Webhooks::BatchCompletedWebhookEvent::Object::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi b/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi new file mode 100644 index 00000000..bb40a5fe --- /dev/null +++ b/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi @@ -0,0 +1,150 @@ +# typed: strong + +module OpenAI + module Models + module Webhooks + class BatchExpiredWebhookEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::BatchExpiredWebhookEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the event. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) of when the batch API request expired. + sig { returns(Integer) } + attr_accessor :created_at + + # Event data payload. + sig { returns(OpenAI::Webhooks::BatchExpiredWebhookEvent::Data) } + attr_reader :data + + sig do + params( + data: OpenAI::Webhooks::BatchExpiredWebhookEvent::Data::OrHash + ).void + end + attr_writer :data + + # The type of the event. Always `batch.expired`. + sig { returns(Symbol) } + attr_accessor :type + + # The object of the event. Always `event`. + sig do + returns( + T.nilable( + OpenAI::Webhooks::BatchExpiredWebhookEvent::Object::TaggedSymbol + ) + ) + end + attr_reader :object + + sig do + params( + object: OpenAI::Webhooks::BatchExpiredWebhookEvent::Object::OrSymbol + ).void + end + attr_writer :object + + # Sent when a batch API request has expired. 
+ sig do + params( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchExpiredWebhookEvent::Data::OrHash, + object: + OpenAI::Webhooks::BatchExpiredWebhookEvent::Object::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the event. + id:, + # The Unix timestamp (in seconds) of when the batch API request expired. + created_at:, + # Event data payload. + data:, + # The object of the event. Always `event`. + object: nil, + # The type of the event. Always `batch.expired`. + type: :"batch.expired" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchExpiredWebhookEvent::Data, + type: Symbol, + object: + OpenAI::Webhooks::BatchExpiredWebhookEvent::Object::TaggedSymbol + } + ) + end + def to_hash + end + + class Data < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::BatchExpiredWebhookEvent::Data, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the batch API request. + sig { returns(String) } + attr_accessor :id + + # Event data payload. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the batch API request. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + + # The object of the event. Always `event`. + module Object + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Webhooks::BatchExpiredWebhookEvent::Object) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EVENT = + T.let( + :event, + OpenAI::Webhooks::BatchExpiredWebhookEvent::Object::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Webhooks::BatchExpiredWebhookEvent::Object::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi b/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi new file mode 100644 index 00000000..e6d403b3 --- /dev/null +++ b/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi @@ -0,0 +1,149 @@ +# typed: strong + +module OpenAI + module Models + module Webhooks + class BatchFailedWebhookEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::BatchFailedWebhookEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the event. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) of when the batch API request failed. + sig { returns(Integer) } + attr_accessor :created_at + + # Event data payload. + sig { returns(OpenAI::Webhooks::BatchFailedWebhookEvent::Data) } + attr_reader :data + + sig do + params( + data: OpenAI::Webhooks::BatchFailedWebhookEvent::Data::OrHash + ).void + end + attr_writer :data + + # The type of the event. Always `batch.failed`. + sig { returns(Symbol) } + attr_accessor :type + + # The object of the event. Always `event`. + sig do + returns( + T.nilable( + OpenAI::Webhooks::BatchFailedWebhookEvent::Object::TaggedSymbol + ) + ) + end + attr_reader :object + + sig do + params( + object: OpenAI::Webhooks::BatchFailedWebhookEvent::Object::OrSymbol + ).void + end + attr_writer :object + + # Sent when a batch API request has failed. 
+ sig do + params( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchFailedWebhookEvent::Data::OrHash, + object: OpenAI::Webhooks::BatchFailedWebhookEvent::Object::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the event. + id:, + # The Unix timestamp (in seconds) of when the batch API request failed. + created_at:, + # Event data payload. + data:, + # The object of the event. Always `event`. + object: nil, + # The type of the event. Always `batch.failed`. + type: :"batch.failed" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchFailedWebhookEvent::Data, + type: Symbol, + object: + OpenAI::Webhooks::BatchFailedWebhookEvent::Object::TaggedSymbol + } + ) + end + def to_hash + end + + class Data < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::BatchFailedWebhookEvent::Data, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the batch API request. + sig { returns(String) } + attr_accessor :id + + # Event data payload. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the batch API request. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + + # The object of the event. Always `event`. + module Object + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Webhooks::BatchFailedWebhookEvent::Object) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EVENT = + T.let( + :event, + OpenAI::Webhooks::BatchFailedWebhookEvent::Object::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Webhooks::BatchFailedWebhookEvent::Object::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi b/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi new file mode 100644 index 00000000..41d4fcea --- /dev/null +++ b/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi @@ -0,0 +1,154 @@ +# typed: strong + +module OpenAI + module Models + module Webhooks + class EvalRunCanceledWebhookEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::EvalRunCanceledWebhookEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the event. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) of when the eval run was canceled. + sig { returns(Integer) } + attr_accessor :created_at + + # Event data payload. + sig { returns(OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Data) } + attr_reader :data + + sig do + params( + data: OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Data::OrHash + ).void + end + attr_writer :data + + # The type of the event. Always `eval.run.canceled`. + sig { returns(Symbol) } + attr_accessor :type + + # The object of the event. Always `event`. + sig do + returns( + T.nilable( + OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Object::TaggedSymbol + ) + ) + end + attr_reader :object + + sig do + params( + object: + OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Object::OrSymbol + ).void + end + attr_writer :object + + # Sent when an eval run has been canceled. 
+ sig do + params( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Data::OrHash, + object: + OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Object::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the event. + id:, + # The Unix timestamp (in seconds) of when the eval run was canceled. + created_at:, + # Event data payload. + data:, + # The object of the event. Always `event`. + object: nil, + # The type of the event. Always `eval.run.canceled`. + type: :"eval.run.canceled" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Data, + type: Symbol, + object: + OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Object::TaggedSymbol + } + ) + end + def to_hash + end + + class Data < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Data, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the eval run. + sig { returns(String) } + attr_accessor :id + + # Event data payload. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the eval run. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + + # The object of the event. Always `event`. + module Object + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Object + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EVENT = + T.let( + :event, + OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Object::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Object::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi b/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi new file mode 100644 index 00000000..5df6eb00 --- /dev/null +++ b/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi @@ -0,0 +1,151 @@ +# typed: strong + +module OpenAI + module Models + module Webhooks + class EvalRunFailedWebhookEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::EvalRunFailedWebhookEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the event. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) of when the eval run failed. + sig { returns(Integer) } + attr_accessor :created_at + + # Event data payload. + sig { returns(OpenAI::Webhooks::EvalRunFailedWebhookEvent::Data) } + attr_reader :data + + sig do + params( + data: OpenAI::Webhooks::EvalRunFailedWebhookEvent::Data::OrHash + ).void + end + attr_writer :data + + # The type of the event. Always `eval.run.failed`. + sig { returns(Symbol) } + attr_accessor :type + + # The object of the event. Always `event`. + sig do + returns( + T.nilable( + OpenAI::Webhooks::EvalRunFailedWebhookEvent::Object::TaggedSymbol + ) + ) + end + attr_reader :object + + sig do + params( + object: + OpenAI::Webhooks::EvalRunFailedWebhookEvent::Object::OrSymbol + ).void + end + attr_writer :object + + # Sent when an eval run has failed. 
+ sig do + params( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::EvalRunFailedWebhookEvent::Data::OrHash, + object: + OpenAI::Webhooks::EvalRunFailedWebhookEvent::Object::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the event. + id:, + # The Unix timestamp (in seconds) of when the eval run failed. + created_at:, + # Event data payload. + data:, + # The object of the event. Always `event`. + object: nil, + # The type of the event. Always `eval.run.failed`. + type: :"eval.run.failed" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::EvalRunFailedWebhookEvent::Data, + type: Symbol, + object: + OpenAI::Webhooks::EvalRunFailedWebhookEvent::Object::TaggedSymbol + } + ) + end + def to_hash + end + + class Data < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::EvalRunFailedWebhookEvent::Data, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the eval run. + sig { returns(String) } + attr_accessor :id + + # Event data payload. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the eval run. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + + # The object of the event. Always `event`. + module Object + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Webhooks::EvalRunFailedWebhookEvent::Object) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EVENT = + T.let( + :event, + OpenAI::Webhooks::EvalRunFailedWebhookEvent::Object::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Webhooks::EvalRunFailedWebhookEvent::Object::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi b/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi new file mode 100644 index 00000000..27c80361 --- /dev/null +++ b/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi @@ -0,0 +1,154 @@ +# typed: strong + +module OpenAI + module Models + module Webhooks + class EvalRunSucceededWebhookEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::EvalRunSucceededWebhookEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the event. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) of when the eval run succeeded. + sig { returns(Integer) } + attr_accessor :created_at + + # Event data payload. + sig { returns(OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Data) } + attr_reader :data + + sig do + params( + data: OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Data::OrHash + ).void + end + attr_writer :data + + # The type of the event. Always `eval.run.succeeded`. + sig { returns(Symbol) } + attr_accessor :type + + # The object of the event. Always `event`. + sig do + returns( + T.nilable( + OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Object::TaggedSymbol + ) + ) + end + attr_reader :object + + sig do + params( + object: + OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Object::OrSymbol + ).void + end + attr_writer :object + + # Sent when an eval run has succeeded. 
+ sig do + params( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Data::OrHash, + object: + OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Object::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the event. + id:, + # The Unix timestamp (in seconds) of when the eval run succeeded. + created_at:, + # Event data payload. + data:, + # The object of the event. Always `event`. + object: nil, + # The type of the event. Always `eval.run.succeeded`. + type: :"eval.run.succeeded" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Data, + type: Symbol, + object: + OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Object::TaggedSymbol + } + ) + end + def to_hash + end + + class Data < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Data, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the eval run. + sig { returns(String) } + attr_accessor :id + + # Event data payload. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the eval run. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + + # The object of the event. Always `event`. + module Object + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Object + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EVENT = + T.let( + :event, + OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Object::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Object::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi b/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi new file mode 100644 index 00000000..e310d37c --- /dev/null +++ b/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi @@ -0,0 +1,158 @@ +# typed: strong + +module OpenAI + module Models + module Webhooks + class FineTuningJobCancelledWebhookEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the event. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) of when the fine-tuning job was cancelled. + sig { returns(Integer) } + attr_accessor :created_at + + # Event data payload. + sig do + returns(OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Data) + end + attr_reader :data + + sig do + params( + data: + OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Data::OrHash + ).void + end + attr_writer :data + + # The type of the event. Always `fine_tuning.job.cancelled`. + sig { returns(Symbol) } + attr_accessor :type + + # The object of the event. Always `event`. + sig do + returns( + T.nilable( + OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Object::TaggedSymbol + ) + ) + end + attr_reader :object + + sig do + params( + object: + OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Object::OrSymbol + ).void + end + attr_writer :object + + # Sent when a fine-tuning job has been cancelled. 
+ sig do + params( + id: String, + created_at: Integer, + data: + OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Data::OrHash, + object: + OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Object::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the event. + id:, + # The Unix timestamp (in seconds) of when the fine-tuning job was cancelled. + created_at:, + # Event data payload. + data:, + # The object of the event. Always `event`. + object: nil, + # The type of the event. Always `fine_tuning.job.cancelled`. + type: :"fine_tuning.job.cancelled" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Data, + type: Symbol, + object: + OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Object::TaggedSymbol + } + ) + end + def to_hash + end + + class Data < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Data, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the fine-tuning job. + sig { returns(String) } + attr_accessor :id + + # Event data payload. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the fine-tuning job. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + + # The object of the event. Always `event`. + module Object + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Object + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EVENT = + T.let( + :event, + OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Object::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Object::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi b/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi new file mode 100644 index 00000000..f15252b2 --- /dev/null +++ b/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi @@ -0,0 +1,156 @@ +# typed: strong + +module OpenAI + module Models + module Webhooks + class FineTuningJobFailedWebhookEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::FineTuningJobFailedWebhookEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the event. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) of when the fine-tuning job failed. + sig { returns(Integer) } + attr_accessor :created_at + + # Event data payload. + sig { returns(OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Data) } + attr_reader :data + + sig do + params( + data: + OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Data::OrHash + ).void + end + attr_writer :data + + # The type of the event. Always `fine_tuning.job.failed`. + sig { returns(Symbol) } + attr_accessor :type + + # The object of the event. Always `event`. + sig do + returns( + T.nilable( + OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Object::TaggedSymbol + ) + ) + end + attr_reader :object + + sig do + params( + object: + OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Object::OrSymbol + ).void + end + attr_writer :object + + # Sent when a fine-tuning job has failed. 
+ sig do + params( + id: String, + created_at: Integer, + data: + OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Data::OrHash, + object: + OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Object::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the event. + id:, + # The Unix timestamp (in seconds) of when the fine-tuning job failed. + created_at:, + # Event data payload. + data:, + # The object of the event. Always `event`. + object: nil, + # The type of the event. Always `fine_tuning.job.failed`. + type: :"fine_tuning.job.failed" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Data, + type: Symbol, + object: + OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Object::TaggedSymbol + } + ) + end + def to_hash + end + + class Data < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Data, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the fine-tuning job. + sig { returns(String) } + attr_accessor :id + + # Event data payload. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the fine-tuning job. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + + # The object of the event. Always `event`. + module Object + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Object + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EVENT = + T.let( + :event, + OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Object::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Object::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi b/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi new file mode 100644 index 00000000..2012aefe --- /dev/null +++ b/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi @@ -0,0 +1,158 @@ +# typed: strong + +module OpenAI + module Models + module Webhooks + class FineTuningJobSucceededWebhookEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the event. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) of when the fine-tuning job succeeded. + sig { returns(Integer) } + attr_accessor :created_at + + # Event data payload. + sig do + returns(OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Data) + end + attr_reader :data + + sig do + params( + data: + OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Data::OrHash + ).void + end + attr_writer :data + + # The type of the event. Always `fine_tuning.job.succeeded`. + sig { returns(Symbol) } + attr_accessor :type + + # The object of the event. Always `event`. + sig do + returns( + T.nilable( + OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Object::TaggedSymbol + ) + ) + end + attr_reader :object + + sig do + params( + object: + OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Object::OrSymbol + ).void + end + attr_writer :object + + # Sent when a fine-tuning job has succeeded. 
+ sig do + params( + id: String, + created_at: Integer, + data: + OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Data::OrHash, + object: + OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Object::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the event. + id:, + # The Unix timestamp (in seconds) of when the fine-tuning job succeeded. + created_at:, + # Event data payload. + data:, + # The object of the event. Always `event`. + object: nil, + # The type of the event. Always `fine_tuning.job.succeeded`. + type: :"fine_tuning.job.succeeded" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Data, + type: Symbol, + object: + OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Object::TaggedSymbol + } + ) + end + def to_hash + end + + class Data < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Data, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the fine-tuning job. + sig { returns(String) } + attr_accessor :id + + # Event data payload. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the fine-tuning job. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + + # The object of the event. Always `event`. + module Object + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Object + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EVENT = + T.let( + :event, + OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Object::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Object::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi b/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi new file mode 100644 index 00000000..7a0ff036 --- /dev/null +++ b/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi @@ -0,0 +1,154 @@ +# typed: strong + +module OpenAI + module Models + module Webhooks + class ResponseCancelledWebhookEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::ResponseCancelledWebhookEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the event. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) of when the model response was cancelled. + sig { returns(Integer) } + attr_accessor :created_at + + # Event data payload. + sig { returns(OpenAI::Webhooks::ResponseCancelledWebhookEvent::Data) } + attr_reader :data + + sig do + params( + data: OpenAI::Webhooks::ResponseCancelledWebhookEvent::Data::OrHash + ).void + end + attr_writer :data + + # The type of the event. Always `response.cancelled`. + sig { returns(Symbol) } + attr_accessor :type + + # The object of the event. Always `event`. + sig do + returns( + T.nilable( + OpenAI::Webhooks::ResponseCancelledWebhookEvent::Object::TaggedSymbol + ) + ) + end + attr_reader :object + + sig do + params( + object: + OpenAI::Webhooks::ResponseCancelledWebhookEvent::Object::OrSymbol + ).void + end + attr_writer :object + + # Sent when a background response has been cancelled. 
+ sig do + params( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseCancelledWebhookEvent::Data::OrHash, + object: + OpenAI::Webhooks::ResponseCancelledWebhookEvent::Object::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the event. + id:, + # The Unix timestamp (in seconds) of when the model response was cancelled. + created_at:, + # Event data payload. + data:, + # The object of the event. Always `event`. + object: nil, + # The type of the event. Always `response.cancelled`. + type: :"response.cancelled" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseCancelledWebhookEvent::Data, + type: Symbol, + object: + OpenAI::Webhooks::ResponseCancelledWebhookEvent::Object::TaggedSymbol + } + ) + end + def to_hash + end + + class Data < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::ResponseCancelledWebhookEvent::Data, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the model response. + sig { returns(String) } + attr_accessor :id + + # Event data payload. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the model response. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + + # The object of the event. Always `event`. + module Object + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Webhooks::ResponseCancelledWebhookEvent::Object + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EVENT = + T.let( + :event, + OpenAI::Webhooks::ResponseCancelledWebhookEvent::Object::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Webhooks::ResponseCancelledWebhookEvent::Object::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/webhooks/response_completed_webhook_event.rbi b/rbi/openai/models/webhooks/response_completed_webhook_event.rbi new file mode 100644 index 00000000..16506ae1 --- /dev/null +++ b/rbi/openai/models/webhooks/response_completed_webhook_event.rbi @@ -0,0 +1,154 @@ +# typed: strong + +module OpenAI + module Models + module Webhooks + class ResponseCompletedWebhookEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::ResponseCompletedWebhookEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the event. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) of when the model response was completed. + sig { returns(Integer) } + attr_accessor :created_at + + # Event data payload. + sig { returns(OpenAI::Webhooks::ResponseCompletedWebhookEvent::Data) } + attr_reader :data + + sig do + params( + data: OpenAI::Webhooks::ResponseCompletedWebhookEvent::Data::OrHash + ).void + end + attr_writer :data + + # The type of the event. Always `response.completed`. + sig { returns(Symbol) } + attr_accessor :type + + # The object of the event. Always `event`. + sig do + returns( + T.nilable( + OpenAI::Webhooks::ResponseCompletedWebhookEvent::Object::TaggedSymbol + ) + ) + end + attr_reader :object + + sig do + params( + object: + OpenAI::Webhooks::ResponseCompletedWebhookEvent::Object::OrSymbol + ).void + end + attr_writer :object + + # Sent when a background response has been completed. 
+ sig do + params( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseCompletedWebhookEvent::Data::OrHash, + object: + OpenAI::Webhooks::ResponseCompletedWebhookEvent::Object::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the event. + id:, + # The Unix timestamp (in seconds) of when the model response was completed. + created_at:, + # Event data payload. + data:, + # The object of the event. Always `event`. + object: nil, + # The type of the event. Always `response.completed`. + type: :"response.completed" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseCompletedWebhookEvent::Data, + type: Symbol, + object: + OpenAI::Webhooks::ResponseCompletedWebhookEvent::Object::TaggedSymbol + } + ) + end + def to_hash + end + + class Data < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::ResponseCompletedWebhookEvent::Data, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the model response. + sig { returns(String) } + attr_accessor :id + + # Event data payload. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the model response. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + + # The object of the event. Always `event`. + module Object + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Webhooks::ResponseCompletedWebhookEvent::Object + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EVENT = + T.let( + :event, + OpenAI::Webhooks::ResponseCompletedWebhookEvent::Object::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Webhooks::ResponseCompletedWebhookEvent::Object::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/webhooks/response_failed_webhook_event.rbi b/rbi/openai/models/webhooks/response_failed_webhook_event.rbi new file mode 100644 index 00000000..1c9edc19 --- /dev/null +++ b/rbi/openai/models/webhooks/response_failed_webhook_event.rbi @@ -0,0 +1,154 @@ +# typed: strong + +module OpenAI + module Models + module Webhooks + class ResponseFailedWebhookEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::ResponseFailedWebhookEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the event. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) of when the model response failed. + sig { returns(Integer) } + attr_accessor :created_at + + # Event data payload. + sig { returns(OpenAI::Webhooks::ResponseFailedWebhookEvent::Data) } + attr_reader :data + + sig do + params( + data: OpenAI::Webhooks::ResponseFailedWebhookEvent::Data::OrHash + ).void + end + attr_writer :data + + # The type of the event. Always `response.failed`. + sig { returns(Symbol) } + attr_accessor :type + + # The object of the event. Always `event`. + sig do + returns( + T.nilable( + OpenAI::Webhooks::ResponseFailedWebhookEvent::Object::TaggedSymbol + ) + ) + end + attr_reader :object + + sig do + params( + object: + OpenAI::Webhooks::ResponseFailedWebhookEvent::Object::OrSymbol + ).void + end + attr_writer :object + + # Sent when a background response has failed. 
+ sig do + params( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseFailedWebhookEvent::Data::OrHash, + object: + OpenAI::Webhooks::ResponseFailedWebhookEvent::Object::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The unique ID of the event. + id:, + # The Unix timestamp (in seconds) of when the model response failed. + created_at:, + # Event data payload. + data:, + # The object of the event. Always `event`. + object: nil, + # The type of the event. Always `response.failed`. + type: :"response.failed" + ) + end + + sig do + override.returns( + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseFailedWebhookEvent::Data, + type: Symbol, + object: + OpenAI::Webhooks::ResponseFailedWebhookEvent::Object::TaggedSymbol + } + ) + end + def to_hash + end + + class Data < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::ResponseFailedWebhookEvent::Data, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the model response. + sig { returns(String) } + attr_accessor :id + + # Event data payload. + sig { params(id: String).returns(T.attached_class) } + def self.new( + # The unique ID of the model response. + id: + ) + end + + sig { override.returns({ id: String }) } + def to_hash + end + end + + # The object of the event. Always `event`. + module Object + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Webhooks::ResponseFailedWebhookEvent::Object + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + EVENT = + T.let( + :event, + OpenAI::Webhooks::ResponseFailedWebhookEvent::Object::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Webhooks::ResponseFailedWebhookEvent::Object::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi b/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi new file mode 100644 index 00000000..94712e7b --- /dev/null +++ b/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi @@ -0,0 +1,155 @@ +# typed: strong + +module OpenAI + module Models + module Webhooks + class ResponseIncompleteWebhookEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Webhooks::ResponseIncompleteWebhookEvent, + OpenAI::Internal::AnyHash + ) + end + + # The unique ID of the event. + sig { returns(String) } + attr_accessor :id + + # The Unix timestamp (in seconds) of when the model response was interrupted. + sig { returns(Integer) } + attr_accessor :created_at + + # Event data payload. + sig { returns(OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data) } + attr_reader :data + + sig do + params( + data: OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data::OrHash + ).void + end + attr_writer :data + + # The type of the event. Always `response.incomplete`. + sig { returns(Symbol) } + attr_accessor :type + + # The object of the event. Always `event`. + sig do + returns( + T.nilable( + OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object::TaggedSymbol + ) + ) + end + attr_reader :object + + sig do + params( + object: + OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object::OrSymbol + ).void + end + attr_writer :object + + # Sent when a background response has been interrupted. 
+ sig do
+ params(
+ id: String,
+ created_at: Integer,
+ data:
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data::OrHash,
+ object:
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object::OrSymbol,
+ type: Symbol
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # The unique ID of the event.
+ id:,
+ # The Unix timestamp (in seconds) of when the model response was interrupted.
+ created_at:,
+ # Event data payload.
+ data:,
+ # The object of the event. Always `event`.
+ object: nil,
+ # The type of the event. Always `response.incomplete`.
+ type: :"response.incomplete"
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ id: String,
+ created_at: Integer,
+ data: OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data,
+ type: Symbol,
+ object:
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object::TaggedSymbol
+ }
+ )
+ end
+ def to_hash
+ end
+
+ class Data < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # The unique ID of the model response.
+ sig { returns(String) }
+ attr_accessor :id
+
+ # Event data payload.
+ sig { params(id: String).returns(T.attached_class) }
+ def self.new(
+ # The unique ID of the model response.
+ id:
+ )
+ end
+
+ sig { override.returns({ id: String }) }
+ def to_hash
+ end
+ end
+
+ # The object of the event. Always `event`.
+ module Object
+ extend OpenAI::Internal::Type::Enum
+
+ TaggedSymbol =
+ T.type_alias do
+ T.all(
+ Symbol,
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object
+ )
+ end
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ EVENT =
+ T.let(
+ :event,
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object::TaggedSymbol
+ )
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object::TaggedSymbol
+ ]
+ )
+ end
+ def self.values
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/rbi/openai/models/webhooks/unwrap_webhook_event.rbi b/rbi/openai/models/webhooks/unwrap_webhook_event.rbi
new file mode 100644
index 00000000..30214682
--- /dev/null
+++ b/rbi/openai/models/webhooks/unwrap_webhook_event.rbi
@@ -0,0 +1,40 @@
+# typed: strong
+
+module OpenAI
+ module Models
+ module Webhooks
+ # A webhook event payload: the union of all supported webhook event types.
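+ #
+ # A dispatch sketch (this assumes an `unwrap` helper on the webhooks
+ # resource, whose exact signature may differ; handler bodies are
+ # placeholders):
+ #
+ #   event = client.webhooks.unwrap(payload, headers)
+ #   case event
+ #   when OpenAI::Webhooks::BatchCompletedWebhookEvent
+ #     puts("batch #{event.data.id} finished")
+ #   when OpenAI::Webhooks::ResponseFailedWebhookEvent
+ #     warn("response #{event.data.id} failed")
+ #   end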
+      module UnwrapWebhookEvent
+        extend OpenAI::Internal::Type::Union
+
+        Variants =
+          T.type_alias do
+            T.any(
+              OpenAI::Webhooks::BatchCancelledWebhookEvent,
+              OpenAI::Webhooks::BatchCompletedWebhookEvent,
+              OpenAI::Webhooks::BatchExpiredWebhookEvent,
+              OpenAI::Webhooks::BatchFailedWebhookEvent,
+              OpenAI::Webhooks::EvalRunCanceledWebhookEvent,
+              OpenAI::Webhooks::EvalRunFailedWebhookEvent,
+              OpenAI::Webhooks::EvalRunSucceededWebhookEvent,
+              OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent,
+              OpenAI::Webhooks::FineTuningJobFailedWebhookEvent,
+              OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent,
+              OpenAI::Webhooks::ResponseCancelledWebhookEvent,
+              OpenAI::Webhooks::ResponseCompletedWebhookEvent,
+              OpenAI::Webhooks::ResponseFailedWebhookEvent,
+              OpenAI::Webhooks::ResponseIncompleteWebhookEvent
+            )
+          end
+
+        sig do
+          override.returns(
+            T::Array[OpenAI::Webhooks::UnwrapWebhookEvent::Variants]
+          )
+        end
+        def self.variants
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/models/webhooks/webhook_unwrap_params.rbi b/rbi/openai/models/webhooks/webhook_unwrap_params.rbi
new file mode 100644
index 00000000..8d784207
--- /dev/null
+++ b/rbi/openai/models/webhooks/webhook_unwrap_params.rbi
@@ -0,0 +1,32 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Webhooks
+      class WebhookUnwrapParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters
+
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::Webhooks::WebhookUnwrapParams,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        sig do
+          params(request_options: OpenAI::RequestOptions::OrHash).returns(
+            T.attached_class
+          )
+        end
+        def self.new(request_options: {})
+        end
+
+        sig { override.returns({ request_options: OpenAI::RequestOptions }) }
+        def to_hash
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/request_options.rbi b/rbi/openai/request_options.rbi
new file mode 100644
index 00000000..144d5a33
--- /dev/null
+++ b/rbi/openai/request_options.rbi
@@ -0,0 +1,55 @@
+# typed: strong
+
+module OpenAI
+  # Specify HTTP behaviour to use for a specific request. These options supplement
+  # or override those provided at the client level.
+  #
+  # When making a request, you can pass an actual {RequestOptions} instance, or
+  # simply pass a Hash with symbol keys matching the attributes on this class.
+  class RequestOptions < OpenAI::Internal::Type::BaseModel
+    OrHash =
+      T.type_alias { T.any(OpenAI::RequestOptions, OpenAI::Internal::AnyHash) }
+
+    # @api private
+    sig { params(opts: OpenAI::RequestOptions::OrHash).void }
+    def self.validate!(opts)
+    end
+
+    # Idempotency key to send with request and all associated retries. Will only be
+    # sent for write requests.
+    sig { returns(T.nilable(String)) }
+    attr_accessor :idempotency_key
+
+    # Extra query params to send with the request. These are `.merge`’d into any
+    # `query` given at the client level.
+    sig do
+      returns(
+        T.nilable(T::Hash[String, T.nilable(T.any(T::Array[String], String))])
+      )
+    end
+    attr_accessor :extra_query
+
+    # Extra headers to send with the request. These are `.merge`’d into any
+    # `extra_headers` given at the client level.
+    sig { returns(T.nilable(T::Hash[String, T.nilable(String)])) }
+    attr_accessor :extra_headers
+
+    # Extra data to send with the request. These are deep merged into any data
+    # generated as part of the normal request.
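+    #
+    # A hedged usage sketch (`my_beta_field` is a hypothetical body key; any request
+    # method accepts `request_options:`):
+    #
+    #   client.chat.completions.create(
+    #     model: "gpt-4o",
+    #     messages: [{role: :user, content: "Hello!"}],
+    #     request_options: {extra_body: {my_beta_field: true}}
+    #   )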
+    sig { returns(T.nilable(T.anything)) }
+    attr_accessor :extra_body
+
+    # Maximum number of retries to attempt after a failed initial request.
+    sig { returns(T.nilable(Integer)) }
+    attr_accessor :max_retries
+
+    # Request timeout in seconds.
+    sig { returns(T.nilable(Float)) }
+    attr_accessor :timeout
+
+    # Returns a new instance of RequestOptions.
+    sig { params(values: OpenAI::Internal::AnyHash).returns(T.attached_class) }
+    def self.new(values = {})
+    end
+  end
+end
diff --git a/rbi/lib/openai/resources/audio.rbi b/rbi/openai/resources/audio.rbi
similarity index 76%
rename from rbi/lib/openai/resources/audio.rbi
rename to rbi/openai/resources/audio.rbi
index c3381f2e..1747c22c 100644
--- a/rbi/lib/openai/resources/audio.rbi
+++ b/rbi/openai/resources/audio.rbi
@@ -4,17 +4,15 @@ module OpenAI
   module Resources
     class Audio
       sig { returns(OpenAI::Resources::Audio::Transcriptions) }
-      def transcriptions
-      end
+      attr_reader :transcriptions
 
       sig { returns(OpenAI::Resources::Audio::Translations) }
-      def translations
-      end
+      attr_reader :translations
 
       sig { returns(OpenAI::Resources::Audio::Speech) }
-      def speech
-      end
+      attr_reader :speech
 
+      # @api private
       sig { params(client: OpenAI::Client).returns(T.attached_class) }
       def self.new(client:)
       end
diff --git a/rbi/openai/resources/audio/speech.rbi b/rbi/openai/resources/audio/speech.rbi
new file mode 100644
index 00000000..d7c6b56a
--- /dev/null
+++ b/rbi/openai/resources/audio/speech.rbi
@@ -0,0 +1,57 @@
+# typed: strong
+
+module OpenAI
+  module Resources
+    class Audio
+      class Speech
+        # Generates audio from the input text.
+        sig do
+          params(
+            input: String,
+            model: T.any(String, OpenAI::Audio::SpeechModel::OrSymbol),
+            voice:
+              T.any(String, OpenAI::Audio::SpeechCreateParams::Voice::OrSymbol),
+            instructions: String,
+            response_format:
+              OpenAI::Audio::SpeechCreateParams::ResponseFormat::OrSymbol,
+            speed: Float,
+            stream_format:
+              OpenAI::Audio::SpeechCreateParams::StreamFormat::OrSymbol,
+            request_options: OpenAI::RequestOptions::OrHash
+          ).returns(StringIO)
+        end
+        def create(
+          # The text to generate audio for. The maximum length is 4096 characters.
+          input:,
+          # One of the available [TTS models](https://platform.openai.com/docs/models#tts):
+          # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+          model:,
+          # The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
+          # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
+          # `verse`. Previews of the voices are available in the
+          # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
+          voice:,
+          # Control the voice of your generated audio with additional instructions. Does not
+          # work with `tts-1` or `tts-1-hd`.
+          instructions: nil,
+          # The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
+          # `wav`, and `pcm`.
+          response_format: nil,
+          # The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
+          # the default.
+          speed: nil,
+          # The format to stream the audio in. Supported formats are `sse` and `audio`.
+          # `sse` is not supported for `tts-1` or `tts-1-hd`.
+ stream_format: nil, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end +end diff --git a/rbi/openai/resources/audio/transcriptions.rbi b/rbi/openai/resources/audio/transcriptions.rbi new file mode 100644 index 00000000..187218b0 --- /dev/null +++ b/rbi/openai/resources/audio/transcriptions.rbi @@ -0,0 +1,178 @@ +# typed: strong + +module OpenAI + module Resources + class Audio + class Transcriptions + # See {OpenAI::Resources::Audio::Transcriptions#create_streaming} for streaming + # counterpart. + # + # Transcribes audio into the input language. + sig do + params( + file: OpenAI::Internal::FileInput, + model: T.any(String, OpenAI::AudioModel::OrSymbol), + chunking_strategy: + T.nilable( + T.any( + Symbol, + OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::OrHash + ) + ), + include: T::Array[OpenAI::Audio::TranscriptionInclude::OrSymbol], + language: String, + prompt: String, + response_format: OpenAI::AudioResponseFormat::OrSymbol, + temperature: Float, + timestamp_granularities: + T::Array[ + OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity::OrSymbol + ], + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Models::Audio::TranscriptionCreateResponse::Variants + ) + end + def create( + # The audio file object (not file name) to transcribe, in one of these formats: + # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + file:, + # ID of the model to use. The options are `gpt-4o-transcribe`, + # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + # Whisper V2 model). + model:, + # Controls how the audio is cut into chunks. When set to `"auto"`, the server + # first normalizes loudness and then uses voice activity detection (VAD) to choose + # boundaries. `server_vad` object can be provided to tweak VAD detection + # parameters manually. If unset, the audio is transcribed as a single block. + chunking_strategy: nil, + # Additional information to include in the transcription response. `logprobs` will + # return the log probabilities of the tokens in the response to understand the + # model's confidence in the transcription. `logprobs` only works with + # response_format set to `json` and only with the models `gpt-4o-transcribe` and + # `gpt-4o-mini-transcribe`. + include: nil, + # The language of the input audio. Supplying the input language in + # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + # format will improve accuracy and latency. + language: nil, + # An optional text to guide the model's style or continue a previous audio + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should match the audio language. + prompt: nil, + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + # the only supported format is `json`. + response_format: nil, + # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. 
+          temperature: nil,
+          # The timestamp granularities to populate for this transcription.
+          # `response_format` must be set to `verbose_json` to use timestamp granularities.
+          # Either or both of these options are supported: `word`, or `segment`. Note: There
+          # is no additional latency for segment timestamps, but generating word timestamps
+          # incurs additional latency.
+          timestamp_granularities: nil,
+          # There is no need to provide `stream:`. Instead, use `#create_streaming` or
+          # `#create` for streaming and non-streaming use cases, respectively.
+          stream: false,
+          request_options: {}
+        )
+        end
+
+        # See {OpenAI::Resources::Audio::Transcriptions#create} for non-streaming
+        # counterpart.
+        #
+        # Transcribes audio into the input language.
+        sig do
+          params(
+            file: OpenAI::Internal::FileInput,
+            model: T.any(String, OpenAI::AudioModel::OrSymbol),
+            chunking_strategy:
+              T.nilable(
+                T.any(
+                  Symbol,
+                  OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::OrHash
+                )
+              ),
+            include: T::Array[OpenAI::Audio::TranscriptionInclude::OrSymbol],
+            language: String,
+            prompt: String,
+            response_format: OpenAI::AudioResponseFormat::OrSymbol,
+            temperature: Float,
+            timestamp_granularities:
+              T::Array[
+                OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity::OrSymbol
+              ],
+            stream: T.noreturn,
+            request_options: OpenAI::RequestOptions::OrHash
+          ).returns(
+            OpenAI::Internal::Stream[
+              OpenAI::Audio::TranscriptionStreamEvent::Variants
+            ]
+          )
+        end
+        def create_streaming(
+          # The audio file object (not file name) to transcribe, in one of these formats:
+          # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+          file:,
+          # ID of the model to use. The options are `gpt-4o-transcribe`,
+          # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
+          # Whisper V2 model).
+          model:,
+          # Controls how the audio is cut into chunks. When set to `"auto"`, the server
+          # first normalizes loudness and then uses voice activity detection (VAD) to choose
+          # boundaries. `server_vad` object can be provided to tweak VAD detection
+          # parameters manually. If unset, the audio is transcribed as a single block.
+          chunking_strategy: nil,
+          # Additional information to include in the transcription response. `logprobs` will
+          # return the log probabilities of the tokens in the response to understand the
+          # model's confidence in the transcription. `logprobs` only works with
+          # response_format set to `json` and only with the models `gpt-4o-transcribe` and
+          # `gpt-4o-mini-transcribe`.
+          include: nil,
+          # The language of the input audio. Supplying the input language in
+          # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+          # format will improve accuracy and latency.
+          language: nil,
+          # An optional text to guide the model's style or continue a previous audio
+          # segment. The
+          # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+          # should match the audio language.
+          prompt: nil,
+          # The format of the output, in one of these options: `json`, `text`, `srt`,
+          # `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
+          # the only supported format is `json`.
+          response_format: nil,
+          # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
+          # output more random, while lower values like 0.2 will make it more focused and
+          # deterministic.
If set to 0, the model will use
+          # [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+          # automatically increase the temperature until certain thresholds are hit.
+          temperature: nil,
+          # The timestamp granularities to populate for this transcription.
+          # `response_format` must be set to `verbose_json` to use timestamp granularities.
+          # Either or both of these options are supported: `word`, or `segment`. Note: There
+          # is no additional latency for segment timestamps, but generating word timestamps
+          # incurs additional latency.
+          timestamp_granularities: nil,
+          # There is no need to provide `stream:`. Instead, use `#create_streaming` or
+          # `#create` for streaming and non-streaming use cases, respectively.
+          stream: true,
+          request_options: {}
+        )
+        end
+
+        # @api private
+        sig { params(client: OpenAI::Client).returns(T.attached_class) }
+        def self.new(client:)
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/resources/audio/translations.rbi b/rbi/openai/resources/audio/translations.rbi
new file mode 100644
index 00000000..bd8adba2
--- /dev/null
+++ b/rbi/openai/resources/audio/translations.rbi
@@ -0,0 +1,51 @@
+# typed: strong
+
+module OpenAI
+  module Resources
+    class Audio
+      class Translations
+        # Translates audio into English.
+        sig do
+          params(
+            file: OpenAI::Internal::FileInput,
+            model: T.any(String, OpenAI::AudioModel::OrSymbol),
+            prompt: String,
+            response_format:
+              OpenAI::Audio::TranslationCreateParams::ResponseFormat::OrSymbol,
+            temperature: Float,
+            request_options: OpenAI::RequestOptions::OrHash
+          ).returns(OpenAI::Models::Audio::TranslationCreateResponse::Variants)
+        end
+        def create(
+          # The audio file object (not file name) to translate, in one of these formats:
+          # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+          file:,
+          # ID of the model to use. Only `whisper-1` (which is powered by our open source
+          # Whisper V2 model) is currently available.
+          model:,
+          # An optional text to guide the model's style or continue a previous audio
+          # segment. The
+          # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+          # should be in English.
+          prompt: nil,
+          # The format of the output, in one of these options: `json`, `text`, `srt`,
+          # `verbose_json`, or `vtt`.
+          response_format: nil,
+          # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
+          # output more random, while lower values like 0.2 will make it more focused and
+          # deterministic. If set to 0, the model will use
+          # [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+          # automatically increase the temperature until certain thresholds are hit.
+ temperature: nil, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end +end diff --git a/rbi/openai/resources/batches.rbi b/rbi/openai/resources/batches.rbi new file mode 100644 index 00000000..26543c2d --- /dev/null +++ b/rbi/openai/resources/batches.rbi @@ -0,0 +1,109 @@ +# typed: strong + +module OpenAI + module Resources + class Batches + # Creates and executes a batch from an uploaded file of requests + sig do + params( + completion_window: + OpenAI::BatchCreateParams::CompletionWindow::OrSymbol, + endpoint: OpenAI::BatchCreateParams::Endpoint::OrSymbol, + input_file_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + output_expires_after: + OpenAI::BatchCreateParams::OutputExpiresAfter::OrHash, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Batch) + end + def create( + # The time frame within which the batch should be processed. Currently only `24h` + # is supported. + completion_window:, + # The endpoint to be used for all requests in the batch. Currently + # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + # are supported. Note that `/v1/embeddings` batches are also restricted to a + # maximum of 50,000 embedding inputs across all requests in the batch. + endpoint:, + # The ID of an uploaded file that contains requests for the new batch. + # + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. + # + # Your input file must be formatted as a + # [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), + # and must be uploaded with the purpose `batch`. The file can contain up to 50,000 + # requests, and can be up to 200 MB in size. + input_file_id:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The expiration policy for the output and/or error file that are generated for a + # batch. + output_expires_after: nil, + request_options: {} + ) + end + + # Retrieves a batch. + sig do + params( + batch_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Batch) + end + def retrieve( + # The ID of the batch to retrieve. + batch_id, + request_options: {} + ) + end + + # List your organization's batches. + sig do + params( + after: String, + limit: Integer, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Internal::CursorPage[OpenAI::Batch]) + end + def list( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + request_options: {} + ) + end + + # Cancels an in-progress batch. The batch will be in status `cancelling` for up to + # 10 minutes, before changing to `cancelled`, where it will have partial results + # (if any) available in the output file. 
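+    #
+    # A minimal sketch ("batch_abc123" is a hypothetical ID):
+    #
+    #   batch = client.batches.cancel("batch_abc123")
+    #   puts(batch.status)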
+ sig do + params( + batch_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Batch) + end + def cancel( + # The ID of the batch to cancel. + batch_id, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/lib/openai/resources/beta.rbi b/rbi/openai/resources/beta.rbi similarity index 79% rename from rbi/lib/openai/resources/beta.rbi rename to rbi/openai/resources/beta.rbi index 3e97021d..c53baf2b 100644 --- a/rbi/lib/openai/resources/beta.rbi +++ b/rbi/openai/resources/beta.rbi @@ -4,13 +4,12 @@ module OpenAI module Resources class Beta sig { returns(OpenAI::Resources::Beta::Assistants) } - def assistants - end + attr_reader :assistants sig { returns(OpenAI::Resources::Beta::Threads) } - def threads - end + attr_reader :threads + # @api private sig { params(client: OpenAI::Client).returns(T.attached_class) } def self.new(client:) end diff --git a/rbi/openai/resources/beta/assistants.rbi b/rbi/openai/resources/beta/assistants.rbi new file mode 100644 index 00000000..6489d48e --- /dev/null +++ b/rbi/openai/resources/beta/assistants.rbi @@ -0,0 +1,292 @@ +# typed: strong + +module OpenAI + module Resources + class Beta + class Assistants + # Create an assistant with a model and instructions. + sig do + params( + model: T.any(String, OpenAI::ChatModel::OrSymbol), + description: T.nilable(String), + instructions: T.nilable(String), + metadata: T.nilable(T::Hash[Symbol, String]), + name: T.nilable(String), + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + response_format: + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash + ) + ), + temperature: T.nilable(Float), + tool_resources: + T.nilable( + OpenAI::Beta::AssistantCreateParams::ToolResources::OrHash + ), + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::FileSearchTool::OrHash, + OpenAI::Beta::FunctionTool::OrHash + ) + ], + top_p: T.nilable(Float), + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Assistant) + end + def create( + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + model:, + # The description of the assistant. The maximum length is 512 characters. + description: nil, + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. + instructions: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The name of the assistant. The maximum length is 256 characters. + name: nil, + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. 
+          reasoning_effort: nil,
+          # Specifies the format that the model must output. Compatible with
+          # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+          # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
+          # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+          #
+          # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+          # Outputs which ensures the model will match your supplied JSON schema. Learn more
+          # in the
+          # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+          #
+          # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
+          # message the model generates is valid JSON.
+          #
+          # **Important:** when using JSON mode, you **must** also instruct the model to
+          # produce JSON yourself via a system or user message. Without this, the model may
+          # generate an unending stream of whitespace until the generation reaches the token
+          # limit, resulting in a long-running and seemingly "stuck" request. Also note that
+          # the message content may be partially cut off if `finish_reason="length"`, which
+          # indicates the generation exceeded `max_tokens` or the conversation exceeded the
+          # max context length.
+          response_format: nil,
+          # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+          # make the output more random, while lower values like 0.2 will make it more
+          # focused and deterministic.
+          temperature: nil,
+          # A set of resources that are used by the assistant's tools. The resources are
+          # specific to the type of tool. For example, the `code_interpreter` tool requires
+          # a list of file IDs, while the `file_search` tool requires a list of vector store
+          # IDs.
+          tool_resources: nil,
+          # A list of tools enabled on the assistant. There can be a maximum of 128 tools per
+          # assistant. Tools can be of types `code_interpreter`, `file_search`, or
+          # `function`.
+          tools: nil,
+          # An alternative to sampling with temperature, called nucleus sampling, where the
+          # model considers the results of the tokens with top_p probability mass. So 0.1
+          # means only the tokens comprising the top 10% probability mass are considered.
+          #
+          # We generally recommend altering this or temperature but not both.
+          top_p: nil,
+          request_options: {}
+        )
+        end
+
+        # Retrieves an assistant.
+        sig do
+          params(
+            assistant_id: String,
+            request_options: OpenAI::RequestOptions::OrHash
+          ).returns(OpenAI::Beta::Assistant)
+        end
+        def retrieve(
+          # The ID of the assistant to retrieve.
+          assistant_id,
+          request_options: {}
+        )
+        end
+
+        # Modifies an assistant.
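+        #
+        # A minimal sketch ("asst_abc123" is a hypothetical ID):
+        #
+        #   assistant = client.beta.assistants.update("asst_abc123", name: "Math Tutor")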
+ sig do + params( + assistant_id: String, + description: T.nilable(String), + instructions: T.nilable(String), + metadata: T.nilable(T::Hash[Symbol, String]), + model: + T.any( + String, + OpenAI::Beta::AssistantUpdateParams::Model::OrSymbol + ), + name: T.nilable(String), + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + response_format: + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash + ) + ), + temperature: T.nilable(Float), + tool_resources: + T.nilable( + OpenAI::Beta::AssistantUpdateParams::ToolResources::OrHash + ), + tools: + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::FileSearchTool::OrHash, + OpenAI::Beta::FunctionTool::OrHash + ) + ], + top_p: T.nilable(Float), + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Assistant) + end + def update( + # The ID of the assistant to modify. + assistant_id, + # The description of the assistant. The maximum length is 512 characters. + description: nil, + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. + instructions: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + model: nil, + # The name of the assistant. The maximum length is 256 characters. + name: nil, + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. 
+          response_format: nil,
+          # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+          # make the output more random, while lower values like 0.2 will make it more
+          # focused and deterministic.
+          temperature: nil,
+          # A set of resources that are used by the assistant's tools. The resources are
+          # specific to the type of tool. For example, the `code_interpreter` tool requires
+          # a list of file IDs, while the `file_search` tool requires a list of vector store
+          # IDs.
+          tool_resources: nil,
+          # A list of tools enabled on the assistant. There can be a maximum of 128 tools per
+          # assistant. Tools can be of types `code_interpreter`, `file_search`, or
+          # `function`.
+          tools: nil,
+          # An alternative to sampling with temperature, called nucleus sampling, where the
+          # model considers the results of the tokens with top_p probability mass. So 0.1
+          # means only the tokens comprising the top 10% probability mass are considered.
+          #
+          # We generally recommend altering this or temperature but not both.
+          top_p: nil,
+          request_options: {}
+        )
+        end
+
+        # Returns a list of assistants.
+        sig do
+          params(
+            after: String,
+            before: String,
+            limit: Integer,
+            order: OpenAI::Beta::AssistantListParams::Order::OrSymbol,
+            request_options: OpenAI::RequestOptions::OrHash
+          ).returns(OpenAI::Internal::CursorPage[OpenAI::Beta::Assistant])
+        end
+        def list(
+          # A cursor for use in pagination. `after` is an object ID that defines your place
+          # in the list. For instance, if you make a list request and receive 100 objects,
+          # ending with obj_foo, your subsequent call can include after=obj_foo in order to
+          # fetch the next page of the list.
+          after: nil,
+          # A cursor for use in pagination. `before` is an object ID that defines your place
+          # in the list. For instance, if you make a list request and receive 100 objects,
+          # starting with obj_foo, your subsequent call can include before=obj_foo in order
+          # to fetch the previous page of the list.
+          before: nil,
+          # A limit on the number of objects to be returned. Limit can range between 1 and
+          # 100, and the default is 20.
+          limit: nil,
+          # Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+          # order and `desc` for descending order.
+          order: nil,
+          request_options: {}
+        )
+        end
+
+        # Delete an assistant.
+        sig do
+          params(
+            assistant_id: String,
+            request_options: OpenAI::RequestOptions::OrHash
+          ).returns(OpenAI::Beta::AssistantDeleted)
+        end
+        def delete(
+          # The ID of the assistant to delete.
+          assistant_id,
+          request_options: {}
+        )
+        end
+
+        # @api private
+        sig { params(client: OpenAI::Client).returns(T.attached_class) }
+        def self.new(client:)
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/resources/beta/threads.rbi b/rbi/openai/resources/beta/threads.rbi
new file mode 100644
index 00000000..9efef885
--- /dev/null
+++ b/rbi/openai/resources/beta/threads.rbi
@@ -0,0 +1,415 @@
+# typed: strong
+
+module OpenAI
+  module Resources
+    class Beta
+      class Threads
+        sig { returns(OpenAI::Resources::Beta::Threads::Runs) }
+        attr_reader :runs
+
+        sig { returns(OpenAI::Resources::Beta::Threads::Messages) }
+        attr_reader :messages
+
+        # Create a thread.
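+        #
+        # A minimal sketch:
+        #
+        #   thread = client.beta.threads.create(
+        #     messages: [{role: :user, content: "Hello, what is 2 + 2?"}]
+        #   )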
+ sig do + params( + messages: + T::Array[OpenAI::Beta::ThreadCreateParams::Message::OrHash], + metadata: T.nilable(T::Hash[Symbol, String]), + tool_resources: + T.nilable( + OpenAI::Beta::ThreadCreateParams::ToolResources::OrHash + ), + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Thread) + end + def create( + # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + # start the thread with. + messages: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + tool_resources: nil, + request_options: {} + ) + end + + # Retrieves a thread. + sig do + params( + thread_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Thread) + end + def retrieve( + # The ID of the thread to retrieve. + thread_id, + request_options: {} + ) + end + + # Modifies a thread. + sig do + params( + thread_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + tool_resources: + T.nilable( + OpenAI::Beta::ThreadUpdateParams::ToolResources::OrHash + ), + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Thread) + end + def update( + # The ID of the thread to modify. Only the `metadata` can be modified. + thread_id, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. + tool_resources: nil, + request_options: {} + ) + end + + # Delete a thread. + sig do + params( + thread_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::ThreadDeleted) + end + def delete( + # The ID of the thread to delete. + thread_id, + request_options: {} + ) + end + + # See {OpenAI::Resources::Beta::Threads#stream_raw} for streaming counterpart. + # + # Create a thread and run it in one request. 
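+        #
+        # A minimal sketch ("asst_abc123" is a hypothetical ID):
+        #
+        #   run = client.beta.threads.create_and_run(
+        #     assistant_id: "asst_abc123",
+        #     thread: {messages: [{role: :user, content: "Hello!"}]}
+        #   )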
+ sig do + params( + assistant_id: String, + instructions: T.nilable(String), + max_completion_tokens: T.nilable(Integer), + max_prompt_tokens: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: T.nilable(T.any(String, OpenAI::ChatModel::OrSymbol)), + parallel_tool_calls: T::Boolean, + response_format: + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash + ) + ), + temperature: T.nilable(Float), + thread: OpenAI::Beta::ThreadCreateAndRunParams::Thread::OrHash, + tool_choice: + T.nilable( + T.any( + OpenAI::Beta::AssistantToolChoiceOption::Auto::OrSymbol, + OpenAI::Beta::AssistantToolChoice::OrHash + ) + ), + tool_resources: + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::OrHash + ), + tools: + T.nilable( + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::FileSearchTool::OrHash, + OpenAI::Beta::FunctionTool::OrHash + ) + ] + ), + top_p: T.nilable(Float), + truncation_strategy: + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::OrHash + ), + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Threads::Run) + end + def create_and_run( + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. + assistant_id:, + # Override the default system message of the assistant. This is useful for + # modifying the behavior on a per-run basis. + instructions: nil, + # The maximum number of completion tokens that may be used over the course of the + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. + max_completion_tokens: nil, + # The maximum number of prompt tokens that may be used over the course of the run. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. + max_prompt_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. + model: nil, + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. + parallel_tool_calls: nil, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
+ # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + response_format: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + temperature: nil, + # Options to create a new thread. If no thread is provided when running a request, + # an empty thread will be created. + thread: nil, + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + tool_choice: nil, + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. + tool_resources: nil, + # Override the tools the assistant can use for this run. This is useful for + # modifying the behavior on a per-run basis. + tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. + top_p: nil, + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. + truncation_strategy: nil, + # There is no need to provide `stream:`. Instead, use `#stream_raw` or + # `#create_and_run` for streaming and non-streaming use cases, respectively. + stream: false, + request_options: {} + ) + end + + # See {OpenAI::Resources::Beta::Threads#create_and_run} for non-streaming + # counterpart. + # + # Create a thread and run it in one request. 
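+        #
+        # A minimal streaming sketch ("asst_abc123" is a hypothetical ID; each yielded
+        # item is an `OpenAI::Beta::AssistantStreamEvent` variant):
+        #
+        #   stream = client.beta.threads.stream_raw(assistant_id: "asst_abc123")
+        #   stream.each { |event| puts(event.class) }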
+ sig do + params( + assistant_id: String, + instructions: T.nilable(String), + max_completion_tokens: T.nilable(Integer), + max_prompt_tokens: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: T.nilable(T.any(String, OpenAI::ChatModel::OrSymbol)), + parallel_tool_calls: T::Boolean, + response_format: + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash + ) + ), + temperature: T.nilable(Float), + thread: OpenAI::Beta::ThreadCreateAndRunParams::Thread::OrHash, + tool_choice: + T.nilable( + T.any( + OpenAI::Beta::AssistantToolChoiceOption::Auto::OrSymbol, + OpenAI::Beta::AssistantToolChoice::OrHash + ) + ), + tool_resources: + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::OrHash + ), + tools: + T.nilable( + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::FileSearchTool::OrHash, + OpenAI::Beta::FunctionTool::OrHash + ) + ] + ), + top_p: T.nilable(Float), + truncation_strategy: + T.nilable( + OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::OrHash + ), + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::Stream[ + OpenAI::Beta::AssistantStreamEvent::Variants + ] + ) + end + def stream_raw( + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. + assistant_id:, + # Override the default system message of the assistant. This is useful for + # modifying the behavior on a per-run basis. + instructions: nil, + # The maximum number of completion tokens that may be used over the course of the + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. + max_completion_tokens: nil, + # The maximum number of prompt tokens that may be used over the course of the run. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. + max_prompt_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. + model: nil, + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. + parallel_tool_calls: nil, + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
+ # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + response_format: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + temperature: nil, + # Options to create a new thread. If no thread is provided when running a request, + # an empty thread will be created. + thread: nil, + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + tool_choice: nil, + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. + tool_resources: nil, + # Override the tools the assistant can use for this run. This is useful for + # modifying the behavior on a per-run basis. + tools: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. + top_p: nil, + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. + truncation_strategy: nil, + # There is no need to provide `stream:`. Instead, use `#stream_raw` or + # `#create_and_run` for streaming and non-streaming use cases, respectively. + stream: true, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end +end diff --git a/rbi/openai/resources/beta/threads/messages.rbi b/rbi/openai/resources/beta/threads/messages.rbi new file mode 100644 index 00000000..1562cf34 --- /dev/null +++ b/rbi/openai/resources/beta/threads/messages.rbi @@ -0,0 +1,159 @@ +# typed: strong + +module OpenAI + module Resources + class Beta + class Threads + class Messages + # Create a message. 
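+          #
+          # A minimal sketch ("thread_abc123" is a hypothetical ID):
+          #
+          #   message = client.beta.threads.messages.create(
+          #     "thread_abc123",
+          #     content: "What is 2 + 2?",
+          #     role: :user
+          #   )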
+ sig do + params( + thread_id: String, + content: + OpenAI::Beta::Threads::MessageCreateParams::Content::Variants, + role: OpenAI::Beta::Threads::MessageCreateParams::Role::OrSymbol, + attachments: + T.nilable( + T::Array[ + OpenAI::Beta::Threads::MessageCreateParams::Attachment::OrHash + ] + ), + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Threads::Message) + end + def create( + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # to create a message for. + thread_id, + # The text contents of the message. + content:, + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. + role:, + # A list of files attached to the message, and the tools they should be added to. + attachments: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + request_options: {} + ) + end + + # Retrieve a message. + sig do + params( + message_id: String, + thread_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Threads::Message) + end + def retrieve( + # The ID of the message to retrieve. + message_id, + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # to which this message belongs. + thread_id:, + request_options: {} + ) + end + + # Modifies a message. + sig do + params( + message_id: String, + thread_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Threads::Message) + end + def update( + # Path param: The ID of the message to modify. + message_id, + # Path param: The ID of the thread to which this message belongs. + thread_id:, + # Body param: Set of 16 key-value pairs that can be attached to an object. This + # can be useful for storing additional information about the object in a + # structured format, and querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + request_options: {} + ) + end + + # Returns a list of messages for a given thread. + sig do + params( + thread_id: String, + after: String, + before: String, + limit: Integer, + order: OpenAI::Beta::Threads::MessageListParams::Order::OrSymbol, + run_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::CursorPage[OpenAI::Beta::Threads::Message] + ) + end + def list( + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # the messages belong to. + thread_id, + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. 
+ after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + # Filter messages by the run ID that generated them. + run_id: nil, + request_options: {} + ) + end + + # Deletes a message. + sig do + params( + message_id: String, + thread_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Threads::MessageDeleted) + end + def delete( + # The ID of the message to delete. + message_id, + # The ID of the thread to which this message belongs. + thread_id:, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end + end +end diff --git a/rbi/openai/resources/beta/threads/runs.rbi b/rbi/openai/resources/beta/threads/runs.rbi new file mode 100644 index 00000000..d6e83222 --- /dev/null +++ b/rbi/openai/resources/beta/threads/runs.rbi @@ -0,0 +1,544 @@ +# typed: strong + +module OpenAI + module Resources + class Beta + class Threads + class Runs + sig { returns(OpenAI::Resources::Beta::Threads::Runs::Steps) } + attr_reader :steps + + # See {OpenAI::Resources::Beta::Threads::Runs#create_stream_raw} for streaming + # counterpart. + # + # Create a run. + sig do + params( + thread_id: String, + assistant_id: String, + include: + T::Array[OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol], + additional_instructions: T.nilable(String), + additional_messages: + T.nilable( + T::Array[ + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::OrHash + ] + ), + instructions: T.nilable(String), + max_completion_tokens: T.nilable(Integer), + max_prompt_tokens: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: T.nilable(T.any(String, OpenAI::ChatModel::OrSymbol)), + parallel_tool_calls: T::Boolean, + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + response_format: + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash + ) + ), + temperature: T.nilable(Float), + tool_choice: + T.nilable( + T.any( + OpenAI::Beta::AssistantToolChoiceOption::Auto::OrSymbol, + OpenAI::Beta::AssistantToolChoice::OrHash + ) + ), + tools: + T.nilable( + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::FileSearchTool::OrHash, + OpenAI::Beta::FunctionTool::OrHash + ) + ] + ), + top_p: T.nilable(Float), + truncation_strategy: + T.nilable( + OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::OrHash + ), + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Threads::Run) + end + def create( + # Path param: The ID of the thread to run. + thread_id, + # Body param: The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. + assistant_id:, + # Query param: A list of additional fields to include in the response. 
Currently + # the only supported value is + # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + # search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. + include: nil, + # Body param: Appends additional instructions at the end of the instructions for + # the run. This is useful for modifying the behavior on a per-run basis without + # overriding other instructions. + additional_instructions: nil, + # Body param: Adds additional messages to the thread before creating the run. + additional_messages: nil, + # Body param: Overrides the + # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + # of the assistant. This is useful for modifying the behavior on a per-run basis. + instructions: nil, + # Body param: The maximum number of completion tokens that may be used over the + # course of the run. The run will make a best effort to use only the number of + # completion tokens specified, across multiple turns of the run. If the run + # exceeds the number of completion tokens specified, the run will end with status + # `incomplete`. See `incomplete_details` for more info. + max_completion_tokens: nil, + # Body param: The maximum number of prompt tokens that may be used over the course + # of the run. The run will make a best effort to use only the number of prompt + # tokens specified, across multiple turns of the run. If the run exceeds the + # number of prompt tokens specified, the run will end with status `incomplete`. + # See `incomplete_details` for more info. + max_prompt_tokens: nil, + # Body param: Set of 16 key-value pairs that can be attached to an object. This + # can be useful for storing additional information about the object in a + # structured format, and querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # Body param: The ID of the + # [Model](https://platform.openai.com/docs/api-reference/models) to be used to + # execute this run. If a value is provided here, it will override the model + # associated with the assistant. If not, the model associated with the assistant + # will be used. + model: nil, + # Body param: Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. + parallel_tool_calls: nil, + # Body param: Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, + # Body param: Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
+ # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + response_format: nil, + # Body param: What sampling temperature to use, between 0 and 2. Higher values + # like 0.8 will make the output more random, while lower values like 0.2 will make + # it more focused and deterministic. + temperature: nil, + # Body param: Controls which (if any) tool is called by the model. `none` means + # the model will not call any tools and instead generates a message. `auto` is the + # default value and means the model can pick between generating a message or + # calling one or more tools. `required` means the model must call one or more + # tools before responding to the user. Specifying a particular tool like + # `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + tool_choice: nil, + # Body param: Override the tools the assistant can use for this run. This is + # useful for modifying the behavior on a per-run basis. + tools: nil, + # Body param: An alternative to sampling with temperature, called nucleus + # sampling, where the model considers the results of the tokens with top_p + # probability mass. So 0.1 means only the tokens comprising the top 10% + # probability mass are considered. + # + # We generally recommend altering this or temperature but not both. + top_p: nil, + # Body param: Controls for how a thread will be truncated prior to the run. Use + # this to control the initial context window of the run. + truncation_strategy: nil, + # There is no need to provide `stream:`. Instead, use `#create_stream_raw` or + # `#create` for streaming and non-streaming use cases, respectively. + stream: false, + request_options: {} + ) + end + + # See {OpenAI::Resources::Beta::Threads::Runs#create} for non-streaming + # counterpart. + # + # Create a run. 
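+ #
+ # A minimal streaming sketch (IDs are placeholders and assume a configured
+ # `client`; this is illustrative, not part of the generated signatures):
+ #
+ #   stream = client.beta.threads.runs.create_stream_raw(
+ #     "thread_123",
+ #     assistant_id: "asst_123"
+ #   )
+ #   stream.each { |event| pp event }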
+ sig do + params( + thread_id: String, + assistant_id: String, + include: + T::Array[OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol], + additional_instructions: T.nilable(String), + additional_messages: + T.nilable( + T::Array[ + OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::OrHash + ] + ), + instructions: T.nilable(String), + max_completion_tokens: T.nilable(Integer), + max_prompt_tokens: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: T.nilable(T.any(String, OpenAI::ChatModel::OrSymbol)), + parallel_tool_calls: T::Boolean, + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + response_format: + T.nilable( + T.any( + Symbol, + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash + ) + ), + temperature: T.nilable(Float), + tool_choice: + T.nilable( + T.any( + OpenAI::Beta::AssistantToolChoiceOption::Auto::OrSymbol, + OpenAI::Beta::AssistantToolChoice::OrHash + ) + ), + tools: + T.nilable( + T::Array[ + T.any( + OpenAI::Beta::CodeInterpreterTool::OrHash, + OpenAI::Beta::FileSearchTool::OrHash, + OpenAI::Beta::FunctionTool::OrHash + ) + ] + ), + top_p: T.nilable(Float), + truncation_strategy: + T.nilable( + OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::OrHash + ), + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::Stream[ + OpenAI::Beta::AssistantStreamEvent::Variants + ] + ) + end + def create_stream_raw( + # Path param: The ID of the thread to run. + thread_id, + # Body param: The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. + assistant_id:, + # Query param: A list of additional fields to include in the response. Currently + # the only supported value is + # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + # search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. + include: nil, + # Body param: Appends additional instructions at the end of the instructions for + # the run. This is useful for modifying the behavior on a per-run basis without + # overriding other instructions. + additional_instructions: nil, + # Body param: Adds additional messages to the thread before creating the run. + additional_messages: nil, + # Body param: Overrides the + # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + # of the assistant. This is useful for modifying the behavior on a per-run basis. + instructions: nil, + # Body param: The maximum number of completion tokens that may be used over the + # course of the run. The run will make a best effort to use only the number of + # completion tokens specified, across multiple turns of the run. If the run + # exceeds the number of completion tokens specified, the run will end with status + # `incomplete`. See `incomplete_details` for more info. + max_completion_tokens: nil, + # Body param: The maximum number of prompt tokens that may be used over the course + # of the run. The run will make a best effort to use only the number of prompt + # tokens specified, across multiple turns of the run. If the run exceeds the + # number of prompt tokens specified, the run will end with status `incomplete`. + # See `incomplete_details` for more info. 
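+ # As an illustration only, a hypothetical `max_prompt_tokens: 1_000` would
+ # cap prompt-side token usage across the run's turns.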
+ max_prompt_tokens: nil, + # Body param: Set of 16 key-value pairs that can be attached to an object. This + # can be useful for storing additional information about the object in a + # structured format, and querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # Body param: The ID of the + # [Model](https://platform.openai.com/docs/api-reference/models) to be used to + # execute this run. If a value is provided here, it will override the model + # associated with the assistant. If not, the model associated with the assistant + # will be used. + model: nil, + # Body param: Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. + parallel_tool_calls: nil, + # Body param: Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, + # Body param: Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. + response_format: nil, + # Body param: What sampling temperature to use, between 0 and 2. Higher values + # like 0.8 will make the output more random, while lower values like 0.2 will make + # it more focused and deterministic. + temperature: nil, + # Body param: Controls which (if any) tool is called by the model. `none` means + # the model will not call any tools and instead generates a message. `auto` is the + # default value and means the model can pick between generating a message or + # calling one or more tools. `required` means the model must call one or more + # tools before responding to the user. Specifying a particular tool like + # `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + tool_choice: nil, + # Body param: Override the tools the assistant can use for this run. This is + # useful for modifying the behavior on a per-run basis. 
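+ # For example, a hypothetical override such as
+ # `tools: [{type: :code_interpreter}]` restricts this run to that tool.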
+ tools: nil, + # Body param: An alternative to sampling with temperature, called nucleus + # sampling, where the model considers the results of the tokens with top_p + # probability mass. So 0.1 means only the tokens comprising the top 10% + # probability mass are considered. + # + # We generally recommend altering this or temperature but not both. + top_p: nil, + # Body param: Controls for how a thread will be truncated prior to the run. Use + # this to control the initial context window of the run. + truncation_strategy: nil, + # There is no need to provide `stream:`. Instead, use `#create_stream_raw` or + # `#create` for streaming and non-streaming use cases, respectively. + stream: true, + request_options: {} + ) + end + + # Retrieves a run. + sig do + params( + run_id: String, + thread_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Threads::Run) + end + def retrieve( + # The ID of the run to retrieve. + run_id, + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # that was run. + thread_id:, + request_options: {} + ) + end + + # Modifies a run. + sig do + params( + run_id: String, + thread_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Threads::Run) + end + def update( + # Path param: The ID of the run to modify. + run_id, + # Path param: The ID of the + # [thread](https://platform.openai.com/docs/api-reference/threads) that was run. + thread_id:, + # Body param: Set of 16 key-value pairs that can be attached to an object. This + # can be useful for storing additional information about the object in a + # structured format, and querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + request_options: {} + ) + end + + # Returns a list of runs belonging to a thread. + sig do + params( + thread_id: String, + after: String, + before: String, + limit: Integer, + order: OpenAI::Beta::Threads::RunListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Internal::CursorPage[OpenAI::Beta::Threads::Run]) + end + def list( + # The ID of the thread the run belongs to. + thread_id, + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) + end + + # Cancels a run that is `in_progress`. + sig do + params( + run_id: String, + thread_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Threads::Run) + end + def cancel( + # The ID of the run to cancel. 
+ run_id, + # The ID of the thread to which this run belongs. + thread_id:, + request_options: {} + ) + end + + # See {OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs_stream_raw} for + # streaming counterpart. + # + # When a run has the `status: "requires_action"` and `required_action.type` is + # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + # tool calls once they're all completed. All outputs must be submitted in a single + # request. + sig do + params( + run_id: String, + thread_id: String, + tool_outputs: + T::Array[ + OpenAI::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput::OrHash + ], + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Threads::Run) + end + def submit_tool_outputs( + # Path param: The ID of the run that requires the tool output submission. + run_id, + # Path param: The ID of the + # [thread](https://platform.openai.com/docs/api-reference/threads) to which this + # run belongs. + thread_id:, + # Body param: A list of tools for which the outputs are being submitted. + tool_outputs:, + # There is no need to provide `stream:`. Instead, use + # `#submit_tool_outputs_stream_raw` or `#submit_tool_outputs` for streaming and + # non-streaming use cases, respectively. + stream: false, + request_options: {} + ) + end + + # See {OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs} for + # non-streaming counterpart. + # + # When a run has the `status: "requires_action"` and `required_action.type` is + # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + # tool calls once they're all completed. All outputs must be submitted in a single + # request. + sig do + params( + run_id: String, + thread_id: String, + tool_outputs: + T::Array[ + OpenAI::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput::OrHash + ], + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::Stream[ + OpenAI::Beta::AssistantStreamEvent::Variants + ] + ) + end + def submit_tool_outputs_stream_raw( + # Path param: The ID of the run that requires the tool output submission. + run_id, + # Path param: The ID of the + # [thread](https://platform.openai.com/docs/api-reference/threads) to which this + # run belongs. + thread_id:, + # Body param: A list of tools for which the outputs are being submitted. + tool_outputs:, + # There is no need to provide `stream:`. Instead, use + # `#submit_tool_outputs_stream_raw` or `#submit_tool_outputs` for streaming and + # non-streaming use cases, respectively. + stream: true, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end + end +end diff --git a/rbi/openai/resources/beta/threads/runs/steps.rbi b/rbi/openai/resources/beta/threads/runs/steps.rbi new file mode 100644 index 00000000..a70232d6 --- /dev/null +++ b/rbi/openai/resources/beta/threads/runs/steps.rbi @@ -0,0 +1,106 @@ +# typed: strong + +module OpenAI + module Resources + class Beta + class Threads + class Runs + class Steps + # Retrieves a run step. + sig do + params( + step_id: String, + thread_id: String, + run_id: String, + include: + T::Array[ + OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol + ], + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Beta::Threads::Runs::RunStep) + end + def retrieve( + # Path param: The ID of the run step to retrieve. 
+ step_id, + # Path param: The ID of the thread to which the run and run step belongs. + thread_id:, + # Path param: The ID of the run to which the run step belongs. + run_id:, + # Query param: A list of additional fields to include in the response. Currently + # the only supported value is + # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + # search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. + include: nil, + request_options: {} + ) + end + + # Returns a list of run steps belonging to a run. + sig do + params( + run_id: String, + thread_id: String, + after: String, + before: String, + include: + T::Array[ + OpenAI::Beta::Threads::Runs::RunStepInclude::OrSymbol + ], + limit: Integer, + order: + OpenAI::Beta::Threads::Runs::StepListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::CursorPage[ + OpenAI::Beta::Threads::Runs::RunStep + ] + ) + end + def list( + # Path param: The ID of the run the run steps belong to. + run_id, + # Path param: The ID of the thread the run and run steps belong to. + thread_id:, + # Query param: A cursor for use in pagination. `after` is an object ID that + # defines your place in the list. For instance, if you make a list request and + # receive 100 objects, ending with obj_foo, your subsequent call can include + # after=obj_foo in order to fetch the next page of the list. + after: nil, + # Query param: A cursor for use in pagination. `before` is an object ID that + # defines your place in the list. For instance, if you make a list request and + # receive 100 objects, starting with obj_foo, your subsequent call can include + # before=obj_foo in order to fetch the previous page of the list. + before: nil, + # Query param: A list of additional fields to include in the response. Currently + # the only supported value is + # `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + # search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. + include: nil, + # Query param: A limit on the number of objects to be returned. Limit can range + # between 1 and 100, and the default is 20. + limit: nil, + # Query param: Sort order by the `created_at` timestamp of the objects. `asc` for + # ascending order and `desc` for descending order. 
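+ # For example, `order: :desc` returns the newest run steps first.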
+ order: nil, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end + end + end +end diff --git a/rbi/lib/openai/resources/chat.rbi b/rbi/openai/resources/chat.rbi similarity index 82% rename from rbi/lib/openai/resources/chat.rbi rename to rbi/openai/resources/chat.rbi index 4d090d3f..45c92d55 100644 --- a/rbi/lib/openai/resources/chat.rbi +++ b/rbi/openai/resources/chat.rbi @@ -4,9 +4,9 @@ module OpenAI module Resources class Chat sig { returns(OpenAI::Resources::Chat::Completions) } - def completions - end + attr_reader :completions + # @api private sig { params(client: OpenAI::Client).returns(T.attached_class) } def self.new(client:) end diff --git a/rbi/openai/resources/chat/completions.rbi b/rbi/openai/resources/chat/completions.rbi new file mode 100644 index 00000000..72cdcd20 --- /dev/null +++ b/rbi/openai/resources/chat/completions.rbi @@ -0,0 +1,735 @@ +# typed: strong + +module OpenAI + module Resources + class Chat + class Completions + sig { returns(OpenAI::Resources::Chat::Completions::Messages) } + attr_reader :messages + + # See {OpenAI::Resources::Chat::Completions#stream_raw} for streaming counterpart. + # + # **Starting a new project?** We recommend trying + # [Responses](https://platform.openai.com/docs/api-reference/responses) to take + # advantage of the latest OpenAI platform features. Compare + # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + # + # --- + # + # Creates a model response for the given chat conversation. Learn more in the + # [text generation](https://platform.openai.com/docs/guides/text-generation), + # [vision](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio) guides. + # + # Parameter support can differ depending on the model used to generate the + # response, particularly for newer reasoning models. Parameters that are only + # supported for reasoning models are noted below. For the current state of + # unsupported parameters in reasoning models, + # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). 
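+ #
+ # A hedged end-to-end sketch (model name and message content are examples
+ # only; assumes a configured `client`):
+ #
+ #   completion = client.chat.completions.create(
+ #     messages: [{role: :user, content: "Say hello"}],
+ #     model: "gpt-4o"
+ #   )
+ #   puts completion.choices.first.message.content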
+ sig do + params( + messages: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionDeveloperMessageParam::OrHash, + OpenAI::Chat::ChatCompletionSystemMessageParam::OrHash, + OpenAI::Chat::ChatCompletionUserMessageParam::OrHash, + OpenAI::Chat::ChatCompletionAssistantMessageParam::OrHash, + OpenAI::Chat::ChatCompletionToolMessageParam::OrHash, + OpenAI::Chat::ChatCompletionFunctionMessageParam::OrHash + ) + ], + model: T.any(String, OpenAI::ChatModel::OrSymbol), + audio: T.nilable(OpenAI::Chat::ChatCompletionAudioParam::OrHash), + frequency_penalty: T.nilable(Float), + function_call: + T.any( + OpenAI::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode::OrSymbol, + OpenAI::Chat::ChatCompletionFunctionCallOption::OrHash + ), + functions: + T::Array[OpenAI::Chat::CompletionCreateParams::Function::OrHash], + logit_bias: T.nilable(T::Hash[Symbol, Integer]), + logprobs: T.nilable(T::Boolean), + max_completion_tokens: T.nilable(Integer), + max_tokens: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + modalities: + T.nilable( + T::Array[ + OpenAI::Chat::CompletionCreateParams::Modality::OrSymbol + ] + ), + n: T.nilable(Integer), + parallel_tool_calls: T::Boolean, + prediction: + T.nilable(OpenAI::Chat::ChatCompletionPredictionContent::OrHash), + presence_penalty: T.nilable(Float), + prompt_cache_key: String, + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + response_format: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ), + safety_identifier: String, + seed: T.nilable(Integer), + service_tier: + T.nilable( + OpenAI::Chat::CompletionCreateParams::ServiceTier::OrSymbol + ), + stop: + T.nilable(OpenAI::Chat::CompletionCreateParams::Stop::Variants), + store: T.nilable(T::Boolean), + stream_options: + T.nilable(OpenAI::Chat::ChatCompletionStreamOptions::OrHash), + temperature: T.nilable(Float), + tool_choice: + T.any( + OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, + OpenAI::Chat::ChatCompletionAllowedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::OrHash + ), + tools: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionFunctionTool::OrHash, + OpenAI::Chat::ChatCompletionCustomTool::OrHash + ) + ], + top_logprobs: T.nilable(Integer), + top_p: T.nilable(Float), + user: String, + verbosity: + T.nilable( + OpenAI::Chat::CompletionCreateParams::Verbosity::OrSymbol + ), + web_search_options: + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::OrHash, + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Chat::ChatCompletion) + end + def create( + # A list of messages comprising the conversation so far. Depending on the + # [model](https://platform.openai.com/docs/models) you use, different message + # types (modalities) are supported, like + # [text](https://platform.openai.com/docs/guides/text-generation), + # [images](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio). + messages:, + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. + model:, + # Parameters for audio output. 
Required when audio output is requested with + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). + audio: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. + frequency_penalty: nil, + # Deprecated in favor of `tool_choice`. + # + # Controls which (if any) function is called by the model. + # + # `none` means the model will not call a function and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling a + # function. + # + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. + # + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. + function_call: nil, + # Deprecated in favor of `tools`. + # + # A list of functions the model may generate JSON inputs for. + functions: nil, + # Modify the likelihood of specified tokens appearing in the completion. + # + # Accepts a JSON object that maps tokens (specified by their token ID in the + # tokenizer) to an associated bias value from -100 to 100. Mathematically, the + # bias is added to the logits generated by the model prior to sampling. The exact + # effect will vary per model, but values between -1 and 1 should decrease or + # increase likelihood of selection; values like -100 or 100 should result in a ban + # or exclusive selection of the relevant token. + logit_bias: nil, + # Whether to return log probabilities of the output tokens or not. If true, + # returns the log probabilities of each output token returned in the `content` of + # `message`. + logprobs: nil, + # An upper bound for the number of tokens that can be generated for a completion, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_completion_tokens: nil, + # The maximum number of [tokens](/tokenizer) that can be generated in the chat + # completion. This value can be used to control + # [costs](https://openai.com/api/pricing/) for text generated via API. + # + # This value is now deprecated in favor of `max_completion_tokens`, and is not + # compatible with + # [o-series models](https://platform.openai.com/docs/guides/reasoning). + max_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # Output types that you would like the model to generate. Most models are capable + # of generating text, which is the default: + # + # `["text"]` + # + # The `gpt-4o-audio-preview` model can also be used to + # [generate audio](https://platform.openai.com/docs/guides/audio). To request that + # this model generate both text and audio responses, you can use: + # + # `["text", "audio"]` + modalities: nil, + # How many chat completion choices to generate for each input message. Note that + # you will be charged based on the number of generated tokens across all of the + # choices. Keep `n` as `1` to minimize costs. 
+ n: nil, + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. + parallel_tool_calls: nil, + # Static predicted output content, such as the content of a text file that is + # being regenerated. + prediction: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. + presence_penalty: nil, + # Used by OpenAI to cache responses for similar requests to optimize your cache + # hit rates. Replaces the `user` field. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_key: nil, + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + response_format: nil, + # A stable identifier used to help detect users of your application that may be + # violating OpenAI's usage policies. The IDs should be a string that uniquely + # identifies each user. We recommend hashing their username or email address, in + # order to avoid sending us any identifying information. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + safety_identifier: nil, + # This feature is in Beta. If specified, our system will make a best effort to + # sample deterministically, such that repeated requests with the same `seed` and + # parameters should return the same result. Determinism is not guaranteed, and you + # should refer to the `system_fingerprint` response parameter to monitor changes + # in the backend. + seed: nil, + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + service_tier: nil, + # Not supported with latest reasoning models `o3` and `o4-mini`. + # + # Up to 4 sequences where the API will stop generating further tokens. 
The + # returned text will not contain the stop sequence. + stop: nil, + # Whether or not to store the output of this chat completion request for use in + # our [model distillation](https://platform.openai.com/docs/guides/distillation) + # or [evals](https://platform.openai.com/docs/guides/evals) products. + # + # Supports text and image inputs. Note: image inputs over 8MB will be dropped. + store: nil, + # Options for streaming response. Only set this when you set `stream: true`. + stream_options: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. + temperature: nil, + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + # + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. + tool_choice: nil, + # A list of tools the model may call. You can provide either + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + # or [function tools](https://platform.openai.com/docs/guides/function-calling). + tools: nil, + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + # `logprobs` must be set to `true` if this parameter is used. + top_logprobs: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. + top_p: nil, + # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + # `prompt_cache_key` instead to maintain caching optimizations. A stable + # identifier for your end-users. Used to boost cache hit rates by better bucketing + # similar requests and to help OpenAI detect and prevent abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + user: nil, + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + verbosity: nil, + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + web_search_options: nil, + # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#create` + # for streaming and non-streaming use cases, respectively. + stream: false, + request_options: {} + ) + end + + # See {OpenAI::Resources::Chat::Completions#create} for non-streaming counterpart. + # + # **Starting a new project?** We recommend trying + # [Responses](https://platform.openai.com/docs/api-reference/responses) to take + # advantage of the latest OpenAI platform features. 
Compare + # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + # + # --- + # + # Creates a model response for the given chat conversation. Learn more in the + # [text generation](https://platform.openai.com/docs/guides/text-generation), + # [vision](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio) guides. + # + # Parameter support can differ depending on the model used to generate the + # response, particularly for newer reasoning models. Parameters that are only + # supported for reasoning models are noted below. For the current state of + # unsupported parameters in reasoning models, + # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). + sig do + params( + messages: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionDeveloperMessageParam::OrHash, + OpenAI::Chat::ChatCompletionSystemMessageParam::OrHash, + OpenAI::Chat::ChatCompletionUserMessageParam::OrHash, + OpenAI::Chat::ChatCompletionAssistantMessageParam::OrHash, + OpenAI::Chat::ChatCompletionToolMessageParam::OrHash, + OpenAI::Chat::ChatCompletionFunctionMessageParam::OrHash + ) + ], + model: T.any(String, OpenAI::ChatModel::OrSymbol), + audio: T.nilable(OpenAI::Chat::ChatCompletionAudioParam::OrHash), + frequency_penalty: T.nilable(Float), + function_call: + T.any( + OpenAI::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode::OrSymbol, + OpenAI::Chat::ChatCompletionFunctionCallOption::OrHash + ), + functions: + T::Array[OpenAI::Chat::CompletionCreateParams::Function::OrHash], + logit_bias: T.nilable(T::Hash[Symbol, Integer]), + logprobs: T.nilable(T::Boolean), + max_completion_tokens: T.nilable(Integer), + max_tokens: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + modalities: + T.nilable( + T::Array[ + OpenAI::Chat::CompletionCreateParams::Modality::OrSymbol + ] + ), + n: T.nilable(Integer), + parallel_tool_calls: T::Boolean, + prediction: + T.nilable(OpenAI::Chat::ChatCompletionPredictionContent::OrHash), + presence_penalty: T.nilable(Float), + prompt_cache_key: String, + reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol), + response_format: + T.any( + OpenAI::ResponseFormatText::OrHash, + OpenAI::ResponseFormatJSONSchema::OrHash, + OpenAI::ResponseFormatJSONObject::OrHash + ), + safety_identifier: String, + seed: T.nilable(Integer), + service_tier: + T.nilable( + OpenAI::Chat::CompletionCreateParams::ServiceTier::OrSymbol + ), + stop: + T.nilable(OpenAI::Chat::CompletionCreateParams::Stop::Variants), + store: T.nilable(T::Boolean), + stream_options: + T.nilable(OpenAI::Chat::ChatCompletionStreamOptions::OrHash), + temperature: T.nilable(Float), + tool_choice: + T.any( + OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, + OpenAI::Chat::ChatCompletionAllowedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::OrHash + ), + tools: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionFunctionTool::OrHash, + OpenAI::Chat::ChatCompletionCustomTool::OrHash + ) + ], + top_logprobs: T.nilable(Integer), + top_p: T.nilable(Float), + user: String, + verbosity: + T.nilable( + OpenAI::Chat::CompletionCreateParams::Verbosity::OrSymbol + ), + web_search_options: + OpenAI::Chat::CompletionCreateParams::WebSearchOptions::OrHash, + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + 
).returns(OpenAI::Internal::Stream[OpenAI::Chat::ChatCompletionChunk]) + end + def stream_raw( + # A list of messages comprising the conversation so far. Depending on the + # [model](https://platform.openai.com/docs/models) you use, different message + # types (modalities) are supported, like + # [text](https://platform.openai.com/docs/guides/text-generation), + # [images](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio). + messages:, + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. + model:, + # Parameters for audio output. Required when audio output is requested with + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). + audio: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. + frequency_penalty: nil, + # Deprecated in favor of `tool_choice`. + # + # Controls which (if any) function is called by the model. + # + # `none` means the model will not call a function and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling a + # function. + # + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. + # + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. + function_call: nil, + # Deprecated in favor of `tools`. + # + # A list of functions the model may generate JSON inputs for. + functions: nil, + # Modify the likelihood of specified tokens appearing in the completion. + # + # Accepts a JSON object that maps tokens (specified by their token ID in the + # tokenizer) to an associated bias value from -100 to 100. Mathematically, the + # bias is added to the logits generated by the model prior to sampling. The exact + # effect will vary per model, but values between -1 and 1 should decrease or + # increase likelihood of selection; values like -100 or 100 should result in a ban + # or exclusive selection of the relevant token. + logit_bias: nil, + # Whether to return log probabilities of the output tokens or not. If true, + # returns the log probabilities of each output token returned in the `content` of + # `message`. + logprobs: nil, + # An upper bound for the number of tokens that can be generated for a completion, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_completion_tokens: nil, + # The maximum number of [tokens](/tokenizer) that can be generated in the chat + # completion. This value can be used to control + # [costs](https://openai.com/api/pricing/) for text generated via API. + # + # This value is now deprecated in favor of `max_completion_tokens`, and is not + # compatible with + # [o-series models](https://platform.openai.com/docs/guides/reasoning). + max_tokens: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. 
+ # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # Output types that you would like the model to generate. Most models are capable + # of generating text, which is the default: + # + # `["text"]` + # + # The `gpt-4o-audio-preview` model can also be used to + # [generate audio](https://platform.openai.com/docs/guides/audio). To request that + # this model generate both text and audio responses, you can use: + # + # `["text", "audio"]` + modalities: nil, + # How many chat completion choices to generate for each input message. Note that + # you will be charged based on the number of generated tokens across all of the + # choices. Keep `n` as `1` to minimize costs. + n: nil, + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. + parallel_tool_calls: nil, + # Static predicted output content, such as the content of a text file that is + # being regenerated. + prediction: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. + presence_penalty: nil, + # Used by OpenAI to cache responses for similar requests to optimize your cache + # hit rates. Replaces the `user` field. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_key: nil, + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. + reasoning_effort: nil, + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. + response_format: nil, + # A stable identifier used to help detect users of your application that may be + # violating OpenAI's usage policies. The IDs should be a string that uniquely + # identifies each user. We recommend hashing their username or email address, in + # order to avoid sending us any identifying information. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + safety_identifier: nil, + # This feature is in Beta. If specified, our system will make a best effort to + # sample deterministically, such that repeated requests with the same `seed` and + # parameters should return the same result. Determinism is not guaranteed, and you + # should refer to the `system_fingerprint` response parameter to monitor changes + # in the backend. + seed: nil, + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. 
+ # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + service_tier: nil, + # Not supported with latest reasoning models `o3` and `o4-mini`. + # + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. + stop: nil, + # Whether or not to store the output of this chat completion request for use in + # our [model distillation](https://platform.openai.com/docs/guides/distillation) + # or [evals](https://platform.openai.com/docs/guides/evals) products. + # + # Supports text and image inputs. Note: image inputs over 8MB will be dropped. + store: nil, + # Options for streaming response. Only set this when you set `stream: true`. + stream_options: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. + temperature: nil, + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + # + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. + tool_choice: nil, + # A list of tools the model may call. You can provide either + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + # or [function tools](https://platform.openai.com/docs/guides/function-calling). + tools: nil, + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + # `logprobs` must be set to `true` if this parameter is used. + top_logprobs: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. + top_p: nil, + # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + # `prompt_cache_key` instead to maintain caching optimizations. A stable + # identifier for your end-users. Used to boost cache hit rates by better bucketing + # similar requests and to help OpenAI detect and prevent abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + user: nil, + # Constrains the verbosity of the model's response. 
Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + verbosity: nil, + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + web_search_options: nil, + # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#create` + # for streaming and non-streaming use cases, respectively. + stream: true, + request_options: {} + ) + end + + # Get a stored chat completion. Only Chat Completions that have been created with + # the `store` parameter set to `true` will be returned. + sig do + params( + completion_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Chat::ChatCompletion) + end + def retrieve( + # The ID of the chat completion to retrieve. + completion_id, + request_options: {} + ) + end + + # Modify a stored chat completion. Only Chat Completions that have been created + # with the `store` parameter set to `true` can be modified. Currently, the only + # supported modification is to update the `metadata` field. + sig do + params( + completion_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Chat::ChatCompletion) + end + def update( + # The ID of the chat completion to update. + completion_id, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata:, + request_options: {} + ) + end + + # List stored Chat Completions. Only Chat Completions that have been stored with + # the `store` parameter set to `true` will be returned. + sig do + params( + after: String, + limit: Integer, + metadata: T.nilable(T::Hash[Symbol, String]), + model: String, + order: OpenAI::Chat::CompletionListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Internal::CursorPage[OpenAI::Chat::ChatCompletion]) + end + def list( + # Identifier for the last chat completion from the previous pagination request. + after: nil, + # Number of Chat Completions to retrieve. + limit: nil, + # A list of metadata keys to filter the Chat Completions by. Example: + # + # `metadata[key1]=value1&metadata[key2]=value2` + metadata: nil, + # The model used to generate the Chat Completions. + model: nil, + # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or + # `desc` for descending order. Defaults to `asc`. + order: nil, + request_options: {} + ) + end + + # Delete a stored chat completion. Only Chat Completions that have been created + # with the `store` parameter set to `true` can be deleted. + sig do + params( + completion_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Chat::ChatCompletionDeleted) + end + def delete( + # The ID of the chat completion to delete. 
+ completion_id, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end +end diff --git a/rbi/openai/resources/chat/completions/messages.rbi b/rbi/openai/resources/chat/completions/messages.rbi new file mode 100644 index 00000000..69a906e7 --- /dev/null +++ b/rbi/openai/resources/chat/completions/messages.rbi @@ -0,0 +1,46 @@ +# typed: strong + +module OpenAI + module Resources + class Chat + class Completions + class Messages + # Get the messages in a stored chat completion. Only Chat Completions that have + # been created with the `store` parameter set to `true` will be returned. + sig do + params( + completion_id: String, + after: String, + limit: Integer, + order: + OpenAI::Chat::Completions::MessageListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::CursorPage[ + OpenAI::Chat::ChatCompletionStoreMessage + ] + ) + end + def list( + # The ID of the chat completion to retrieve messages from. + completion_id, + # Identifier for the last message from the previous pagination request. + after: nil, + # Number of messages to retrieve. + limit: nil, + # Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + # for descending order. Defaults to `asc`. + order: nil, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end + end +end diff --git a/rbi/openai/resources/completions.rbi b/rbi/openai/resources/completions.rbi new file mode 100644 index 00000000..507da5ba --- /dev/null +++ b/rbi/openai/resources/completions.rbi @@ -0,0 +1,292 @@ +# typed: strong + +module OpenAI + module Resources + class Completions + # See {OpenAI::Resources::Completions#create_streaming} for streaming counterpart. + # + # Creates a completion for the provided prompt and parameters. + sig do + params( + model: T.any(String, OpenAI::CompletionCreateParams::Model::OrSymbol), + prompt: T.nilable(OpenAI::CompletionCreateParams::Prompt::Variants), + best_of: T.nilable(Integer), + echo: T.nilable(T::Boolean), + frequency_penalty: T.nilable(Float), + logit_bias: T.nilable(T::Hash[Symbol, Integer]), + logprobs: T.nilable(Integer), + max_tokens: T.nilable(Integer), + n: T.nilable(Integer), + presence_penalty: T.nilable(Float), + seed: T.nilable(Integer), + stop: T.nilable(OpenAI::CompletionCreateParams::Stop::Variants), + stream_options: + T.nilable(OpenAI::Chat::ChatCompletionStreamOptions::OrHash), + suffix: T.nilable(String), + temperature: T.nilable(Float), + top_p: T.nilable(Float), + user: String, + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Completion) + end + def create( + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + model:, + # The prompt(s) to generate completions for, encoded as a string, array of + # strings, array of tokens, or array of token arrays. + # + # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. 
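+ # As simple illustrations, `prompt: "Say this is a test"` and
+ # `prompt: ["First prompt", "Second prompt"]` are both accepted shapes
+ # (values are examples only).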
+ prompt:, + # Generates `best_of` completions server-side and returns the "best" (the one with + # the highest log probability per token). Results cannot be streamed. + # + # When used with `n`, `best_of` controls the number of candidate completions and + # `n` specifies how many to return – `best_of` must be greater than `n`. + # + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. + best_of: nil, + # Echo back the prompt in addition to the completion + echo: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. + # + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + frequency_penalty: nil, + # Modify the likelihood of specified tokens appearing in the completion. + # + # Accepts a JSON object that maps tokens (specified by their token ID in the GPT + # tokenizer) to an associated bias value from -100 to 100. You can use this + # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. + # Mathematically, the bias is added to the logits generated by the model prior to + # sampling. The exact effect will vary per model, but values between -1 and 1 + # should decrease or increase likelihood of selection; values like -100 or 100 + # should result in a ban or exclusive selection of the relevant token. + # + # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token + # from being generated. + logit_bias: nil, + # Include the log probabilities on the `logprobs` most likely output tokens, as + # well the chosen tokens. For example, if `logprobs` is 5, the API will return a + # list of the 5 most likely tokens. The API will always return the `logprob` of + # the sampled token, so there may be up to `logprobs+1` elements in the response. + # + # The maximum value for `logprobs` is 5. + logprobs: nil, + # The maximum number of [tokens](/tokenizer) that can be generated in the + # completion. + # + # The token count of your prompt plus `max_tokens` cannot exceed the model's + # context length. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. + max_tokens: nil, + # How many completions to generate for each prompt. + # + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. + n: nil, + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. + # + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) + presence_penalty: nil, + # If specified, our system will make a best effort to sample deterministically, + # such that repeated requests with the same `seed` and parameters should return + # the same result. + # + # Determinism is not guaranteed, and you should refer to the `system_fingerprint` + # response parameter to monitor changes in the backend. + seed: nil, + # Not supported with latest reasoning models `o3` and `o4-mini`. 
+ # + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. + stop: nil, + # Options for streaming response. Only set this when you set `stream: true`. + stream_options: nil, + # The suffix that comes after a completion of inserted text. + # + # This parameter is only supported for `gpt-3.5-turbo-instruct`. + suffix: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + # + # We generally recommend altering this or `top_p` but not both. + temperature: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. + top_p: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: nil, + # There is no need to provide `stream:`. Instead, use `#create_streaming` or + # `#create` for streaming and non-streaming use cases, respectively. + stream: false, + request_options: {} + ) + end + + # See {OpenAI::Resources::Completions#create} for non-streaming counterpart. + # + # Creates a completion for the provided prompt and parameters. + sig do + params( + model: T.any(String, OpenAI::CompletionCreateParams::Model::OrSymbol), + prompt: T.nilable(OpenAI::CompletionCreateParams::Prompt::Variants), + best_of: T.nilable(Integer), + echo: T.nilable(T::Boolean), + frequency_penalty: T.nilable(Float), + logit_bias: T.nilable(T::Hash[Symbol, Integer]), + logprobs: T.nilable(Integer), + max_tokens: T.nilable(Integer), + n: T.nilable(Integer), + presence_penalty: T.nilable(Float), + seed: T.nilable(Integer), + stop: T.nilable(OpenAI::CompletionCreateParams::Stop::Variants), + stream_options: + T.nilable(OpenAI::Chat::ChatCompletionStreamOptions::OrHash), + suffix: T.nilable(String), + temperature: T.nilable(Float), + top_p: T.nilable(Float), + user: String, + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Internal::Stream[OpenAI::Completion]) + end + def create_streaming( + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + model:, + # The prompt(s) to generate completions for, encoded as a string, array of + # strings, array of tokens, or array of token arrays. + # + # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. + prompt:, + # Generates `best_of` completions server-side and returns the "best" (the one with + # the highest log probability per token). Results cannot be streamed. + # + # When used with `n`, `best_of` controls the number of candidate completions and + # `n` specifies how many to return – `best_of` must be greater than `n`. + # + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. 
+        # Use carefully and ensure that you have reasonable
+        # settings for `max_tokens` and `stop`.
+        best_of: nil,
+        # Echo back the prompt in addition to the completion.
+        echo: nil,
+        # Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+        # existing frequency in the text so far, decreasing the model's likelihood to
+        # repeat the same line verbatim.
+        #
+        # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+        frequency_penalty: nil,
+        # Modify the likelihood of specified tokens appearing in the completion.
+        #
+        # Accepts a JSON object that maps tokens (specified by their token ID in the GPT
+        # tokenizer) to an associated bias value from -100 to 100. You can use this
+        # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+        # Mathematically, the bias is added to the logits generated by the model prior to
+        # sampling. The exact effect will vary per model, but values between -1 and 1
+        # should decrease or increase likelihood of selection; values like -100 or 100
+        # should result in a ban or exclusive selection of the relevant token.
+        #
+        # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
+        # from being generated.
+        logit_bias: nil,
+        # Include the log probabilities on the `logprobs` most likely output tokens, as
+        # well as the chosen tokens. For example, if `logprobs` is 5, the API will return
+        # a list of the 5 most likely tokens. The API will always return the `logprob` of
+        # the sampled token, so there may be up to `logprobs+1` elements in the response.
+        #
+        # The maximum value for `logprobs` is 5.
+        logprobs: nil,
+        # The maximum number of [tokens](/tokenizer) that can be generated in the
+        # completion.
+        #
+        # The token count of your prompt plus `max_tokens` cannot exceed the model's
+        # context length.
+        # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+        # for counting tokens.
+        max_tokens: nil,
+        # How many completions to generate for each prompt.
+        #
+        # **Note:** Because this parameter generates many completions, it can quickly
+        # consume your token quota. Use carefully and ensure that you have reasonable
+        # settings for `max_tokens` and `stop`.
+        n: nil,
+        # Number between -2.0 and 2.0. Positive values penalize new tokens based on
+        # whether they appear in the text so far, increasing the model's likelihood to
+        # talk about new topics.
+        #
+        # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
+        presence_penalty: nil,
+        # If specified, our system will make a best effort to sample deterministically,
+        # such that repeated requests with the same `seed` and parameters should return
+        # the same result.
+        #
+        # Determinism is not guaranteed, and you should refer to the `system_fingerprint`
+        # response parameter to monitor changes in the backend.
+        seed: nil,
+        # Not supported with latest reasoning models `o3` and `o4-mini`.
+        #
+        # Up to 4 sequences where the API will stop generating further tokens. The
+        # returned text will not contain the stop sequence.
+        stop: nil,
+        # Options for streaming response. Only set this when you set `stream: true`.
+        stream_options: nil,
+        # The suffix that comes after a completion of inserted text.
+        #
+        # This parameter is only supported for `gpt-3.5-turbo-instruct`.
+        suffix: nil,
+        # What sampling temperature to use, between 0 and 2.
Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + # + # We generally recommend altering this or `top_p` but not both. + temperature: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. + top_p: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: nil, + # There is no need to provide `stream:`. Instead, use `#create_streaming` or + # `#create` for streaming and non-streaming use cases, respectively. + stream: true, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/openai/resources/containers.rbi b/rbi/openai/resources/containers.rbi new file mode 100644 index 00000000..fc23a5f6 --- /dev/null +++ b/rbi/openai/resources/containers.rbi @@ -0,0 +1,86 @@ +# typed: strong + +module OpenAI + module Resources + class Containers + sig { returns(OpenAI::Resources::Containers::Files) } + attr_reader :files + + # Create Container + sig do + params( + name: String, + expires_after: OpenAI::ContainerCreateParams::ExpiresAfter::OrHash, + file_ids: T::Array[String], + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::ContainerCreateResponse) + end + def create( + # Name of the container to create. + name:, + # Container expiration time in seconds relative to the 'anchor' time. + expires_after: nil, + # IDs of files to copy to the container. + file_ids: nil, + request_options: {} + ) + end + + # Retrieve Container + sig do + params( + container_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::ContainerRetrieveResponse) + end + def retrieve(container_id, request_options: {}) + end + + # List Containers + sig do + params( + after: String, + limit: Integer, + order: OpenAI::ContainerListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::CursorPage[OpenAI::Models::ContainerListResponse] + ) + end + def list( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) + end + + # Delete Container + sig do + params( + container_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).void + end + def delete( + # The ID of the container to delete. 
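+        # An illustrative call (the container ID below is hypothetical):
+        #
+        #   client.containers.delete("cntr_abc123")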
+ container_id, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/openai/resources/containers/files.rbi b/rbi/openai/resources/containers/files.rbi new file mode 100644 index 00000000..3f49be09 --- /dev/null +++ b/rbi/openai/resources/containers/files.rbi @@ -0,0 +1,92 @@ +# typed: strong + +module OpenAI + module Resources + class Containers + class Files + sig { returns(OpenAI::Resources::Containers::Files::Content) } + attr_reader :content + + # Create a Container File + # + # You can send either a multipart/form-data request with the raw file content, or + # a JSON request with a file ID. + sig do + params( + container_id: String, + file: OpenAI::Internal::FileInput, + file_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::Containers::FileCreateResponse) + end + def create( + container_id, + # The File object (not file name) to be uploaded. + file: nil, + # Name of the file to create. + file_id: nil, + request_options: {} + ) + end + + # Retrieve Container File + sig do + params( + file_id: String, + container_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::Containers::FileRetrieveResponse) + end + def retrieve(file_id, container_id:, request_options: {}) + end + + # List Container files + sig do + params( + container_id: String, + after: String, + limit: Integer, + order: OpenAI::Containers::FileListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::CursorPage[ + OpenAI::Models::Containers::FileListResponse + ] + ) + end + def list( + container_id, + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. 
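+          #
+          # A minimal usage sketch (the container ID is hypothetical):
+          #
+          #   client.containers.files.list("cntr_abc123", limit: 20, order: :desc)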
+          order: nil,
+          request_options: {}
+        )
+        end
+
+        # Delete Container File
+        sig do
+          params(
+            file_id: String,
+            container_id: String,
+            request_options: OpenAI::RequestOptions::OrHash
+          ).void
+        end
+        def delete(file_id, container_id:, request_options: {})
+        end
+
+        # @api private
+        sig { params(client: OpenAI::Client).returns(T.attached_class) }
+        def self.new(client:)
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/resources/containers/files/content.rbi b/rbi/openai/resources/containers/files/content.rbi
new file mode 100644
index 00000000..bb901b30
--- /dev/null
+++ b/rbi/openai/resources/containers/files/content.rbi
@@ -0,0 +1,27 @@
+# typed: strong
+
+module OpenAI
+  module Resources
+    class Containers
+      class Files
+        class Content
+          # Retrieve Container File Content
+          sig do
+            params(
+              file_id: String,
+              container_id: String,
+              request_options: OpenAI::RequestOptions::OrHash
+            ).returns(StringIO)
+          end
+          def retrieve(file_id, container_id:, request_options: {})
+          end
+
+          # @api private
+          sig { params(client: OpenAI::Client).returns(T.attached_class) }
+          def self.new(client:)
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/rbi/openai/resources/conversations.rbi b/rbi/openai/resources/conversations.rbi
new file mode 100644
index 00000000..6d83ca4e
--- /dev/null
+++ b/rbi/openai/resources/conversations.rbi
@@ -0,0 +1,110 @@
+# typed: strong
+
+module OpenAI
+  module Resources
+    class Conversations
+      sig { returns(OpenAI::Resources::Conversations::Items) }
+      attr_reader :items
+
+      # Create a conversation.
+      sig do
+        params(
+          items:
+            T.nilable(
+              T::Array[
+                T.any(
+                  OpenAI::Responses::EasyInputMessage::OrHash,
+                  OpenAI::Responses::ResponseInputItem::Message::OrHash,
+                  OpenAI::Responses::ResponseOutputMessage::OrHash,
+                  OpenAI::Responses::ResponseFileSearchToolCall::OrHash,
+                  OpenAI::Responses::ResponseComputerToolCall::OrHash,
+                  OpenAI::Responses::ResponseInputItem::ComputerCallOutput::OrHash,
+                  OpenAI::Responses::ResponseFunctionWebSearch::OrHash,
+                  OpenAI::Responses::ResponseFunctionToolCall::OrHash,
+                  OpenAI::Responses::ResponseInputItem::FunctionCallOutput::OrHash,
+                  OpenAI::Responses::ResponseReasoningItem::OrHash,
+                  OpenAI::Responses::ResponseInputItem::ImageGenerationCall::OrHash,
+                  OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
+                  OpenAI::Responses::ResponseInputItem::LocalShellCall::OrHash,
+                  OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::OrHash,
+                  OpenAI::Responses::ResponseInputItem::McpListTools::OrHash,
+                  OpenAI::Responses::ResponseInputItem::McpApprovalRequest::OrHash,
+                  OpenAI::Responses::ResponseInputItem::McpApprovalResponse::OrHash,
+                  OpenAI::Responses::ResponseInputItem::McpCall::OrHash,
+                  OpenAI::Responses::ResponseCustomToolCallOutput::OrHash,
+                  OpenAI::Responses::ResponseCustomToolCall::OrHash,
+                  OpenAI::Responses::ResponseInputItem::ItemReference::OrHash
+                )
+              ]
+            ),
+          metadata: T.nilable(T::Hash[Symbol, String]),
+          request_options: OpenAI::RequestOptions::OrHash
+        ).returns(OpenAI::Conversations::Conversation)
+      end
+      def create(
+        # Initial items to include in the conversation context. You may add up to 20 items
+        # at a time.
+        items: nil,
+        # Set of 16 key-value pairs that can be attached to an object. Useful for storing
+        # additional information about the object in a structured format.
+        metadata: nil,
+        request_options: {}
+      )
+      end
+
+      # Get a conversation with the given ID.
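+      #
+      # For example (illustrative; the conversation ID is hypothetical):
+      #
+      #   conversation = client.conversations.retrieve("conv_abc123")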
+ sig do + params( + conversation_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Conversations::Conversation) + end + def retrieve( + # The ID of the conversation to retrieve. + conversation_id, + request_options: {} + ) + end + + # Update a conversation's metadata with the given ID. + sig do + params( + conversation_id: String, + metadata: T::Hash[Symbol, String], + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Conversations::Conversation) + end + def update( + # The ID of the conversation to update. + conversation_id, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters. + metadata:, + request_options: {} + ) + end + + # Delete a conversation with the given ID. + sig do + params( + conversation_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Conversations::ConversationDeletedResource) + end + def delete( + # The ID of the conversation to delete. + conversation_id, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/openai/resources/conversations/items.rbi b/rbi/openai/resources/conversations/items.rbi new file mode 100644 index 00000000..fb396699 --- /dev/null +++ b/rbi/openai/resources/conversations/items.rbi @@ -0,0 +1,152 @@ +# typed: strong + +module OpenAI + module Resources + class Conversations + class Items + # Create items in a conversation with the given ID. + sig do + params( + conversation_id: String, + items: + T::Array[ + T.any( + OpenAI::Responses::EasyInputMessage::OrHash, + OpenAI::Responses::ResponseInputItem::Message::OrHash, + OpenAI::Responses::ResponseOutputMessage::OrHash, + OpenAI::Responses::ResponseFileSearchToolCall::OrHash, + OpenAI::Responses::ResponseComputerToolCall::OrHash, + OpenAI::Responses::ResponseInputItem::ComputerCallOutput::OrHash, + OpenAI::Responses::ResponseFunctionWebSearch::OrHash, + OpenAI::Responses::ResponseFunctionToolCall::OrHash, + OpenAI::Responses::ResponseInputItem::FunctionCallOutput::OrHash, + OpenAI::Responses::ResponseReasoningItem::OrHash, + OpenAI::Responses::ResponseInputItem::ImageGenerationCall::OrHash, + OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash, + OpenAI::Responses::ResponseInputItem::LocalShellCall::OrHash, + OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::OrHash, + OpenAI::Responses::ResponseInputItem::McpListTools::OrHash, + OpenAI::Responses::ResponseInputItem::McpApprovalRequest::OrHash, + OpenAI::Responses::ResponseInputItem::McpApprovalResponse::OrHash, + OpenAI::Responses::ResponseInputItem::McpCall::OrHash, + OpenAI::Responses::ResponseCustomToolCallOutput::OrHash, + OpenAI::Responses::ResponseCustomToolCall::OrHash, + OpenAI::Responses::ResponseInputItem::ItemReference::OrHash + ) + ], + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Conversations::ConversationItemList) + end + def create( + # Path param: The ID of the conversation to add the item to. + conversation_id, + # Body param: The items to add to the conversation. You may add up to 20 items at + # a time. 
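+          #
+          # As a hedged sketch, a single user message item could look like
+          # `items: [{role: :user, content: "Hello!"}]`.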
+ items:, + # Query param: Additional fields to include in the response. See the `include` + # parameter for + # [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + # for more information. + include: nil, + request_options: {} + ) + end + + # Get a single item from a conversation with the given IDs. + sig do + params( + item_id: String, + conversation_id: String, + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Conversations::ConversationItem::Variants) + end + def retrieve( + # Path param: The ID of the item to retrieve. + item_id, + # Path param: The ID of the conversation that contains the item. + conversation_id:, + # Query param: Additional fields to include in the response. See the `include` + # parameter for + # [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + # for more information. + include: nil, + request_options: {} + ) + end + + # List all items for a conversation with the given ID. + sig do + params( + conversation_id: String, + after: String, + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + limit: Integer, + order: OpenAI::Conversations::ItemListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::ConversationCursorPage[ + OpenAI::Conversations::ConversationItem::Variants + ] + ) + end + def list( + # The ID of the conversation to list items for. + conversation_id, + # An item ID to list items after, used in pagination. + after: nil, + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `message.output_text.logprobs`: Include logprobs with assistant messages. + # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + # tokens in reasoning item outputs. This enables reasoning items to be used in + # multi-turn conversations when using the Responses API statelessly (like when + # the `store` parameter is set to `false`, or when an organization is enrolled + # in the zero data retention program). + include: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # The order to return the input items in. Default is `desc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. + order: nil, + request_options: {} + ) + end + + # Delete an item from a conversation with the given IDs. + sig do + params( + item_id: String, + conversation_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Conversations::Conversation) + end + def delete( + # The ID of the item to delete. + item_id, + # The ID of the conversation that contains the item. 
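+          #
+          # e.g. (both IDs hypothetical):
+          #
+          #   client.conversations.items.delete("msg_abc123", conversation_id: "conv_abc123")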
+ conversation_id:, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end +end diff --git a/rbi/openai/resources/embeddings.rbi b/rbi/openai/resources/embeddings.rbi new file mode 100644 index 00000000..bfdafb67 --- /dev/null +++ b/rbi/openai/resources/embeddings.rbi @@ -0,0 +1,55 @@ +# typed: strong + +module OpenAI + module Resources + class Embeddings + # Creates an embedding vector representing the input text. + sig do + params( + input: OpenAI::EmbeddingCreateParams::Input::Variants, + model: T.any(String, OpenAI::EmbeddingModel::OrSymbol), + dimensions: Integer, + encoding_format: + OpenAI::EmbeddingCreateParams::EncodingFormat::OrSymbol, + user: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::CreateEmbeddingResponse) + end + def create( + # Input text to embed, encoded as a string or array of tokens. To embed multiple + # inputs in a single request, pass an array of strings or array of token arrays. + # The input must not exceed the max input tokens for the model (8192 tokens for + # all embedding models), cannot be an empty string, and any array must be 2048 + # dimensions or less. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. In addition to the per-input token limit, all embedding + # models enforce a maximum of 300,000 tokens summed across all inputs in a single + # request. + input:, + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. + model:, + # The number of dimensions the resulting output embeddings should have. Only + # supported in `text-embedding-3` and later models. + dimensions: nil, + # The format to return the embeddings in. Can be either `float` or + # [`base64`](https://pypi.org/project/pybase64/). + encoding_format: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: nil, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/openai/resources/evals.rbi b/rbi/openai/resources/evals.rbi new file mode 100644 index 00000000..9ec9f490 --- /dev/null +++ b/rbi/openai/resources/evals.rbi @@ -0,0 +1,146 @@ +# typed: strong + +module OpenAI + module Resources + class Evals + sig { returns(OpenAI::Resources::Evals::Runs) } + attr_reader :runs + + # Create the structure of an evaluation that can be used to test a model's + # performance. An evaluation is a set of testing criteria and the config for a + # data source, which dictates the schema of the data used in the evaluation. After + # creating an evaluation, you can run it on different models and model parameters. + # We support several types of graders and datasources. For more information, see + # the [Evals guide](https://platform.openai.com/docs/guides/evals). 
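+      #
+      # A minimal illustrative sketch (the item schema and grader values below are
+      # hypothetical, not taken from this diff):
+      #
+      #   client.evals.create(
+      #     name: "my-eval",
+      #     data_source_config: {
+      #       type: :custom,
+      #       item_schema: {type: "object", properties: {answer: {type: "string"}}}
+      #     },
+      #     testing_criteria: [
+      #       {
+      #         type: :string_check,
+      #         name: "exact match",
+      #         input: "{{sample.output_text}}",
+      #         reference: "{{item.answer}}",
+      #         operation: :eq
+      #       }
+      #     ]
+      #   )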
+ sig do + params( + data_source_config: + T.any( + OpenAI::EvalCreateParams::DataSourceConfig::Custom::OrHash, + OpenAI::EvalCreateParams::DataSourceConfig::Logs::OrHash, + OpenAI::EvalCreateParams::DataSourceConfig::StoredCompletions::OrHash + ), + testing_criteria: + T::Array[ + T.any( + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::OrHash, + OpenAI::Graders::StringCheckGrader::OrHash, + OpenAI::EvalCreateParams::TestingCriterion::TextSimilarity::OrHash, + OpenAI::EvalCreateParams::TestingCriterion::Python::OrHash, + OpenAI::EvalCreateParams::TestingCriterion::ScoreModel::OrHash + ) + ], + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::EvalCreateResponse) + end + def create( + # The configuration for the data source used for the evaluation runs. Dictates the + # schema of the data used in the evaluation. + data_source_config:, + # A list of graders for all eval runs in this group. Graders can reference + # variables in the data source using double curly braces notation, like + # `{{item.variable_name}}`. To reference the model's output, use the `sample` + # namespace (ie, `{{sample.output_text}}`). + testing_criteria:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The name of the evaluation. + name: nil, + request_options: {} + ) + end + + # Get an evaluation by ID. + sig do + params( + eval_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::EvalRetrieveResponse) + end + def retrieve( + # The ID of the evaluation to retrieve. + eval_id, + request_options: {} + ) + end + + # Update certain properties of an evaluation. + sig do + params( + eval_id: String, + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::EvalUpdateResponse) + end + def update( + # The ID of the evaluation to update. + eval_id, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # Rename the evaluation. + name: nil, + request_options: {} + ) + end + + # List evaluations for a project. + sig do + params( + after: String, + limit: Integer, + order: OpenAI::EvalListParams::Order::OrSymbol, + order_by: OpenAI::EvalListParams::OrderBy::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::CursorPage[OpenAI::Models::EvalListResponse] + ) + end + def list( + # Identifier for the last eval from the previous pagination request. + after: nil, + # Number of evals to retrieve. + limit: nil, + # Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for + # descending order. + order: nil, + # Evals can be ordered by creation time or last updated time. Use `created_at` for + # creation time or `updated_at` for last updated time. + order_by: nil, + request_options: {} + ) + end + + # Delete an evaluation. 
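+      #
+      # e.g. (illustrative, with a hypothetical ID): `client.evals.delete("eval_abc123")`.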
+ sig do + params( + eval_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::EvalDeleteResponse) + end + def delete( + # The ID of the evaluation to delete. + eval_id, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/openai/resources/evals/runs.rbi b/rbi/openai/resources/evals/runs.rbi new file mode 100644 index 00000000..20df812d --- /dev/null +++ b/rbi/openai/resources/evals/runs.rbi @@ -0,0 +1,133 @@ +# typed: strong + +module OpenAI + module Resources + class Evals + class Runs + sig { returns(OpenAI::Resources::Evals::Runs::OutputItems) } + attr_reader :output_items + + # Kicks off a new run for a given evaluation, specifying the data source, and what + # model configuration to use to test. The datasource will be validated against the + # schema specified in the config of the evaluation. + sig do + params( + eval_id: String, + data_source: + T.any( + OpenAI::Evals::CreateEvalJSONLRunDataSource::OrHash, + OpenAI::Evals::CreateEvalCompletionsRunDataSource::OrHash, + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::OrHash + ), + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::Evals::RunCreateResponse) + end + def create( + # The ID of the evaluation to create a run for. + eval_id, + # Details about the run's data source. + data_source:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The name of the run. + name: nil, + request_options: {} + ) + end + + # Get an evaluation run by ID. + sig do + params( + run_id: String, + eval_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::Evals::RunRetrieveResponse) + end + def retrieve( + # The ID of the run to retrieve. + run_id, + # The ID of the evaluation to retrieve runs for. + eval_id:, + request_options: {} + ) + end + + # Get a list of runs for an evaluation. + sig do + params( + eval_id: String, + after: String, + limit: Integer, + order: OpenAI::Evals::RunListParams::Order::OrSymbol, + status: OpenAI::Evals::RunListParams::Status::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::CursorPage[OpenAI::Models::Evals::RunListResponse] + ) + end + def list( + # The ID of the evaluation to retrieve runs for. + eval_id, + # Identifier for the last run from the previous pagination request. + after: nil, + # Number of runs to retrieve. + limit: nil, + # Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for + # descending order. Defaults to `asc`. + order: nil, + # Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed` + # | `canceled`. + status: nil, + request_options: {} + ) + end + + # Delete an eval run. + sig do + params( + run_id: String, + eval_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::Evals::RunDeleteResponse) + end + def delete( + # The ID of the run to delete. + run_id, + # The ID of the evaluation to delete the run from. 
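+          #
+          # e.g. (both IDs hypothetical):
+          #
+          #   client.evals.runs.delete("evalrun_abc123", eval_id: "eval_abc123")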
+ eval_id:, + request_options: {} + ) + end + + # Cancel an ongoing evaluation run. + sig do + params( + run_id: String, + eval_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::Evals::RunCancelResponse) + end + def cancel( + # The ID of the run to cancel. + run_id, + # The ID of the evaluation whose run you want to cancel. + eval_id:, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end +end diff --git a/rbi/openai/resources/evals/runs/output_items.rbi b/rbi/openai/resources/evals/runs/output_items.rbi new file mode 100644 index 00000000..8e8f764b --- /dev/null +++ b/rbi/openai/resources/evals/runs/output_items.rbi @@ -0,0 +1,73 @@ +# typed: strong + +module OpenAI + module Resources + class Evals + class Runs + class OutputItems + # Get an evaluation run output item by ID. + sig do + params( + output_item_id: String, + eval_id: String, + run_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse) + end + def retrieve( + # The ID of the output item to retrieve. + output_item_id, + # The ID of the evaluation to retrieve runs for. + eval_id:, + # The ID of the run to retrieve. + run_id:, + request_options: {} + ) + end + + # Get a list of output items for an evaluation run. + sig do + params( + run_id: String, + eval_id: String, + after: String, + limit: Integer, + order: OpenAI::Evals::Runs::OutputItemListParams::Order::OrSymbol, + status: + OpenAI::Evals::Runs::OutputItemListParams::Status::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::CursorPage[ + OpenAI::Models::Evals::Runs::OutputItemListResponse + ] + ) + end + def list( + # Path param: The ID of the run to retrieve output items for. + run_id, + # Path param: The ID of the evaluation to retrieve runs for. + eval_id:, + # Query param: Identifier for the last output item from the previous pagination + # request. + after: nil, + # Query param: Number of output items to retrieve. + limit: nil, + # Query param: Sort order for output items by timestamp. Use `asc` for ascending + # order or `desc` for descending order. Defaults to `asc`. + order: nil, + # Query param: Filter output items by status. Use `failed` to filter by failed + # output items or `pass` to filter by passed output items. + status: nil, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end + end +end diff --git a/rbi/openai/resources/files.rbi b/rbi/openai/resources/files.rbi new file mode 100644 index 00000000..0b28857e --- /dev/null +++ b/rbi/openai/resources/files.rbi @@ -0,0 +1,126 @@ +# typed: strong + +module OpenAI + module Resources + class Files + # Upload a file that can be used across various endpoints. Individual files can be + # up to 512 MB, and the size of all files uploaded by one organization can be up + # to 1 TB. + # + # The Assistants API supports files up to 2 million tokens and of specific file + # types. See the + # [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for + # details. + # + # The Fine-tuning API only supports `.jsonl` files. 
The input also has certain + # required formats for fine-tuning + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # models. + # + # The Batch API only supports `.jsonl` files up to 200 MB in size. The input also + # has a specific required + # [format](https://platform.openai.com/docs/api-reference/batch/request-input). + # + # Please [contact us](https://help.openai.com/) if you need to increase these + # storage limits. + sig do + params( + file: OpenAI::Internal::FileInput, + purpose: OpenAI::FilePurpose::OrSymbol, + expires_after: OpenAI::FileCreateParams::ExpiresAfter::OrHash, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::FileObject) + end + def create( + # The File object (not file name) to be uploaded. + file:, + # The intended purpose of the uploaded file. One of: - `assistants`: Used in the + # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + # Flexible file type for any purpose - `evals`: Used for eval data sets + purpose:, + # The expiration policy for a file. By default, files with `purpose=batch` expire + # after 30 days and all other files are persisted until they are manually deleted. + expires_after: nil, + request_options: {} + ) + end + + # Returns information about a specific file. + sig do + params( + file_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::FileObject) + end + def retrieve( + # The ID of the file to use for this request. + file_id, + request_options: {} + ) + end + + # Returns a list of files. + sig do + params( + after: String, + limit: Integer, + order: OpenAI::FileListParams::Order::OrSymbol, + purpose: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Internal::CursorPage[OpenAI::FileObject]) + end + def list( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 10,000, and the default is 10,000. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + # Only return files with the given purpose. + purpose: nil, + request_options: {} + ) + end + + # Delete a file. + sig do + params( + file_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::FileDeleted) + end + def delete( + # The ID of the file to use for this request. + file_id, + request_options: {} + ) + end + + # Returns the contents of the specified file. + sig do + params( + file_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(StringIO) + end + def content( + # The ID of the file to use for this request. 
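+        #
+        # A small usage sketch (the file ID is hypothetical); per the signature above,
+        # the return value is a StringIO:
+        #
+        #   content = client.files.content("file_abc123")
+        #   File.binwrite("output.bin", content.read)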
+ file_id, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/openai/resources/fine_tuning.rbi b/rbi/openai/resources/fine_tuning.rbi new file mode 100644 index 00000000..42e53e48 --- /dev/null +++ b/rbi/openai/resources/fine_tuning.rbi @@ -0,0 +1,24 @@ +# typed: strong + +module OpenAI + module Resources + class FineTuning + sig { returns(OpenAI::Resources::FineTuning::Methods) } + attr_reader :methods_ + + sig { returns(OpenAI::Resources::FineTuning::Jobs) } + attr_reader :jobs + + sig { returns(OpenAI::Resources::FineTuning::Checkpoints) } + attr_reader :checkpoints + + sig { returns(OpenAI::Resources::FineTuning::Alpha) } + attr_reader :alpha + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/openai/resources/fine_tuning/alpha.rbi b/rbi/openai/resources/fine_tuning/alpha.rbi new file mode 100644 index 00000000..86bfd743 --- /dev/null +++ b/rbi/openai/resources/fine_tuning/alpha.rbi @@ -0,0 +1,17 @@ +# typed: strong + +module OpenAI + module Resources + class FineTuning + class Alpha + sig { returns(OpenAI::Resources::FineTuning::Alpha::Graders) } + attr_reader :graders + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end +end diff --git a/rbi/openai/resources/fine_tuning/alpha/graders.rbi b/rbi/openai/resources/fine_tuning/alpha/graders.rbi new file mode 100644 index 00000000..f3ea09fe --- /dev/null +++ b/rbi/openai/resources/fine_tuning/alpha/graders.rbi @@ -0,0 +1,70 @@ +# typed: strong + +module OpenAI + module Resources + class FineTuning + class Alpha + class Graders + # Run a grader. + sig do + params( + grader: + T.any( + OpenAI::Graders::StringCheckGrader::OrHash, + OpenAI::Graders::TextSimilarityGrader::OrHash, + OpenAI::Graders::PythonGrader::OrHash, + OpenAI::Graders::ScoreModelGrader::OrHash, + OpenAI::Graders::MultiGrader::OrHash + ), + model_sample: String, + item: T.anything, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::FineTuning::Alpha::GraderRunResponse) + end + def run( + # The grader used for the fine-tuning job. + grader:, + # The model sample to be evaluated. This value will be used to populate the + # `sample` namespace. See + # [the guide](https://platform.openai.com/docs/guides/graders) for more details. + # The `output_json` variable will be populated if the model sample is a valid JSON + # string. + model_sample:, + # The dataset item provided to the grader. This will be used to populate the + # `item` namespace. See + # [the guide](https://platform.openai.com/docs/guides/graders) for more details. + item: nil, + request_options: {} + ) + end + + # Validate a grader. + sig do + params( + grader: + T.any( + OpenAI::Graders::StringCheckGrader::OrHash, + OpenAI::Graders::TextSimilarityGrader::OrHash, + OpenAI::Graders::PythonGrader::OrHash, + OpenAI::Graders::ScoreModelGrader::OrHash, + OpenAI::Graders::MultiGrader::OrHash + ), + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::FineTuning::Alpha::GraderValidateResponse) + end + def validate( + # The grader used for the fine-tuning job. 
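+            #
+            # For instance (an illustrative grader hash, mirroring the string-check
+            # shape; values are hypothetical):
+            #
+            #   client.fine_tuning.alpha.graders.validate(
+            #     grader: {
+            #       type: :string_check,
+            #       name: "exact match",
+            #       input: "{{sample.output_text}}",
+            #       reference: "{{item.answer}}",
+            #       operation: :eq
+            #     }
+            #   )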
+ grader:, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end + end +end diff --git a/rbi/openai/resources/fine_tuning/checkpoints.rbi b/rbi/openai/resources/fine_tuning/checkpoints.rbi new file mode 100644 index 00000000..ca0f882d --- /dev/null +++ b/rbi/openai/resources/fine_tuning/checkpoints.rbi @@ -0,0 +1,17 @@ +# typed: strong + +module OpenAI + module Resources + class FineTuning + class Checkpoints + sig { returns(OpenAI::Resources::FineTuning::Checkpoints::Permissions) } + attr_reader :permissions + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end +end diff --git a/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi b/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi new file mode 100644 index 00000000..be76789c --- /dev/null +++ b/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi @@ -0,0 +1,94 @@ +# typed: strong + +module OpenAI + module Resources + class FineTuning + class Checkpoints + class Permissions + # **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys). + # + # This enables organization owners to share fine-tuned models with other projects + # in their organization. + sig do + params( + fine_tuned_model_checkpoint: String, + project_ids: T::Array[String], + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::Page[ + OpenAI::Models::FineTuning::Checkpoints::PermissionCreateResponse + ] + ) + end + def create( + # The ID of the fine-tuned model checkpoint to create a permission for. + fine_tuned_model_checkpoint, + # The project identifiers to grant access to. + project_ids:, + request_options: {} + ) + end + + # **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + # + # Organization owners can use this endpoint to view all permissions for a + # fine-tuned model checkpoint. + sig do + params( + fine_tuned_model_checkpoint: String, + after: String, + limit: Integer, + order: + OpenAI::FineTuning::Checkpoints::PermissionRetrieveParams::Order::OrSymbol, + project_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse + ) + end + def retrieve( + # The ID of the fine-tuned model checkpoint to get permissions for. + fine_tuned_model_checkpoint, + # Identifier for the last permission ID from the previous pagination request. + after: nil, + # Number of permissions to retrieve. + limit: nil, + # The order in which to retrieve permissions. + order: nil, + # The ID of the project to get permissions for. + project_id: nil, + request_options: {} + ) + end + + # **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + # + # Organization owners can use this endpoint to delete a permission for a + # fine-tuned model checkpoint. + sig do + params( + permission_id: String, + fine_tuned_model_checkpoint: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Models::FineTuning::Checkpoints::PermissionDeleteResponse + ) + end + def delete( + # The ID of the fine-tuned model checkpoint permission to delete. + permission_id, + # The ID of the fine-tuned model checkpoint to delete a permission for. 
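+            #
+            # e.g. (both identifiers hypothetical):
+            #
+            #   client.fine_tuning.checkpoints.permissions.delete(
+            #     "cp_abc123",
+            #     fine_tuned_model_checkpoint: "ft:gpt-4o-mini:org:custom:abc123"
+            #   )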
+ fine_tuned_model_checkpoint:, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end + end +end diff --git a/rbi/openai/resources/fine_tuning/jobs.rbi b/rbi/openai/resources/fine_tuning/jobs.rbi new file mode 100644 index 00000000..4e823288 --- /dev/null +++ b/rbi/openai/resources/fine_tuning/jobs.rbi @@ -0,0 +1,216 @@ +# typed: strong + +module OpenAI + module Resources + class FineTuning + class Jobs + sig { returns(OpenAI::Resources::FineTuning::Jobs::Checkpoints) } + attr_reader :checkpoints + + # Creates a fine-tuning job which begins the process of creating a new model from + # a given dataset. + # + # Response includes details of the enqueued job including job status and the name + # of the fine-tuned models once complete. + # + # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization) + sig do + params( + model: + T.any( + String, + OpenAI::FineTuning::JobCreateParams::Model::OrSymbol + ), + training_file: String, + hyperparameters: + OpenAI::FineTuning::JobCreateParams::Hyperparameters::OrHash, + integrations: + T.nilable( + T::Array[ + OpenAI::FineTuning::JobCreateParams::Integration::OrHash + ] + ), + metadata: T.nilable(T::Hash[Symbol, String]), + method_: OpenAI::FineTuning::JobCreateParams::Method::OrHash, + seed: T.nilable(Integer), + suffix: T.nilable(String), + validation_file: T.nilable(String), + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::FineTuning::FineTuningJob) + end + def create( + # The name of the model to fine-tune. You can select one of the + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). + model:, + # The ID of an uploaded file that contains training data. + # + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. + # + # Your dataset must be formatted as a JSONL file. Additionally, you must upload + # your file with the purpose `fine-tune`. + # + # The contents of the file should differ depending on if the model uses the + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # format, or if the fine-tuning method uses the + # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) + # format. + # + # See the + # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) + # for more details. + training_file:, + # The hyperparameters used for the fine-tuning job. This value is now deprecated + # in favor of `method`, and should be passed in under the `method` parameter. + hyperparameters: nil, + # A list of integrations to enable for your fine-tuning job. + integrations: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The method used for fine-tuning. + method_: nil, + # The seed controls the reproducibility of the job. Passing in the same seed and + # job parameters should produce the same results, but may differ in rare cases. 
If + # a seed is not specified, one will be generated for you. + seed: nil, + # A string of up to 64 characters that will be added to your fine-tuned model + # name. + # + # For example, a `suffix` of "custom-model-name" would produce a model name like + # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + suffix: nil, + # The ID of an uploaded file that contains validation data. + # + # If you provide this file, the data is used to generate validation metrics + # periodically during fine-tuning. These metrics can be viewed in the fine-tuning + # results file. The same data should not be present in both train and validation + # files. + # + # Your dataset must be formatted as a JSONL file. You must upload your file with + # the purpose `fine-tune`. + # + # See the + # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) + # for more details. + validation_file: nil, + request_options: {} + ) + end + + # Get info about a fine-tuning job. + # + # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization) + sig do + params( + fine_tuning_job_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::FineTuning::FineTuningJob) + end + def retrieve( + # The ID of the fine-tuning job. + fine_tuning_job_id, + request_options: {} + ) + end + + # List your organization's fine-tuning jobs + sig do + params( + after: String, + limit: Integer, + metadata: T.nilable(T::Hash[Symbol, String]), + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::CursorPage[OpenAI::FineTuning::FineTuningJob] + ) + end + def list( + # Identifier for the last job from the previous pagination request. + after: nil, + # Number of fine-tuning jobs to retrieve. + limit: nil, + # Optional metadata filter. To filter, use the syntax `metadata[k]=v`. + # Alternatively, set `metadata=null` to indicate no metadata. + metadata: nil, + request_options: {} + ) + end + + # Immediately cancel a fine-tune job. + sig do + params( + fine_tuning_job_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::FineTuning::FineTuningJob) + end + def cancel( + # The ID of the fine-tuning job to cancel. + fine_tuning_job_id, + request_options: {} + ) + end + + # Get status updates for a fine-tuning job. + sig do + params( + fine_tuning_job_id: String, + after: String, + limit: Integer, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::CursorPage[OpenAI::FineTuning::FineTuningJobEvent] + ) + end + def list_events( + # The ID of the fine-tuning job to get events for. + fine_tuning_job_id, + # Identifier for the last event from the previous pagination request. + after: nil, + # Number of events to retrieve. + limit: nil, + request_options: {} + ) + end + + # Pause a fine-tune job. + sig do + params( + fine_tuning_job_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::FineTuning::FineTuningJob) + end + def pause( + # The ID of the fine-tuning job to pause. + fine_tuning_job_id, + request_options: {} + ) + end + + # Resume a fine-tune job. + sig do + params( + fine_tuning_job_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::FineTuning::FineTuningJob) + end + def resume( + # The ID of the fine-tuning job to resume. 
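+          #
+          # e.g. (the job ID is hypothetical):
+          #
+          #   client.fine_tuning.jobs.resume("ftjob_abc123")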
+ fine_tuning_job_id, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end +end diff --git a/rbi/openai/resources/fine_tuning/jobs/checkpoints.rbi b/rbi/openai/resources/fine_tuning/jobs/checkpoints.rbi new file mode 100644 index 00000000..0c037db9 --- /dev/null +++ b/rbi/openai/resources/fine_tuning/jobs/checkpoints.rbi @@ -0,0 +1,40 @@ +# typed: strong + +module OpenAI + module Resources + class FineTuning + class Jobs + class Checkpoints + # List checkpoints for a fine-tuning job. + sig do + params( + fine_tuning_job_id: String, + after: String, + limit: Integer, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::CursorPage[ + OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint + ] + ) + end + def list( + # The ID of the fine-tuning job to get checkpoints for. + fine_tuning_job_id, + # Identifier for the last checkpoint ID from the previous pagination request. + after: nil, + # Number of checkpoints to retrieve. + limit: nil, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end + end +end diff --git a/rbi/openai/resources/fine_tuning/methods.rbi b/rbi/openai/resources/fine_tuning/methods.rbi new file mode 100644 index 00000000..a03708e5 --- /dev/null +++ b/rbi/openai/resources/fine_tuning/methods.rbi @@ -0,0 +1,14 @@ +# typed: strong + +module OpenAI + module Resources + class FineTuning + class Methods + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end +end diff --git a/rbi/openai/resources/graders.rbi b/rbi/openai/resources/graders.rbi new file mode 100644 index 00000000..b409a493 --- /dev/null +++ b/rbi/openai/resources/graders.rbi @@ -0,0 +1,15 @@ +# typed: strong + +module OpenAI + module Resources + class Graders + sig { returns(OpenAI::Resources::Graders::GraderModels) } + attr_reader :grader_models + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/openai/resources/graders/grader_models.rbi b/rbi/openai/resources/graders/grader_models.rbi new file mode 100644 index 00000000..bc4bcd24 --- /dev/null +++ b/rbi/openai/resources/graders/grader_models.rbi @@ -0,0 +1,14 @@ +# typed: strong + +module OpenAI + module Resources + class Graders + class GraderModels + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end +end diff --git a/rbi/openai/resources/images.rbi b/rbi/openai/resources/images.rbi new file mode 100644 index 00000000..b27c7c15 --- /dev/null +++ b/rbi/openai/resources/images.rbi @@ -0,0 +1,453 @@ +# typed: strong + +module OpenAI + module Resources + class Images + # Creates a variation of a given image. This endpoint only supports `dall-e-2`. + sig do + params( + image: OpenAI::Internal::FileInput, + model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), + n: T.nilable(Integer), + response_format: + T.nilable( + OpenAI::ImageCreateVariationParams::ResponseFormat::OrSymbol + ), + size: T.nilable(OpenAI::ImageCreateVariationParams::Size::OrSymbol), + user: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::ImagesResponse) + end + def create_variation( + # The image to use as the basis for the variation(s). 
Must be a valid PNG file, + # less than 4MB, and square. + image:, + # The model to use for image generation. Only `dall-e-2` is supported at this + # time. + model: nil, + # The number of images to generate. Must be between 1 and 10. + n: nil, + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. + response_format: nil, + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024`. + size: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: nil, + request_options: {} + ) + end + + # See {OpenAI::Resources::Images#edit_stream_raw} for streaming counterpart. + # + # Creates an edited or extended image given one or more source images and a + # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`. + sig do + params( + image: OpenAI::ImageEditParams::Image::Variants, + prompt: String, + background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol), + input_fidelity: + T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol), + mask: OpenAI::Internal::FileInput, + model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), + n: T.nilable(Integer), + output_compression: T.nilable(Integer), + output_format: + T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), + quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol), + response_format: + T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol), + size: T.nilable(OpenAI::ImageEditParams::Size::OrSymbol), + user: String, + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::ImagesResponse) + end + def edit( + # The image(s) to edit. Must be a supported image file or an array of images. + # + # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + # 50MB. You can provide up to 16 images. + # + # For `dall-e-2`, you can only provide one image, and it should be a square `png` + # file less than 4MB. + image:, + # A text description of the desired image(s). The maximum length is 1000 + # characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + prompt:, + # Allows to set transparency for the background of the generated image(s). This + # parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + # `opaque` or `auto` (default value). When `auto` is used, the model will + # automatically determine the best background for the image. + # + # If `transparent`, the output format needs to support transparency, so it should + # be set to either `png` (default value) or `webp`. + background: nil, + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + input_fidelity: nil, + # An additional image whose fully transparent areas (e.g. where alpha is zero) + # indicate where `image` should be edited. If there are multiple images provided, + # the mask will be applied on the first image. Must be a valid PNG file, less than + # 4MB, and have the same dimensions as `image`. + mask: nil, + # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + # supported. 
Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + # is used. + model: nil, + # The number of images to generate. Must be between 1 and 10. + n: nil, + # The compression level (0-100%) for the generated images. This parameter is only + # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + # defaults to 100. + output_compression: nil, + # The format in which the generated images are returned. This parameter is only + # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + # default value is `png`. + output_format: nil, + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + # + # Note that the final image may be sent before the full number of partial images + # are generated if the full image is generated more quickly. + partial_images: nil, + # The quality of the image that will be generated. `high`, `medium` and `low` are + # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + # Defaults to `auto`. + quality: nil, + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + # will always return base64-encoded images. + response_format: nil, + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + size: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: nil, + # There is no need to provide `stream:`. Instead, use `#edit_stream_raw` or + # `#edit` for streaming and non-streaming use cases, respectively. + stream: false, + request_options: {} + ) + end + + # See {OpenAI::Resources::Images#edit} for non-streaming counterpart. + # + # Creates an edited or extended image given one or more source images and a + # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`. + sig do + params( + image: OpenAI::ImageEditParams::Image::Variants, + prompt: String, + background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol), + input_fidelity: + T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol), + mask: OpenAI::Internal::FileInput, + model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), + n: T.nilable(Integer), + output_compression: T.nilable(Integer), + output_format: + T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), + quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol), + response_format: + T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol), + size: T.nilable(OpenAI::ImageEditParams::Size::OrSymbol), + user: String, + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::Stream[OpenAI::ImageEditStreamEvent::Variants] + ) + end + def edit_stream_raw( + # The image(s) to edit. Must be a supported image file or an array of images. + # + # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + # 50MB. You can provide up to 16 images. 
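
For orientation, a minimal sketch of the non-streaming `#edit` completed just above, assuming `image:` and `mask:` accept `Pathname`/IO file inputs (per `OpenAI::Internal::FileInput`); the file names and client setup are placeholders:

```ruby
require "openai"
require "pathname"
require "base64"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.images.edit(
  image: Pathname("original.png"), # hypothetical local PNG
  mask: Pathname("mask.png"),      # transparent areas mark the editable region
  prompt: "Add a small sailboat on the horizon",
  model: "gpt-image-1",
  size: :"1024x1024"
)

# gpt-image-1 returns base64-encoded image data rather than URLs.
File.binwrite("edited.png", Base64.decode64(response.data.first.b64_json))
```
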
+ # + # For `dall-e-2`, you can only provide one image, and it should be a square `png` + # file less than 4MB. + image:, + # A text description of the desired image(s). The maximum length is 1000 + # characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + prompt:, + # Allows to set transparency for the background of the generated image(s). This + # parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + # `opaque` or `auto` (default value). When `auto` is used, the model will + # automatically determine the best background for the image. + # + # If `transparent`, the output format needs to support transparency, so it should + # be set to either `png` (default value) or `webp`. + background: nil, + # Control how much effort the model will exert to match the style and features, + # especially facial features, of input images. This parameter is only supported + # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + input_fidelity: nil, + # An additional image whose fully transparent areas (e.g. where alpha is zero) + # indicate where `image` should be edited. If there are multiple images provided, + # the mask will be applied on the first image. Must be a valid PNG file, less than + # 4MB, and have the same dimensions as `image`. + mask: nil, + # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + # is used. + model: nil, + # The number of images to generate. Must be between 1 and 10. + n: nil, + # The compression level (0-100%) for the generated images. This parameter is only + # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + # defaults to 100. + output_compression: nil, + # The format in which the generated images are returned. This parameter is only + # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + # default value is `png`. + output_format: nil, + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + # + # Note that the final image may be sent before the full number of partial images + # are generated if the full image is generated more quickly. + partial_images: nil, + # The quality of the image that will be generated. `high`, `medium` and `low` are + # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + # Defaults to `auto`. + quality: nil, + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + # will always return base64-encoded images. + response_format: nil, + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + size: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: nil, + # There is no need to provide `stream:`. Instead, use `#edit_stream_raw` or + # `#edit` for streaming and non-streaming use cases, respectively. 
+ stream: true, + request_options: {} + ) + end + + # See {OpenAI::Resources::Images#generate_stream_raw} for streaming counterpart. + # + # Creates an image given a prompt. + # [Learn more](https://platform.openai.com/docs/guides/images). + sig do + params( + prompt: String, + background: + T.nilable(OpenAI::ImageGenerateParams::Background::OrSymbol), + model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), + moderation: + T.nilable(OpenAI::ImageGenerateParams::Moderation::OrSymbol), + n: T.nilable(Integer), + output_compression: T.nilable(Integer), + output_format: + T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), + quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol), + response_format: + T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol), + size: T.nilable(OpenAI::ImageGenerateParams::Size::OrSymbol), + style: T.nilable(OpenAI::ImageGenerateParams::Style::OrSymbol), + user: String, + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::ImagesResponse) + end + def generate( + # A text description of the desired image(s). The maximum length is 32000 + # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + # for `dall-e-3`. + prompt:, + # Allows to set transparency for the background of the generated image(s). This + # parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + # `opaque` or `auto` (default value). When `auto` is used, the model will + # automatically determine the best background for the image. + # + # If `transparent`, the output format needs to support transparency, so it should + # be set to either `png` (default value) or `webp`. + background: nil, + # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + # `gpt-image-1` is used. + model: nil, + # Control the content-moderation level for images generated by `gpt-image-1`. Must + # be either `low` for less restrictive filtering or `auto` (default value). + moderation: nil, + # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + # `n=1` is supported. + n: nil, + # The compression level (0-100%) for the generated images. This parameter is only + # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + # defaults to 100. + output_compression: nil, + # The format in which the generated images are returned. This parameter is only + # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + output_format: nil, + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + # + # Note that the final image may be sent before the full number of partial images + # are generated if the full image is generated more quickly. + partial_images: nil, + # The quality of the image that will be generated. + # + # - `auto` (default value) will automatically select the best quality for the + # given model. + # - `high`, `medium` and `low` are supported for `gpt-image-1`. + # - `hd` and `standard` are supported for `dall-e-3`. + # - `standard` is the only option for `dall-e-2`. + quality: nil, + # The format in which generated images with `dall-e-2` and `dall-e-3` are + # returned. Must be one of `url` or `b64_json`. 
URLs are only valid for 60 minutes + # after the image has been generated. This parameter isn't supported for + # `gpt-image-1` which will always return base64-encoded images. + response_format: nil, + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + size: nil, + # The style of the generated images. This parameter is only supported for + # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + # towards generating hyper-real and dramatic images. Natural causes the model to + # produce more natural, less hyper-real looking images. + style: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: nil, + # There is no need to provide `stream:`. Instead, use `#generate_stream_raw` or + # `#generate` for streaming and non-streaming use cases, respectively. + stream: false, + request_options: {} + ) + end + + # See {OpenAI::Resources::Images#generate} for non-streaming counterpart. + # + # Creates an image given a prompt. + # [Learn more](https://platform.openai.com/docs/guides/images). + sig do + params( + prompt: String, + background: + T.nilable(OpenAI::ImageGenerateParams::Background::OrSymbol), + model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)), + moderation: + T.nilable(OpenAI::ImageGenerateParams::Moderation::OrSymbol), + n: T.nilable(Integer), + output_compression: T.nilable(Integer), + output_format: + T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol), + partial_images: T.nilable(Integer), + quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol), + response_format: + T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol), + size: T.nilable(OpenAI::ImageGenerateParams::Size::OrSymbol), + style: T.nilable(OpenAI::ImageGenerateParams::Style::OrSymbol), + user: String, + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::Stream[OpenAI::ImageGenStreamEvent::Variants] + ) + end + def generate_stream_raw( + # A text description of the desired image(s). The maximum length is 32000 + # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + # for `dall-e-3`. + prompt:, + # Allows to set transparency for the background of the generated image(s). This + # parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + # `opaque` or `auto` (default value). When `auto` is used, the model will + # automatically determine the best background for the image. + # + # If `transparent`, the output format needs to support transparency, so it should + # be set to either `png` (default value) or `webp`. + background: nil, + # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + # `gpt-image-1` is used. + model: nil, + # Control the content-moderation level for images generated by `gpt-image-1`. Must + # be either `low` for less restrictive filtering or `auto` (default value). + moderation: nil, + # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + # `n=1` is supported. 
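
The non-streaming `#generate` completed above maps onto a call like the following sketch (output file name, prompt, and client setup are illustrative):

```ruby
require "openai"
require "base64"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.images.generate(
  prompt: "A watercolor painting of a lighthouse at dusk",
  model: "gpt-image-1",
  size: :"1024x1024",
  quality: :low,        # `auto` (default) picks the best quality per model
  output_format: :png
)

# gpt-image-1 always returns base64-encoded images, never URLs.
File.binwrite("lighthouse.png", Base64.decode64(response.data.first.b64_json))
```
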
+ n: nil, + # The compression level (0-100%) for the generated images. This parameter is only + # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + # defaults to 100. + output_compression: nil, + # The format in which the generated images are returned. This parameter is only + # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + output_format: nil, + # The number of partial images to generate. This parameter is used for streaming + # responses that return partial images. Value must be between 0 and 3. When set to + # 0, the response will be a single image sent in one streaming event. + # + # Note that the final image may be sent before the full number of partial images + # are generated if the full image is generated more quickly. + partial_images: nil, + # The quality of the image that will be generated. + # + # - `auto` (default value) will automatically select the best quality for the + # given model. + # - `high`, `medium` and `low` are supported for `gpt-image-1`. + # - `hd` and `standard` are supported for `dall-e-3`. + # - `standard` is the only option for `dall-e-2`. + quality: nil, + # The format in which generated images with `dall-e-2` and `dall-e-3` are + # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + # after the image has been generated. This parameter isn't supported for + # `gpt-image-1` which will always return base64-encoded images. + response_format: nil, + # The size of the generated images. Must be one of `1024x1024`, `1536x1024` + # (landscape), `1024x1536` (portrait), or `auto` (default value) for + # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + size: nil, + # The style of the generated images. This parameter is only supported for + # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + # towards generating hyper-real and dramatic images. Natural causes the model to + # produce more natural, less hyper-real looking images. + style: nil, + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: nil, + # There is no need to provide `stream:`. Instead, use `#generate_stream_raw` or + # `#generate` for streaming and non-streaming use cases, respectively. + stream: true, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/openai/resources/models.rbi b/rbi/openai/resources/models.rbi new file mode 100644 index 00000000..bfb67109 --- /dev/null +++ b/rbi/openai/resources/models.rbi @@ -0,0 +1,52 @@ +# typed: strong + +module OpenAI + module Resources + class Models + # Retrieves a model instance, providing basic information about the model such as + # the owner and permissioning. + sig do + params( + model: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Model) + end + def retrieve( + # The ID of the model to use for this request + model, + request_options: {} + ) + end + + # Lists the currently available models, and provides basic information about each + # one such as the owner and availability. 
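
The `#generate_stream_raw` variant completed just above returns an `OpenAI::Internal::Stream` of `ImageGenStreamEvent` variants rather than a single `ImagesResponse`. A consumption sketch, reusing the `client` from the previous example and assuming the events carry `type`, `partial_image_index`, and `b64_json` fields matching the underlying API's event names:

```ruby
stream = client.images.generate_stream_raw(
  prompt: "A pixel-art spaceship",
  model: "gpt-image-1",
  partial_images: 2
)

stream.each do |event|
  case event.type
  when :"image_generation.partial_image"
    puts "received partial image #{event.partial_image_index}"
  when :"image_generation.completed"
    File.binwrite("spaceship.png", Base64.decode64(event.b64_json))
  end
end
```
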
+ sig do + params(request_options: OpenAI::RequestOptions::OrHash).returns( + OpenAI::Internal::Page[OpenAI::Model] + ) + end + def list(request_options: {}) + end + + # Delete a fine-tuned model. You must have the Owner role in your organization to + # delete a model. + sig do + params( + model: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::ModelDeleted) + end + def delete( + # The model to delete + model, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/openai/resources/moderations.rbi b/rbi/openai/resources/moderations.rbi new file mode 100644 index 00000000..38d862ca --- /dev/null +++ b/rbi/openai/resources/moderations.rbi @@ -0,0 +1,34 @@ +# typed: strong + +module OpenAI + module Resources + class Moderations + # Classifies if text and/or image inputs are potentially harmful. Learn more in + # the [moderation guide](https://platform.openai.com/docs/guides/moderation). + sig do + params( + input: OpenAI::ModerationCreateParams::Input::Variants, + model: T.any(String, OpenAI::ModerationModel::OrSymbol), + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Models::ModerationCreateResponse) + end + def create( + # Input (or inputs) to classify. Can be a single string, an array of strings, or + # an array of multi-modal input objects similar to other models. + input:, + # The content moderation model you would like to use. Learn more in + # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + # learn about available models + # [here](https://platform.openai.com/docs/models#moderation). + model: nil, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/openai/resources/responses.rbi b/rbi/openai/resources/responses.rbi new file mode 100644 index 00000000..743d1278 --- /dev/null +++ b/rbi/openai/resources/responses.rbi @@ -0,0 +1,645 @@ +# typed: strong + +module OpenAI + module Resources + class Responses + sig { returns(OpenAI::Resources::Responses::InputItems) } + attr_reader :input_items + + # See {OpenAI::Resources::Responses#stream_raw} for streaming counterpart. + # + # Creates a model response. Provide + # [text](https://platform.openai.com/docs/guides/text) or + # [image](https://platform.openai.com/docs/guides/images) inputs to generate + # [text](https://platform.openai.com/docs/guides/text) or + # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + # the model call your own + # [custom code](https://platform.openai.com/docs/guides/function-calling) or use + # built-in [tools](https://platform.openai.com/docs/guides/tools) like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + # your own data as input for the model's response. 
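
Before the much larger Responses surface begins, the two small resources added above (`models` and `moderations`) can be exercised as in this sketch (client setup and input text are illustrative):

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# `models.list` returns a plain Page whose `data` holds the available models.
client.models.list.data.each { |model| puts model.id }

# Classify text for potential harm; each result carries per-category flags.
moderation = client.moderations.create(input: "I want to hurt someone.")
moderation.results.each { |result| puts "flagged: #{result.flagged}" }
```
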
+ sig do + params( + background: T.nilable(T::Boolean), + conversation: + T.nilable( + T.any( + String, + OpenAI::Responses::ResponseConversationParam::OrHash + ) + ), + include: + T.nilable( + T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol] + ), + input: OpenAI::Responses::ResponseCreateParams::Input::Variants, + instructions: T.nilable(String), + max_output_tokens: T.nilable(Integer), + max_tool_calls: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: + T.any( + String, + OpenAI::ChatModel::OrSymbol, + OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol + ), + parallel_tool_calls: T.nilable(T::Boolean), + previous_response_id: T.nilable(String), + prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash), + prompt_cache_key: String, + reasoning: T.nilable(OpenAI::Reasoning::OrHash), + safety_identifier: String, + service_tier: + T.nilable( + OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol + ), + store: T.nilable(T::Boolean), + stream_options: + T.nilable( + OpenAI::Responses::ResponseCreateParams::StreamOptions::OrHash + ), + temperature: T.nilable(Float), + text: OpenAI::Responses::ResponseTextConfig::OrHash, + tool_choice: + T.any( + OpenAI::Responses::ToolChoiceOptions::OrSymbol, + OpenAI::Responses::ToolChoiceAllowed::OrHash, + OpenAI::Responses::ToolChoiceTypes::OrHash, + OpenAI::Responses::ToolChoiceFunction::OrHash, + OpenAI::Responses::ToolChoiceMcp::OrHash, + OpenAI::Responses::ToolChoiceCustom::OrHash + ), + tools: + T::Array[ + T.any( + OpenAI::Responses::FunctionTool::OrHash, + OpenAI::Responses::FileSearchTool::OrHash, + OpenAI::Responses::ComputerTool::OrHash, + OpenAI::Responses::Tool::Mcp::OrHash, + OpenAI::Responses::Tool::CodeInterpreter::OrHash, + OpenAI::Responses::Tool::ImageGeneration::OrHash, + OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::WebSearchTool::OrHash + ) + ], + top_logprobs: T.nilable(Integer), + top_p: T.nilable(Float), + truncation: + T.nilable( + OpenAI::Responses::ResponseCreateParams::Truncation::OrSymbol + ), + user: String, + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Responses::Response) + end + def create( + # Whether to run the model response in the background. + # [Learn more](https://platform.openai.com/docs/guides/background). + background: nil, + # The conversation that this response belongs to. Items from this conversation are + # prepended to `input_items` for this response request. Input items and output + # items from this response are automatically added to this conversation after this + # response completes. + conversation: nil, + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `message.output_text.logprobs`: Include logprobs with assistant messages. + # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + # tokens in reasoning item outputs. 
This enables reasoning items to be used in + # multi-turn conversations when using the Responses API statelessly (like when + # the `store` parameter is set to `false`, or when an organization is enrolled + # in the zero data retention program). + include: nil, + # Text, image, or file inputs to the model, used to generate a response. + # + # Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + input: nil, + # A system (or developer) message inserted into the model's context. + # + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. + instructions: nil, + # An upper bound for the number of tokens that can be generated for a response, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_output_tokens: nil, + # The maximum number of total calls to built-in tools that can be processed in a + # response. This maximum number applies across all built-in tool calls, not per + # individual tool. Any further attempts to call a tool by the model will be + # ignored. + max_tool_calls: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. + model: nil, + # Whether to allow the model to run tool calls in parallel. + parallel_tool_calls: nil, + # The unique ID of the previous response to the model. Use this to create + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # Cannot be used in conjunction with `conversation`. + previous_response_id: nil, + # Reference to a prompt template and its variables. + # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + prompt: nil, + # Used by OpenAI to cache responses for similar requests to optimize your cache + # hit rates. Replaces the `user` field. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_key: nil, + # **gpt-5 and o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + reasoning: nil, + # A stable identifier used to help detect users of your application that may be + # violating OpenAI's usage policies. The IDs should be a string that uniquely + # identifies each user. We recommend hashing their username or email address, in + # order to avoid sending us any identifying information. 
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + safety_identifier: nil, + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + service_tier: nil, + # Whether to store the generated model response for later retrieval via API. + store: nil, + # Options for streaming responses. Only set this when you set `stream: true`. + stream_options: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. + temperature: nil, + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + text: nil, + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. + tool_choice: nil, + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use custom tools to call your own code. + tools: nil, + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + top_logprobs: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. + top_p: nil, + # The truncation strategy to use for the model response. 
+ # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. + truncation: nil, + # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + # `prompt_cache_key` instead to maintain caching optimizations. A stable + # identifier for your end-users. Used to boost cache hit rates by better bucketing + # similar requests and to help OpenAI detect and prevent abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + user: nil, + # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#create` + # for streaming and non-streaming use cases, respectively. + stream: false, + request_options: {} + ) + end + + # See {OpenAI::Resources::Responses#create} for non-streaming counterpart. + # + # Creates a model response. Provide + # [text](https://platform.openai.com/docs/guides/text) or + # [image](https://platform.openai.com/docs/guides/images) inputs to generate + # [text](https://platform.openai.com/docs/guides/text) or + # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + # the model call your own + # [custom code](https://platform.openai.com/docs/guides/function-calling) or use + # built-in [tools](https://platform.openai.com/docs/guides/tools) like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + # your own data as input for the model's response. 
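
A sketch of the non-streaming `#create` completed above. Walking `response.output` by item and content-part type is an assumption based on the Responses output shape; the model choice and prompt are illustrative:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: "gpt-4o",
  instructions: "You are a terse assistant.",
  input: "Summarize the Responses API in one sentence."
)

# `output` holds typed items (messages, tool calls, reasoning, ...),
# so filter for assistant messages and their text parts.
response.output.each do |item|
  next unless item.type == :message
  item.content.each do |part|
    puts part.text if part.type == :output_text
  end
end
```
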
+ sig do + params( + background: T.nilable(T::Boolean), + conversation: + T.nilable( + T.any( + String, + OpenAI::Responses::ResponseConversationParam::OrHash + ) + ), + include: + T.nilable( + T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol] + ), + input: OpenAI::Responses::ResponseCreateParams::Input::Variants, + instructions: T.nilable(String), + max_output_tokens: T.nilable(Integer), + max_tool_calls: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), + model: + T.any( + String, + OpenAI::ChatModel::OrSymbol, + OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol + ), + parallel_tool_calls: T.nilable(T::Boolean), + previous_response_id: T.nilable(String), + prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash), + prompt_cache_key: String, + reasoning: T.nilable(OpenAI::Reasoning::OrHash), + safety_identifier: String, + service_tier: + T.nilable( + OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol + ), + store: T.nilable(T::Boolean), + stream_options: + T.nilable( + OpenAI::Responses::ResponseCreateParams::StreamOptions::OrHash + ), + temperature: T.nilable(Float), + text: OpenAI::Responses::ResponseTextConfig::OrHash, + tool_choice: + T.any( + OpenAI::Responses::ToolChoiceOptions::OrSymbol, + OpenAI::Responses::ToolChoiceAllowed::OrHash, + OpenAI::Responses::ToolChoiceTypes::OrHash, + OpenAI::Responses::ToolChoiceFunction::OrHash, + OpenAI::Responses::ToolChoiceMcp::OrHash, + OpenAI::Responses::ToolChoiceCustom::OrHash + ), + tools: + T::Array[ + T.any( + OpenAI::Responses::FunctionTool::OrHash, + OpenAI::Responses::FileSearchTool::OrHash, + OpenAI::Responses::ComputerTool::OrHash, + OpenAI::Responses::Tool::Mcp::OrHash, + OpenAI::Responses::Tool::CodeInterpreter::OrHash, + OpenAI::Responses::Tool::ImageGeneration::OrHash, + OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, + OpenAI::Responses::WebSearchTool::OrHash + ) + ], + top_logprobs: T.nilable(Integer), + top_p: T.nilable(Float), + truncation: + T.nilable( + OpenAI::Responses::ResponseCreateParams::Truncation::OrSymbol + ), + user: String, + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::Stream[ + OpenAI::Responses::ResponseStreamEvent::Variants + ] + ) + end + def stream_raw( + # Whether to run the model response in the background. + # [Learn more](https://platform.openai.com/docs/guides/background). + background: nil, + # The conversation that this response belongs to. Items from this conversation are + # prepended to `input_items` for this response request. Input items and output + # items from this response are automatically added to this conversation after this + # response completes. + conversation: nil, + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `message.output_text.logprobs`: Include logprobs with assistant messages. + # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + # tokens in reasoning item outputs. 
This enables reasoning items to be used in + # multi-turn conversations when using the Responses API statelessly (like when + # the `store` parameter is set to `false`, or when an organization is enrolled + # in the zero data retention program). + include: nil, + # Text, image, or file inputs to the model, used to generate a response. + # + # Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + input: nil, + # A system (or developer) message inserted into the model's context. + # + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. + instructions: nil, + # An upper bound for the number of tokens that can be generated for a response, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_output_tokens: nil, + # The maximum number of total calls to built-in tools that can be processed in a + # response. This maximum number applies across all built-in tool calls, not per + # individual tool. Any further attempts to call a tool by the model will be + # ignored. + max_tool_calls: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. + model: nil, + # Whether to allow the model to run tool calls in parallel. + parallel_tool_calls: nil, + # The unique ID of the previous response to the model. Use this to create + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). + # Cannot be used in conjunction with `conversation`. + previous_response_id: nil, + # Reference to a prompt template and its variables. + # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + prompt: nil, + # Used by OpenAI to cache responses for similar requests to optimize your cache + # hit rates. Replaces the `user` field. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_key: nil, + # **gpt-5 and o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). + reasoning: nil, + # A stable identifier used to help detect users of your application that may be + # violating OpenAI's usage policies. The IDs should be a string that uniquely + # identifies each user. We recommend hashing their username or email address, in + # order to avoid sending us any identifying information. 
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + safety_identifier: nil, + # Specifies the processing type used for serving the request. + # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # '[priority](https://openai.com/api-priority-processing/)', then the request + # will be processed with the corresponding service tier. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. + service_tier: nil, + # Whether to store the generated model response for later retrieval via API. + store: nil, + # Options for streaming responses. Only set this when you set `stream: true`. + stream_options: nil, + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. + temperature: nil, + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + text: nil, + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. + tool_choice: nil, + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use custom tools to call your own code. + tools: nil, + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + top_logprobs: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. + top_p: nil, + # The truncation strategy to use for the model response. 
+ # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. + truncation: nil, + # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + # `prompt_cache_key` instead to maintain caching optimizations. A stable + # identifier for your end-users. Used to boost cache hit rates by better bucketing + # similar requests and to help OpenAI detect and prevent abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + user: nil, + # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#create` + # for streaming and non-streaming use cases, respectively. + stream: true, + request_options: {} + ) + end + + # See {OpenAI::Resources::Responses#retrieve_streaming} for streaming counterpart. + # + # Retrieves a model response with the given ID. + sig do + params( + response_id: String, + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + include_obfuscation: T::Boolean, + starting_after: Integer, + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Responses::Response) + end + def retrieve( + # The ID of the response to retrieve. + response_id, + # Additional fields to include in the response. See the `include` parameter for + # Response creation above for more information. + include: nil, + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + include_obfuscation: nil, + # The sequence number of the event after which to start streaming. + starting_after: nil, + # There is no need to provide `stream:`. Instead, use `#retrieve_streaming` or + # `#retrieve` for streaming and non-streaming use cases, respectively. + stream: false, + request_options: {} + ) + end + + # See {OpenAI::Resources::Responses#retrieve} for non-streaming counterpart. + # + # Retrieves a model response with the given ID. + sig do + params( + response_id: String, + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + include_obfuscation: T::Boolean, + starting_after: Integer, + stream: T.noreturn, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::Stream[ + OpenAI::Responses::ResponseStreamEvent::Variants + ] + ) + end + def retrieve_streaming( + # The ID of the response to retrieve. + response_id, + # Additional fields to include in the response. See the `include` parameter for + # Response creation above for more information. + include: nil, + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. 
You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + include_obfuscation: nil, + # The sequence number of the event after which to start streaming. + starting_after: nil, + # There is no need to provide `stream:`. Instead, use `#retrieve_streaming` or + # `#retrieve` for streaming and non-streaming use cases, respectively. + stream: true, + request_options: {} + ) + end + + # Deletes a model response with the given ID. + sig do + params( + response_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).void + end + def delete( + # The ID of the response to delete. + response_id, + request_options: {} + ) + end + + # Cancels a model response with the given ID. Only responses created with the + # `background` parameter set to `true` can be cancelled. + # [Learn more](https://platform.openai.com/docs/guides/background). + sig do + params( + response_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Responses::Response) + end + def cancel( + # The ID of the response to cancel. + response_id, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/openai/resources/responses/input_items.rbi b/rbi/openai/resources/responses/input_items.rbi new file mode 100644 index 00000000..5875866b --- /dev/null +++ b/rbi/openai/resources/responses/input_items.rbi @@ -0,0 +1,49 @@ +# typed: strong + +module OpenAI + module Resources + class Responses + class InputItems + # Returns a list of input items for a given response. + sig do + params( + response_id: String, + after: String, + include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + limit: Integer, + order: OpenAI::Responses::InputItemListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::CursorPage[ + OpenAI::Responses::ResponseItem::Variants + ] + ) + end + def list( + # The ID of the response to retrieve input items for. + response_id, + # An item ID to list items after, used in pagination. + after: nil, + # Additional fields to include in the response. See the `include` parameter for + # Response creation above for more information. + include: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # The order to return the input items in. Default is `desc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. + order: nil, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end +end diff --git a/rbi/openai/resources/uploads.rbi b/rbi/openai/resources/uploads.rbi new file mode 100644 index 00000000..1a0f93c2 --- /dev/null +++ b/rbi/openai/resources/uploads.rbi @@ -0,0 +1,113 @@ +# typed: strong + +module OpenAI + module Resources + class Uploads + sig { returns(OpenAI::Resources::Uploads::Parts) } + attr_reader :parts + + # Creates an intermediate + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object + # that you can add + # [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to. + # Currently, an Upload can accept at most 8 GB in total and expires after an hour + # after you create it. 
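
With `#stream_raw`, `#retrieve`, and the `input_items` resource all in place above, a streaming round trip looks roughly like the following, reusing the `client` from earlier sketches. The `response.created` / `response.output_text.delta` event types and their fields are assumptions based on the underlying API's event names:

```ruby
stream = client.responses.stream_raw(
  model: "gpt-4o",
  input: "Write a haiku about type signatures.",
  store: true
)

response_id = nil
stream.each do |event|
  case event.type
  when :"response.created"
    response_id = event.response.id
  when :"response.output_text.delta"
    print event.delta # text arrives incrementally
  end
end
puts

# Because the response was stored, its inputs can be listed back, oldest first.
client.responses.input_items.list(response_id, order: :asc).auto_paging_each do |item|
  puts item.type
end
```
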
+ # + # Once you complete the Upload, we will create a + # [File](https://platform.openai.com/docs/api-reference/files/object) object that + # contains all the parts you uploaded. This File is usable in the rest of our + # platform as a regular File object. + # + # For certain `purpose` values, the correct `mime_type` must be specified. Please + # refer to documentation for the + # [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). + # + # For guidance on the proper filename extensions for each purpose, please follow + # the documentation on + # [creating a File](https://platform.openai.com/docs/api-reference/files/create). + sig do + params( + bytes: Integer, + filename: String, + mime_type: String, + purpose: OpenAI::FilePurpose::OrSymbol, + expires_after: OpenAI::UploadCreateParams::ExpiresAfter::OrHash, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Upload) + end + def create( + # The number of bytes in the file you are uploading. + bytes:, + # The name of the file to upload. + filename:, + # The MIME type of the file. + # + # This must fall within the supported MIME types for your file purpose. See the + # supported MIME types for assistants and vision. + mime_type:, + # The intended purpose of the uploaded file. + # + # See the + # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + purpose:, + # The expiration policy for a file. By default, files with `purpose=batch` expire + # after 30 days and all other files are persisted until they are manually deleted. + expires_after: nil, + request_options: {} + ) + end + + # Cancels the Upload. No Parts may be added after an Upload is cancelled. + sig do + params( + upload_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Upload) + end + def cancel( + # The ID of the Upload. + upload_id, + request_options: {} + ) + end + + # Completes the + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object). + # + # Within the returned Upload object, there is a nested + # [File](https://platform.openai.com/docs/api-reference/files/object) object that + # is ready to use in the rest of the platform. + # + # You can specify the order of the Parts by passing in an ordered list of the Part + # IDs. + # + # The number of bytes uploaded upon completion must match the number of bytes + # initially specified when creating the Upload object. No Parts may be added after + # an Upload is completed. + sig do + params( + upload_id: String, + part_ids: T::Array[String], + md5: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Upload) + end + def complete( + # The ID of the Upload. + upload_id, + # The ordered list of Part IDs. + part_ids:, + # The optional md5 checksum for the file contents to verify if the bytes uploaded + # matches what you expect. 
+ md5: nil, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/openai/resources/uploads/parts.rbi b/rbi/openai/resources/uploads/parts.rbi new file mode 100644 index 00000000..055b46b9 --- /dev/null +++ b/rbi/openai/resources/uploads/parts.rbi @@ -0,0 +1,41 @@ +# typed: strong + +module OpenAI + module Resources + class Uploads + class Parts + # Adds a + # [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object. + # A Part represents a chunk of bytes from the file you are trying to upload. + # + # Each Part can be at most 64 MB, and you can add Parts until you hit the Upload + # maximum of 8 GB. + # + # It is possible to add multiple Parts in parallel. You can decide the intended + # order of the Parts when you + # [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete). + sig do + params( + upload_id: String, + data: OpenAI::Internal::FileInput, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Uploads::UploadPart) + end + def create( + # The ID of the Upload. + upload_id, + # The chunk of bytes for this Part. + data:, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end +end diff --git a/rbi/openai/resources/vector_stores.rbi b/rbi/openai/resources/vector_stores.rbi new file mode 100644 index 00000000..727abd63 --- /dev/null +++ b/rbi/openai/resources/vector_stores.rbi @@ -0,0 +1,182 @@ +# typed: strong + +module OpenAI + module Resources + class VectorStores + sig { returns(OpenAI::Resources::VectorStores::Files) } + attr_reader :files + + sig { returns(OpenAI::Resources::VectorStores::FileBatches) } + attr_reader :file_batches + + # Create a vector store. + sig do + params( + chunking_strategy: + T.any( + OpenAI::AutoFileChunkingStrategyParam::OrHash, + OpenAI::StaticFileChunkingStrategyObjectParam::OrHash + ), + expires_after: OpenAI::VectorStoreCreateParams::ExpiresAfter::OrHash, + file_ids: T::Array[String], + metadata: T.nilable(T::Hash[Symbol, String]), + name: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::VectorStore) + end + def create( + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. + chunking_strategy: nil, + # The expiration policy for a vector store. + expires_after: nil, + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. + file_ids: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The name of the vector store. + name: nil, + request_options: {} + ) + end + + # Retrieves a vector store. + sig do + params( + vector_store_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::VectorStore) + end + def retrieve( + # The ID of the vector store to retrieve. 
+ vector_store_id, + request_options: {} + ) + end + + # Modifies a vector store. + sig do + params( + vector_store_id: String, + expires_after: + T.nilable(OpenAI::VectorStoreUpdateParams::ExpiresAfter::OrHash), + metadata: T.nilable(T::Hash[Symbol, String]), + name: T.nilable(String), + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::VectorStore) + end + def update( + # The ID of the vector store to modify. + vector_store_id, + # The expiration policy for a vector store. + expires_after: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. + metadata: nil, + # The name of the vector store. + name: nil, + request_options: {} + ) + end + + # Returns a list of vector stores. + sig do + params( + after: String, + before: String, + limit: Integer, + order: OpenAI::VectorStoreListParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::Internal::CursorPage[OpenAI::VectorStore]) + end + def list( + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. + after: nil, + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. + before: nil, + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. + limit: nil, + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. + order: nil, + request_options: {} + ) + end + + # Delete a vector store. + sig do + params( + vector_store_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::VectorStoreDeleted) + end + def delete( + # The ID of the vector store to delete. + vector_store_id, + request_options: {} + ) + end + + # Search a vector store for relevant chunks based on a query and file attributes + # filter. + sig do + params( + vector_store_id: String, + query: OpenAI::VectorStoreSearchParams::Query::Variants, + filters: + T.any( + OpenAI::ComparisonFilter::OrHash, + OpenAI::CompoundFilter::OrHash + ), + max_num_results: Integer, + ranking_options: + OpenAI::VectorStoreSearchParams::RankingOptions::OrHash, + rewrite_query: T::Boolean, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::Page[OpenAI::Models::VectorStoreSearchResponse] + ) + end + def search( + # The ID of the vector store to search. + vector_store_id, + # A query string for a search + query:, + # A filter to apply based on file attributes. + filters: nil, + # The maximum number of results to return. This number should be between 1 and 50 + # inclusive. + max_num_results: nil, + # Ranking options for search. + ranking_options: nil, + # Whether to rewrite the natural language query for vector search. 
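+ #
+ # A hedged usage sketch for `#search` (hypothetical store ID, query, and
+ # filter values; the filter shape follows `OpenAI::ComparisonFilter`):
+ #
+ # hits = client.vector_stores.search(
+ # "vs_123",
+ # query: "What is the refund window?",
+ # filters: {key: "doc_type", type: :eq, value: "policy"},
+ # max_num_results: 5
+ # )
+ # hits.auto_paging_each { |hit| puts hit.to_h }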
+ rewrite_query: nil, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/openai/resources/vector_stores/file_batches.rbi b/rbi/openai/resources/vector_stores/file_batches.rbi new file mode 100644 index 00000000..c6aca892 --- /dev/null +++ b/rbi/openai/resources/vector_stores/file_batches.rbi @@ -0,0 +1,134 @@ +# typed: strong + +module OpenAI + module Resources + class VectorStores + class FileBatches + # Create a vector store file batch. + sig do + params( + vector_store_id: String, + file_ids: T::Array[String], + attributes: + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::FileBatchCreateParams::Attribute::Variants + ] + ), + chunking_strategy: + T.any( + OpenAI::AutoFileChunkingStrategyParam::OrHash, + OpenAI::StaticFileChunkingStrategyObjectParam::OrHash + ), + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::VectorStores::VectorStoreFileBatch) + end + def create( + # The ID of the vector store for which to create a File Batch. + vector_store_id, + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. + file_ids:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + attributes: nil, + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. + chunking_strategy: nil, + request_options: {} + ) + end + + # Retrieves a vector store file batch. + sig do + params( + batch_id: String, + vector_store_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::VectorStores::VectorStoreFileBatch) + end + def retrieve( + # The ID of the file batch being retrieved. + batch_id, + # The ID of the vector store that the file batch belongs to. + vector_store_id:, + request_options: {} + ) + end + + # Cancel a vector store file batch. This attempts to cancel the processing of + # files in this batch as soon as possible. + sig do + params( + batch_id: String, + vector_store_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::VectorStores::VectorStoreFileBatch) + end + def cancel( + # The ID of the file batch to cancel. + batch_id, + # The ID of the vector store that the file batch belongs to. + vector_store_id:, + request_options: {} + ) + end + + # Returns a list of vector store files in a batch. + sig do + params( + batch_id: String, + vector_store_id: String, + after: String, + before: String, + filter: + OpenAI::VectorStores::FileBatchListFilesParams::Filter::OrSymbol, + limit: Integer, + order: + OpenAI::VectorStores::FileBatchListFilesParams::Order::OrSymbol, + request_options: OpenAI::RequestOptions::OrHash + ).returns( + OpenAI::Internal::CursorPage[OpenAI::VectorStores::VectorStoreFile] + ) + end + def list_files( + # Path param: The ID of the file batch that the files belong to. + batch_id, + # Path param: The ID of the vector store that the files belong to. + vector_store_id:, + # Query param: A cursor for use in pagination. 
`after` is an object ID that + # defines your place in the list. For instance, if you make a list request and + # receive 100 objects, ending with obj_foo, your subsequent call can include + # after=obj_foo in order to fetch the next page of the list. + after: nil, + # Query param: A cursor for use in pagination. `before` is an object ID that + # defines your place in the list. For instance, if you make a list request and + # receive 100 objects, starting with obj_foo, your subsequent call can include + # before=obj_foo in order to fetch the previous page of the list. + before: nil, + # Query param: Filter by file status. One of `in_progress`, `completed`, `failed`, + # `cancelled`. + filter: nil, + # Query param: A limit on the number of objects to be returned. Limit can range + # between 1 and 100, and the default is 20. + limit: nil, + # Query param: Sort order by the `created_at` timestamp of the objects. `asc` for + # ascending order and `desc` for descending order. + order: nil, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end +end diff --git a/rbi/openai/resources/vector_stores/files.rbi b/rbi/openai/resources/vector_stores/files.rbi new file mode 100644 index 00000000..711c88f0 --- /dev/null +++ b/rbi/openai/resources/vector_stores/files.rbi @@ -0,0 +1,183 @@ +# typed: strong + +module OpenAI + module Resources + class VectorStores + class Files + # Create a vector store file by attaching a + # [File](https://platform.openai.com/docs/api-reference/files) to a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object). + sig do + params( + vector_store_id: String, + file_id: String, + attributes: + T.nilable( + T::Hash[ + Symbol, + OpenAI::VectorStores::FileCreateParams::Attribute::Variants + ] + ), + chunking_strategy: + T.any( + OpenAI::AutoFileChunkingStrategyParam::OrHash, + OpenAI::StaticFileChunkingStrategyObjectParam::OrHash + ), + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::VectorStores::VectorStoreFile) + end + def create( + # The ID of the vector store for which to create a File. + vector_store_id, + # A [File](https://platform.openai.com/docs/api-reference/files) ID that the + # vector store should use. Useful for tools like `file_search` that can access + # files. + file_id:, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. + attributes: nil, + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. + chunking_strategy: nil, + request_options: {} + ) + end + + # Retrieves a vector store file. + sig do + params( + file_id: String, + vector_store_id: String, + request_options: OpenAI::RequestOptions::OrHash + ).returns(OpenAI::VectorStores::VectorStoreFile) + end + def retrieve( + # The ID of the file being retrieved. + file_id, + # The ID of the vector store that the file belongs to. + vector_store_id:, + request_options: {} + ) + end + + # Update attributes on a vector store file. 
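+ #
+ # A hedged usage sketch (hypothetical IDs and attribute values; values may
+ # be strings, booleans, or numbers):
+ #
+ # client.vector_stores.files.update(
+ # "file-abc123",
+ # vector_store_id: "vs_123",
+ # attributes: {author: "jane", year: 2024, draft: false}
+ # )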
+ sig do
+ params(
+ file_id: String,
+ vector_store_id: String,
+ attributes:
+ T.nilable(
+ T::Hash[
+ Symbol,
+ OpenAI::VectorStores::FileUpdateParams::Attribute::Variants
+ ]
+ ),
+ request_options: OpenAI::RequestOptions::OrHash
+ ).returns(OpenAI::VectorStores::VectorStoreFile)
+ end
+ def update(
+ # Path param: The ID of the file whose attributes to update.
+ file_id,
+ # Path param: The ID of the vector store the file belongs to.
+ vector_store_id:,
+ # Body param: Set of 16 key-value pairs that can be attached to an object. This
+ # can be useful for storing additional information about the object in a
+ # structured format, and querying for objects via API or the dashboard. Keys are
+ # strings with a maximum length of 64 characters. Values are strings with a
+ # maximum length of 512 characters, booleans, or numbers.
+ attributes:,
+ request_options: {}
+ )
+ end
+
+ # Returns a list of vector store files.
+ sig do
+ params(
+ vector_store_id: String,
+ after: String,
+ before: String,
+ filter: OpenAI::VectorStores::FileListParams::Filter::OrSymbol,
+ limit: Integer,
+ order: OpenAI::VectorStores::FileListParams::Order::OrSymbol,
+ request_options: OpenAI::RequestOptions::OrHash
+ ).returns(
+ OpenAI::Internal::CursorPage[OpenAI::VectorStores::VectorStoreFile]
+ )
+ end
+ def list(
+ # The ID of the vector store that the files belong to.
+ vector_store_id,
+ # A cursor for use in pagination. `after` is an object ID that defines your place
+ # in the list. For instance, if you make a list request and receive 100 objects,
+ # ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ # fetch the next page of the list.
+ after: nil,
+ # A cursor for use in pagination. `before` is an object ID that defines your place
+ # in the list. For instance, if you make a list request and receive 100 objects,
+ # starting with obj_foo, your subsequent call can include before=obj_foo in order
+ # to fetch the previous page of the list.
+ before: nil,
+ # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
+ filter: nil,
+ # A limit on the number of objects to be returned. Limit can range between 1 and
+ # 100, and the default is 20.
+ limit: nil,
+ # Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ # order and `desc` for descending order.
+ order: nil,
+ request_options: {}
+ )
+ end
+
+ # Delete a vector store file. This will remove the file from the vector store but
+ # the file itself will not be deleted. To delete the file, use the
+ # [delete file](https://platform.openai.com/docs/api-reference/files/delete)
+ # endpoint.
+ sig do
+ params(
+ file_id: String,
+ vector_store_id: String,
+ request_options: OpenAI::RequestOptions::OrHash
+ ).returns(OpenAI::VectorStores::VectorStoreFileDeleted)
+ end
+ def delete(
+ # The ID of the file to delete.
+ file_id,
+ # The ID of the vector store that the file belongs to.
+ vector_store_id:,
+ request_options: {}
+ )
+ end
+
+ # Retrieve the parsed contents of a vector store file.
+ sig do
+ params(
+ file_id: String,
+ vector_store_id: String,
+ request_options: OpenAI::RequestOptions::OrHash
+ ).returns(
+ OpenAI::Internal::Page[
+ OpenAI::Models::VectorStores::FileContentResponse
+ ]
+ )
+ end
+ def content(
+ # The ID of the file within the vector store.
+ file_id,
+ # The ID of the vector store.
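+ #
+ # A hedged sketch of streaming the parsed chunks (hypothetical IDs; assumes
+ # each response item exposes `text`, per the API's content objects):
+ #
+ # page = client.vector_stores.files.content("file-abc123", vector_store_id: "vs_123")
+ # page.auto_paging_each { |chunk| print chunk.text }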
+ vector_store_id:, + request_options: {} + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end + end +end diff --git a/rbi/openai/resources/webhooks.rbi b/rbi/openai/resources/webhooks.rbi new file mode 100644 index 00000000..c5ac27ec --- /dev/null +++ b/rbi/openai/resources/webhooks.rbi @@ -0,0 +1,39 @@ +# typed: strong + +module OpenAI + module Resources + class Webhooks + # Validates that the given payload was sent by OpenAI and parses the payload. + sig do + params(payload: String).returns( + T.any( + OpenAI::Webhooks::BatchCancelledWebhookEvent, + OpenAI::Webhooks::BatchCompletedWebhookEvent, + OpenAI::Webhooks::BatchExpiredWebhookEvent, + OpenAI::Webhooks::BatchFailedWebhookEvent, + OpenAI::Webhooks::EvalRunCanceledWebhookEvent, + OpenAI::Webhooks::EvalRunFailedWebhookEvent, + OpenAI::Webhooks::EvalRunSucceededWebhookEvent, + OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent, + OpenAI::Webhooks::FineTuningJobFailedWebhookEvent, + OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent, + OpenAI::Webhooks::ResponseCancelledWebhookEvent, + OpenAI::Webhooks::ResponseCompletedWebhookEvent, + OpenAI::Webhooks::ResponseFailedWebhookEvent, + OpenAI::Webhooks::ResponseIncompleteWebhookEvent + ) + ) + end + def unwrap( + # The raw webhook payload as a string + payload + ) + end + + # @api private + sig { params(client: OpenAI::Client).returns(T.attached_class) } + def self.new(client:) + end + end + end +end diff --git a/rbi/openai/version.rbi b/rbi/openai/version.rbi new file mode 100644 index 00000000..8f6874c7 --- /dev/null +++ b/rbi/openai/version.rbi @@ -0,0 +1,5 @@ +# typed: strong + +module OpenAI + VERSION = T.let(T.unsafe(nil), String) +end diff --git a/release-please-config.json b/release-please-config.json index 29361af6..847228c1 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -60,5 +60,11 @@ } ], "release-type": "ruby", - "version-file": "lib/openai/version.rb" + "version-file": "lib/openai/version.rb", + "extra-files": [ + { + "type": "ruby-readme", + "path": "README.md" + } + ] } \ No newline at end of file diff --git a/scripts/bootstrap b/scripts/bootstrap index 88566757..cc31aa85 100755 --- a/scripts/bootstrap +++ b/scripts/bootstrap @@ -2,7 +2,7 @@ set -e -cd "$(dirname "$0")/.." +cd -- "$(dirname -- "$0")/.." if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ]; then brew bundle check >/dev/null 2>&1 || { @@ -13,4 +13,4 @@ fi echo "==> Installing Ruby dependencies…" -bundle install +exec -- bundle install "$@" diff --git a/scripts/detect-breaking-changes b/scripts/detect-breaking-changes new file mode 100755 index 00000000..750b7b64 --- /dev/null +++ b/scripts/detect-breaking-changes @@ -0,0 +1,68 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." 
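+
+# Usage (hedged sketch): pass the git ref whose test files should be
+# checked out and linted against the current SDK, for example:
+#
+#   ./scripts/detect-breaking-changes main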
+
+echo "==> Detecting breaking changes"
+
+TEST_PATHS=(
+ test/openai/resources/shared_test.rb
+ test/openai/resources/completions_test.rb
+ test/openai/resources/chat_test.rb
+ test/openai/resources/chat/completions_test.rb
+ test/openai/resources/chat/completions/messages_test.rb
+ test/openai/resources/embeddings_test.rb
+ test/openai/resources/files_test.rb
+ test/openai/resources/images_test.rb
+ test/openai/resources/audio_test.rb
+ test/openai/resources/audio/transcriptions_test.rb
+ test/openai/resources/audio/translations_test.rb
+ test/openai/resources/audio/speech_test.rb
+ test/openai/resources/moderations_test.rb
+ test/openai/resources/models_test.rb
+ test/openai/resources/fine_tuning_test.rb
+ test/openai/resources/fine_tuning/methods_test.rb
+ test/openai/resources/fine_tuning/jobs_test.rb
+ test/openai/resources/fine_tuning/jobs/checkpoints_test.rb
+ test/openai/resources/fine_tuning/checkpoints_test.rb
+ test/openai/resources/fine_tuning/checkpoints/permissions_test.rb
+ test/openai/resources/fine_tuning/alpha_test.rb
+ test/openai/resources/fine_tuning/alpha/graders_test.rb
+ test/openai/resources/graders_test.rb
+ test/openai/resources/graders/grader_models_test.rb
+ test/openai/resources/vector_stores_test.rb
+ test/openai/resources/vector_stores/files_test.rb
+ test/openai/resources/vector_stores/file_batches_test.rb
+ test/openai/resources/webhooks_test.rb
+ test/openai/resources/beta_test.rb
+ test/openai/resources/beta/assistants_test.rb
+ test/openai/resources/beta/threads_test.rb
+ test/openai/resources/beta/threads/runs_test.rb
+ test/openai/resources/beta/threads/runs/steps_test.rb
+ test/openai/resources/beta/threads/messages_test.rb
+ test/openai/resources/batches_test.rb
+ test/openai/resources/uploads_test.rb
+ test/openai/resources/uploads/parts_test.rb
+ test/openai/resources/responses_test.rb
+ test/openai/resources/responses/input_items_test.rb
+ test/openai/resources/conversations_test.rb
+ test/openai/resources/conversations/items_test.rb
+ test/openai/resources/evals_test.rb
+ test/openai/resources/evals/runs_test.rb
+ test/openai/resources/evals/runs/output_items_test.rb
+ test/openai/resources/containers_test.rb
+ test/openai/resources/containers/files_test.rb
+ test/openai/resources/containers/files/content_test.rb
+ test/openai/client_test.rb
+)
+
+for PATHSPEC in "${TEST_PATHS[@]}"; do
+ # Try to check out the previous versions of the test files so they
+ # can be checked against the current SDK.
+ git checkout "$1" -- "${PATHSPEC}" 2>/dev/null || true
+done
+
+# Instead of running the tests, use the linter to check if an
+# older test is no longer compatible with the latest SDK.
+./scripts/lint
diff --git a/scripts/format b/scripts/format
index 67b400de..177d1e63 100755
--- a/scripts/format
+++ b/scripts/format
@@ -5,4 +5,5 @@
 set -e
 cd -- "$(dirname -- "$0")/.."
 echo "==> Running formatters"
+
 exec -- bundle exec rake format "$@"
diff --git a/scripts/lint b/scripts/lint
index 39581dc1..08b0dbeb 100755
--- a/scripts/lint
+++ b/scripts/lint
@@ -4,4 +4,6 @@
 set -e
 cd -- "$(dirname -- "$0")/.."
+echo "==> Running linters" + exec -- bundle exec rake lint "$@" diff --git a/scripts/mock b/scripts/mock index d2814ae6..0b28f6ea 100755 --- a/scripts/mock +++ b/scripts/mock @@ -21,7 +21,7 @@ echo "==> Starting mock server with URL ${URL}" # Run prism mock on the given spec if [ "$1" == "--daemon" ]; then - npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" &> .prism.log & + npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" &> .prism.log & # Wait for server to come online echo -n "Waiting for server" @@ -37,5 +37,5 @@ if [ "$1" == "--daemon" ]; then echo else - npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" + npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" fi diff --git a/scripts/test b/scripts/test index 2e1fe093..e0dc1374 100755 --- a/scripts/test +++ b/scripts/test @@ -2,7 +2,7 @@ set -e -cd "$(dirname "$0")/.." +cd -- "$(dirname -- "$0")/.." RED='\033[0;31m' GREEN='\033[0;32m' @@ -43,7 +43,7 @@ elif ! prism_is_running ; then echo -e "To run the server, pass in the path or url of your OpenAPI" echo -e "spec to the prism command:" echo - echo -e " \$ ${YELLOW}npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock path/to/your.openapi.yml${NC}" + echo -e " \$ ${YELLOW}npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock path/to/your.openapi.yml${NC}" echo exit 1 diff --git a/sig/openai/base_client.rbs b/sig/openai/base_client.rbs deleted file mode 100644 index 4ca67417..00000000 --- a/sig/openai/base_client.rbs +++ /dev/null @@ -1,101 +0,0 @@ -module OpenAI - class BaseClient - type request_components = - { - method: Symbol, - path: String | ::Array[String], - query: ::Hash[String, (::Array[String] | String)?]?, - headers: ::Hash[String, (String - | Integer - | ::Array[(String | Integer)?])?]?, - body: top?, - unwrap: Symbol?, - page: Class?, - stream: Class?, - model: OpenAI::Converter::input?, - options: OpenAI::request_opts? - } - - type request_input = - { - method: Symbol, - url: URI::Generic, - headers: ::Hash[String, String], - body: top, - max_retries: Integer, - timeout: Float - } - - MAX_REDIRECTS: 20 - - PLATFORM_HEADERS: ::Hash[String, String] - - def self.validate!: (OpenAI::BaseClient::request_components req) -> void - - def self.should_retry?: ( - Integer status, - headers: ::Hash[String, String] - ) -> bool - - def self.follow_redirect: ( - OpenAI::BaseClient::request_input request, - status: Integer, - response_headers: ::Hash[String, String] - ) -> OpenAI::BaseClient::request_input - - # @private - attr_accessor requester: top - - def initialize: ( - base_url: String, - timeout: Float, - max_retries: Integer, - initial_retry_delay: Float, - max_retry_delay: Float, - headers: ::Hash[String, (String - | Integer - | ::Array[(String | Integer)?])?], - idempotency_header: String? 
- ) -> void - - private def auth_headers: -> ::Hash[String, String] - - private def generate_idempotency_key: -> String - - private def build_request: ( - OpenAI::BaseClient::request_components req, - OpenAI::request_options opts - ) -> OpenAI::BaseClient::request_input - - private def retry_delay: ( - ::Hash[String, String] headers, - retry_count: Integer - ) -> Float - - private def send_request: ( - OpenAI::BaseClient::request_input request, - redirect_count: Integer, - retry_count: Integer, - send_retry_header: bool - ) -> [Integer, top, Enumerable[String]] - - def request: - ( - Symbol method, - String | ::Array[String] path, - query: ::Hash[String, (::Array[String] | String)?]?, - headers: ::Hash[String, (String - | Integer - | ::Array[(String | Integer)?])?]?, - body: top?, - unwrap: Symbol?, - page: Class?, - stream: Class?, - model: OpenAI::Converter::input?, - options: OpenAI::request_opts? - ) -> top - | (OpenAI::BaseClient::request_components req) -> top - - def inspect: -> String - end -end diff --git a/sig/openai/base_model.rbs b/sig/openai/base_model.rbs deleted file mode 100644 index 5b709bfc..00000000 --- a/sig/openai/base_model.rbs +++ /dev/null @@ -1,242 +0,0 @@ -module OpenAI - module Converter - type input = OpenAI::Converter | Class - - def coerce: (top value) -> top - - def dump: (top value) -> top - - def try_strict_coerce: ( - top value - ) -> ([true, top, nil] | [false, bool, Integer]) - - def self.type_info: ( - { - const: (nil | bool | Integer | Float | Symbol)?, - enum: ^-> OpenAI::Converter::input?, - union: ^-> OpenAI::Converter::input? - } - | ^-> OpenAI::Converter::input - | OpenAI::Converter::input spec - ) -> (^-> top) - - def self.coerce: (OpenAI::Converter::input target, top value) -> top - - def self.dump: (OpenAI::Converter::input target, top value) -> top - - def self.try_strict_coerce: ( - OpenAI::Converter::input target, - top value - ) -> top - end - - class Unknown - extend OpenAI::Converter - - def self.===: (top other) -> bool - - def self.==: (top other) -> bool - - def self.coerce: (top value) -> top - - def self.dump: (top value) -> top - - def self.try_strict_coerce: ( - top value - ) -> ([true, top, nil] | [false, bool, Integer]) - end - - class BooleanModel - extend OpenAI::Converter - - def self.===: (top other) -> bool - - def self.==: (top other) -> bool - - def self.coerce: (bool | top value) -> (bool | top) - - def self.dump: (bool | top value) -> (bool | top) - - def self.try_strict_coerce: ( - top value - ) -> ([true, top, nil] | [false, bool, Integer]) - end - - class Enum - extend OpenAI::Converter - - def self.values: -> ::Array[(nil | bool | Integer | Float | Symbol)] - - private def self.finalize!: -> void - - def self.===: (top other) -> bool - - def self.==: (top other) -> bool - - def self.coerce: (String | Symbol | top value) -> (Symbol | top) - - def self.dump: (Symbol | top value) -> (Symbol | top) - - def self.try_strict_coerce: ( - top value - ) -> ([true, top, nil] | [false, bool, Integer]) - end - - class Union - extend OpenAI::Converter - - private def self.known_variants: -> ::Array[[Symbol?, Proc]] - - def self.variants: -> ::Array[[Symbol?, top]] - - private def self.discriminator: (Symbol property) -> void - - private def self.variant: ( - Symbol - | ::Hash[Symbol, top] - | ^-> OpenAI::Converter::input - | OpenAI::Converter::input key, - ?::Hash[Symbol, top] - | ^-> OpenAI::Converter::input - | OpenAI::Converter::input spec - ) -> void - - private def self.resolve_variant: (top value) -> 
OpenAI::Converter::input? - - def self.===: (top other) -> bool - - def self.==: (top other) -> bool - - def self.coerce: (top value) -> top - - def self.dump: (top value) -> top - - def self.try_strict_coerce: ( - top value - ) -> ([true, top, nil] | [false, bool, Integer]) - end - - class ArrayOf - include OpenAI::Converter - - def ===: (top other) -> bool - - def ==: (top other) -> bool - - def coerce: (Enumerable[top] | top value) -> (::Array[top] | top) - - def dump: (Enumerable[top] | top value) -> (::Array[top] | top) - - def try_strict_coerce: ( - top value - ) -> ([true, top, nil] | [false, bool, Integer]) - - def item_type: -> OpenAI::Converter::input - - def initialize: ( - ::Hash[Symbol, top] - | ^-> OpenAI::Converter::input - | OpenAI::Converter::input type_info, - ?::Hash[Symbol, top] spec - ) -> void - end - - class HashOf - include OpenAI::Converter - - def ===: (top other) -> bool - - def ==: (top other) -> bool - - def coerce: (::Hash[top, top] | top value) -> (::Hash[Symbol, top] | top) - - def dump: (::Hash[top, top] | top value) -> (::Hash[Symbol, top] | top) - - def try_strict_coerce: ( - top value - ) -> ([true, top, nil] | [false, bool, Integer]) - - def item_type: -> OpenAI::Converter::input - - def initialize: ( - ::Hash[Symbol, top] - | ^-> OpenAI::Converter::input - | OpenAI::Converter::input type_info, - ?::Hash[Symbol, top] spec - ) -> void - end - - class BaseModel - extend OpenAI::Converter - - type known_field = { mode: (:coerce | :dump)?, required: bool } - - def self.known_fields: -> ::Hash[Symbol, (OpenAI::BaseModel::known_field - & { type_fn: (^-> OpenAI::Converter::input) })] - - def self.fields: -> ::Hash[Symbol, (OpenAI::BaseModel::known_field - & { type: OpenAI::Converter::input })] - - def self.defaults: -> ::Hash[Symbol, (^-> Class)] - - private def self.add_field: ( - Symbol name_sym, - required: bool, - type_info: { - const: (nil | bool | Integer | Float | Symbol)?, - enum: ^-> OpenAI::Converter::input?, - union: ^-> OpenAI::Converter::input?, - api_name: Symbol - } - | ^-> OpenAI::Converter::input - | OpenAI::Converter::input, - spec: ::Hash[Symbol, top] - ) -> void - - def self.required: ( - Symbol name_sym, - ::Hash[Symbol, top] - | ^-> OpenAI::Converter::input - | OpenAI::Converter::input type_info, - ?::Hash[Symbol, top] spec - ) -> void - - def self.optional: ( - Symbol name_sym, - ::Hash[Symbol, top] - | ^-> OpenAI::Converter::input - | OpenAI::Converter::input type_info, - ?::Hash[Symbol, top] spec - ) -> void - - private def self.request_only: { -> void } -> void - - private def self.response_only: { -> void } -> void - - def ==: (top other) -> bool - - def self.coerce: ( - OpenAI::BaseModel | ::Hash[top, top] | top value - ) -> (instance | top) - - def self.dump: (instance | top value) -> (::Hash[top, top] | top) - - def self.try_strict_coerce: ( - top value - ) -> ([true, top, nil] | [false, bool, Integer]) - - def []: (Symbol key) -> top? - - def to_h: -> ::Hash[Symbol, top] - - alias to_hash to_h - - def deconstruct_keys: (::Array[Symbol]? 
keys) -> ::Hash[Symbol, top] - - def initialize: (?::Hash[Symbol, top] | self data) -> void - - def to_s: -> String - - def inspect: -> String - end -end diff --git a/sig/openai/base_page.rbs b/sig/openai/base_page.rbs deleted file mode 100644 index 384b9d9b..00000000 --- a/sig/openai/base_page.rbs +++ /dev/null @@ -1,20 +0,0 @@ -module OpenAI - module BasePage[Elem] - def next_page?: -> bool - - def next_page: -> self - - def auto_paging_each: { (Elem arg0) -> void } -> void - - def to_enum: -> Enumerable[Elem] - - alias enum_for to_enum - - def initialize: ( - client: OpenAI::BaseClient, - req: OpenAI::BaseClient::request_components, - headers: ::Hash[String, String], - page_data: top - ) -> void - end -end diff --git a/sig/openai/base_stream.rbs b/sig/openai/base_stream.rbs deleted file mode 100644 index e80fcba0..00000000 --- a/sig/openai/base_stream.rbs +++ /dev/null @@ -1,21 +0,0 @@ -module OpenAI - class BaseStream[Elem] - def initialize: ( - model: Class | OpenAI::Converter, - url: URI::Generic, - status: Integer, - response: top, - messages: Enumerable[OpenAI::Util::sse_message] - ) -> void - - private def iterator: -> Enumerable[Elem] - - def close: -> void - - def for_each: { (Elem arg0) -> void } -> void - - def to_enum: -> Enumerable[Elem] - - alias enum_for to_enum - end -end diff --git a/sig/openai/client.rbs b/sig/openai/client.rbs index 2d7d9a11..26c023fa 100644 --- a/sig/openai/client.rbs +++ b/sig/openai/client.rbs @@ -1,5 +1,5 @@ module OpenAI - class Client < OpenAI::BaseClient + class Client < OpenAI::Internal::Transport::BaseClient DEFAULT_MAX_RETRIES: 2 DEFAULT_TIMEOUT_IN_SECONDS: Float @@ -32,8 +32,12 @@ module OpenAI attr_reader fine_tuning: OpenAI::Resources::FineTuning + attr_reader graders: OpenAI::Resources::Graders + attr_reader vector_stores: OpenAI::Resources::VectorStores + attr_reader webhooks: OpenAI::Resources::Webhooks + attr_reader beta: OpenAI::Resources::Beta attr_reader batches: OpenAI::Resources::Batches @@ -42,17 +46,23 @@ module OpenAI attr_reader responses: OpenAI::Resources::Responses + attr_reader conversations: OpenAI::Resources::Conversations + + attr_reader evals: OpenAI::Resources::Evals + + attr_reader containers: OpenAI::Resources::Containers + private def auth_headers: -> ::Hash[String, String] def initialize: ( - base_url: String?, - api_key: String?, - organization: String?, - project: String?, - max_retries: Integer, - timeout: Float, - initial_retry_delay: Float, - max_retry_delay: Float + ?api_key: String?, + ?organization: String?, + ?project: String?, + ?base_url: String?, + ?max_retries: Integer, + ?timeout: Float, + ?initial_retry_delay: Float, + ?max_retry_delay: Float ) -> void end end diff --git a/sig/openai/cursor_page.rbs b/sig/openai/cursor_page.rbs deleted file mode 100644 index d7f7e58b..00000000 --- a/sig/openai/cursor_page.rbs +++ /dev/null @@ -1,16 +0,0 @@ -module OpenAI - class CursorPage[Elem] - include OpenAI::BasePage[Elem] - - attr_accessor data: ::Array[Elem] - - attr_accessor has_more: bool - - def initialize: ( - client: OpenAI::BaseClient, - req: OpenAI::BaseClient::request_components, - headers: ::Hash[String, String], - page_data: ::Hash[Symbol, top] - ) -> void - end -end diff --git a/sig/openai/errors.rbs b/sig/openai/errors.rbs index 08f05bd0..6bbd9f41 100644 --- a/sig/openai/errors.rbs +++ b/sig/openai/errors.rbs @@ -1,105 +1,116 @@ module OpenAI - class Error < StandardError - attr_reader cause: StandardError? 
- end - - class ConversionError < OpenAI::Error - end - - class APIError < OpenAI::Error - attr_reader url: URI::Generic - - attr_reader status: Integer? - - attr_reader body: top? - - attr_reader code: String? - - attr_reader param: String? - - attr_reader type: String? - - def initialize: ( - url: URI::Generic, - status: Integer?, - body: Object?, - request: nil, - response: nil, - message: String? - ) -> void - end - - class APIConnectionError < OpenAI::APIError - def initialize: ( - url: URI::Generic, - status: nil, - body: nil, - request: nil, - response: nil, - message: String? - ) -> void - end - - class APITimeoutError < OpenAI::APIConnectionError - def initialize: ( - url: URI::Generic, - status: nil, - body: nil, - request: nil, - response: nil, - message: String? - ) -> void - end - - class APIStatusError < OpenAI::APIError - def self.for: ( - url: URI::Generic, - status: Integer, - body: Object?, - request: nil, - response: nil, - message: String? - ) -> instance - - def initialize: ( - url: URI::Generic, - status: Integer, - body: Object?, - request: nil, - response: nil, - message: String? - ) -> void - end - - class BadRequestError < OpenAI::APIStatusError - HTTP_STATUS: 400 - end - - class AuthenticationError < OpenAI::APIStatusError - HTTP_STATUS: 401 - end - - class PermissionDeniedError < OpenAI::APIStatusError - HTTP_STATUS: 403 - end - - class NotFoundError < OpenAI::APIStatusError - HTTP_STATUS: 404 - end - - class ConflictError < OpenAI::APIStatusError - HTTP_STATUS: 409 - end - - class UnprocessableEntityError < OpenAI::APIStatusError - HTTP_STATUS: 422 - end - - class RateLimitError < OpenAI::APIStatusError - HTTP_STATUS: 429 - end - - class InternalServerError < OpenAI::APIStatusError - HTTP_STATUS: Range[Integer] + module Errors + class Error < StandardError + attr_accessor cause: StandardError? + end + + class ConversionError < OpenAI::Errors::Error + def cause: -> StandardError? + + def initialize: ( + on: Class, + method: Symbol, + target: top, + value: top, + ?cause: StandardError? + ) -> void + end + + class APIError < OpenAI::Errors::Error + attr_accessor url: URI::Generic + + attr_accessor status: Integer? + + attr_accessor body: top? + + attr_accessor code: String? + + attr_accessor param: String? + + attr_accessor type: String? + + def initialize: ( + url: URI::Generic, + ?status: Integer?, + ?body: Object?, + ?request: nil, + ?response: nil, + ?message: String? + ) -> void + end + + class APIConnectionError < OpenAI::Errors::APIError + def initialize: ( + url: URI::Generic, + ?status: nil, + ?body: nil, + ?request: nil, + ?response: nil, + ?message: String? + ) -> void + end + + class APITimeoutError < OpenAI::Errors::APIConnectionError + def initialize: ( + url: URI::Generic, + ?status: nil, + ?body: nil, + ?request: nil, + ?response: nil, + ?message: String? + ) -> void + end + + class APIStatusError < OpenAI::Errors::APIError + def self.for: ( + url: URI::Generic, + status: Integer, + body: Object?, + request: nil, + response: nil, + ?message: String? + ) -> instance + + def initialize: ( + url: URI::Generic, + status: Integer, + body: Object?, + request: nil, + response: nil, + ?message: String? 
+ ) -> void + end + + class BadRequestError < OpenAI::Errors::APIStatusError + HTTP_STATUS: 400 + end + + class AuthenticationError < OpenAI::Errors::APIStatusError + HTTP_STATUS: 401 + end + + class PermissionDeniedError < OpenAI::Errors::APIStatusError + HTTP_STATUS: 403 + end + + class NotFoundError < OpenAI::Errors::APIStatusError + HTTP_STATUS: 404 + end + + class ConflictError < OpenAI::Errors::APIStatusError + HTTP_STATUS: 409 + end + + class UnprocessableEntityError < OpenAI::Errors::APIStatusError + HTTP_STATUS: 422 + end + + class RateLimitError < OpenAI::Errors::APIStatusError + HTTP_STATUS: 429 + end + + class InternalServerError < OpenAI::Errors::APIStatusError + HTTP_STATUS: Range[Integer] + end end end diff --git a/sig/openai/extern.rbs b/sig/openai/extern.rbs deleted file mode 100644 index 23069f69..00000000 --- a/sig/openai/extern.rbs +++ /dev/null @@ -1,4 +0,0 @@ -module OpenAI - module Extern - end -end diff --git a/sig/openai/file_part.rbs b/sig/openai/file_part.rbs new file mode 100644 index 00000000..c517bee8 --- /dev/null +++ b/sig/openai/file_part.rbs @@ -0,0 +1,21 @@ +module OpenAI + class FilePart + attr_reader content: Pathname | StringIO | IO | String + + attr_reader content_type: String? + + attr_reader filename: String? + + private def read: -> String + + def to_json: (*top a) -> String + + def to_yaml: (*top a) -> String + + def initialize: ( + Pathname | StringIO | IO | String content, + ?filename: String?, + ?content_type: String? + ) -> void + end +end diff --git a/sig/openai/internal.rbs b/sig/openai/internal.rbs new file mode 100644 index 00000000..105072ce --- /dev/null +++ b/sig/openai/internal.rbs @@ -0,0 +1,9 @@ +module OpenAI + module Internal + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + type file_input = Pathname | StringIO | IO | String | OpenAI::FilePart + + OMIT: Object + end +end diff --git a/sig/openai/internal/conversation_cursor_page.rbs b/sig/openai/internal/conversation_cursor_page.rbs new file mode 100644 index 00000000..f2b11b2c --- /dev/null +++ b/sig/openai/internal/conversation_cursor_page.rbs @@ -0,0 +1,15 @@ +module OpenAI + module Internal + class ConversationCursorPage[Elem] + include OpenAI::Internal::Type::BasePage[Elem] + + attr_accessor data: ::Array[Elem]? + + attr_accessor has_more: bool + + attr_accessor last_id: String + + def inspect: -> String + end + end +end diff --git a/sig/openai/internal/cursor_page.rbs b/sig/openai/internal/cursor_page.rbs new file mode 100644 index 00000000..3a79ad6a --- /dev/null +++ b/sig/openai/internal/cursor_page.rbs @@ -0,0 +1,13 @@ +module OpenAI + module Internal + class CursorPage[Elem] + include OpenAI::Internal::Type::BasePage[Elem] + + attr_accessor data: ::Array[Elem]? + + attr_accessor has_more: bool + + def inspect: -> String + end + end +end diff --git a/sig/openai/internal/page.rbs b/sig/openai/internal/page.rbs new file mode 100644 index 00000000..9cad7eed --- /dev/null +++ b/sig/openai/internal/page.rbs @@ -0,0 +1,13 @@ +module OpenAI + module Internal + class Page[Elem] + include OpenAI::Internal::Type::BasePage[Elem] + + attr_accessor data: ::Array[Elem]? 
+ + attr_accessor object: String + + def inspect: -> String + end + end +end diff --git a/sig/openai/internal/stream.rbs b/sig/openai/internal/stream.rbs new file mode 100644 index 00000000..b723e314 --- /dev/null +++ b/sig/openai/internal/stream.rbs @@ -0,0 +1,9 @@ +module OpenAI + module Internal + class Stream[Elem] + include OpenAI::Internal::Type::BaseStream[OpenAI::Internal::Util::server_sent_event, Elem] + + private def iterator: -> Enumerable[Elem] + end + end +end diff --git a/sig/openai/internal/transport/base_client.rbs b/sig/openai/internal/transport/base_client.rbs new file mode 100644 index 00000000..db2e8ff4 --- /dev/null +++ b/sig/openai/internal/transport/base_client.rbs @@ -0,0 +1,131 @@ +module OpenAI + module Internal + module Transport + class BaseClient + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + type request_components = + { + method: Symbol, + path: String | ::Array[String], + query: ::Hash[String, (::Array[String] | String)?]?, + headers: ::Hash[String, (String + | Integer + | ::Array[(String | Integer)?])?]?, + body: top?, + unwrap: (Symbol + | Integer + | ::Array[(Symbol | Integer)] + | (^(top arg0) -> top))?, + page: Class?, + stream: Class?, + model: OpenAI::Internal::Type::Converter::input?, + options: OpenAI::request_opts? + } + type request_input = + { + method: Symbol, + url: URI::Generic, + headers: ::Hash[String, String], + body: top, + max_retries: Integer, + timeout: Float + } + + MAX_REDIRECTS: 20 + + PLATFORM_HEADERS: ::Hash[String, String] + + def self.validate!: ( + OpenAI::Internal::Transport::BaseClient::request_components req + ) -> void + + def self.should_retry?: ( + Integer status, + headers: ::Hash[String, String] + ) -> bool + + def self.follow_redirect: ( + OpenAI::Internal::Transport::BaseClient::request_input request, + status: Integer, + response_headers: ::Hash[String, String] + ) -> OpenAI::Internal::Transport::BaseClient::request_input + + def self.reap_connection!: ( + Integer | OpenAI::Errors::APIConnectionError status, + stream: Enumerable[String]? + ) -> void + + attr_reader base_url: URI::Generic + + attr_reader timeout: Float + + attr_reader max_retries: Integer + + attr_reader initial_retry_delay: Float + + attr_reader max_retry_delay: Float + + attr_reader headers: ::Hash[String, String] + + attr_reader idempotency_header: String? + + # @api private + attr_reader requester: OpenAI::Internal::Transport::PooledNetRequester + + def initialize: ( + base_url: String, + ?timeout: Float, + ?max_retries: Integer, + ?initial_retry_delay: Float, + ?max_retry_delay: Float, + ?headers: ::Hash[String, (String + | Integer + | ::Array[(String | Integer)?])?], + ?idempotency_header: String? 
+ ) -> void + + private def auth_headers: -> ::Hash[String, String] + + private def generate_idempotency_key: -> String + + private def build_request: ( + OpenAI::Internal::Transport::BaseClient::request_components req, + OpenAI::request_options opts + ) -> OpenAI::Internal::Transport::BaseClient::request_input + + private def retry_delay: ( + ::Hash[String, String] headers, + retry_count: Integer + ) -> Float + + def send_request: ( + OpenAI::Internal::Transport::BaseClient::request_input request, + redirect_count: Integer, + retry_count: Integer, + send_retry_header: bool + ) -> [Integer, top, Enumerable[String]] + + def request: ( + Symbol method, + String | ::Array[String] path, + ?query: ::Hash[String, (::Array[String] | String)?]?, + ?headers: ::Hash[String, (String + | Integer + | ::Array[(String | Integer)?])?]?, + ?body: top?, + ?unwrap: (Symbol + | Integer + | ::Array[(Symbol | Integer)] + | (^(top arg0) -> top))?, + ?page: Class?, + ?stream: Class?, + ?model: OpenAI::Internal::Type::Converter::input?, + ?options: OpenAI::request_opts? + ) -> top + + def inspect: -> String + end + end + end +end diff --git a/sig/openai/internal/transport/pooled_net_requester.rbs b/sig/openai/internal/transport/pooled_net_requester.rbs new file mode 100644 index 00000000..1719d8fd --- /dev/null +++ b/sig/openai/internal/transport/pooled_net_requester.rbs @@ -0,0 +1,45 @@ +module OpenAI + module Internal + module Transport + class PooledNetRequester + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + type request = + { + method: Symbol, + url: URI::Generic, + headers: ::Hash[String, String], + body: top, + deadline: Float + } + + KEEP_ALIVE_TIMEOUT: 30 + + DEFAULT_MAX_CONNECTIONS: Integer + + def self.connect: (URI::Generic url) -> top + + def self.calibrate_socket_timeout: (top conn, Float deadline) -> void + + def self.build_request: ( + OpenAI::Internal::Transport::PooledNetRequester::request request + ) { + (String arg0) -> void + } -> [top, (^-> void)] + + private def with_pool: ( + URI::Generic url, + deadline: Float + ) { + (top arg0) -> void + } -> void + + def execute: ( + OpenAI::Internal::Transport::PooledNetRequester::request request + ) -> [Integer, top, Enumerable[String]] + + def initialize: (?size: Integer) -> void + end + end + end +end diff --git a/sig/openai/internal/type/array_of.rbs b/sig/openai/internal/type/array_of.rbs new file mode 100644 index 00000000..000ed3f5 --- /dev/null +++ b/sig/openai/internal/type/array_of.rbs @@ -0,0 +1,48 @@ +module OpenAI + module Internal + module Type + class ArrayOf[Elem] + include OpenAI::Internal::Type::Converter + include OpenAI::Internal::Util::SorbetRuntimeSupport + + def self.[]: ( + ::Hash[Symbol, top] + | ^-> OpenAI::Internal::Type::Converter::input + | OpenAI::Internal::Type::Converter::input type_info, + ?::Hash[Symbol, top] spec + ) -> instance + + def ===: (top other) -> bool + + def ==: (top other) -> bool + + def hash: -> Integer + + def coerce: ( + ::Array[top] | top value, + state: OpenAI::Internal::Type::Converter::coerce_state + ) -> (::Array[top] | top) + + def dump: ( + ::Array[top] | top value, + state: OpenAI::Internal::Type::Converter::dump_state + ) -> (::Array[top] | top) + + def to_sorbet_type: -> top + + def item_type: -> Elem + + def nilable?: -> bool + + def initialize: ( + ::Hash[Symbol, top] + | ^-> OpenAI::Internal::Type::Converter::input + | OpenAI::Internal::Type::Converter::input type_info, + ?::Hash[Symbol, top] spec + ) -> void + + def inspect: (?depth: Integer) -> String + end + end + end +end diff 
--git a/sig/openai/internal/type/base_model.rbs b/sig/openai/internal/type/base_model.rbs new file mode 100644 index 00000000..f9e57a2e --- /dev/null +++ b/sig/openai/internal/type/base_model.rbs @@ -0,0 +1,102 @@ +module OpenAI + module Internal + module Type + class BaseModel + extend OpenAI::Internal::Type::Converter + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + type known_field = + { mode: (:coerce | :dump)?, required: bool, nilable: bool } + + def self.inherited: (self child) -> void + + def self.known_fields: -> ::Hash[Symbol, (OpenAI::Internal::Type::BaseModel::known_field + & { type_fn: (^-> OpenAI::Internal::Type::Converter::input) })] + + def self.fields: -> ::Hash[Symbol, (OpenAI::Internal::Type::BaseModel::known_field + & { type: OpenAI::Internal::Type::Converter::input })] + + private def self.add_field: ( + Symbol name_sym, + required: bool, + type_info: { + const: (nil | bool | Integer | Float | Symbol)?, + enum: ^-> OpenAI::Internal::Type::Converter::input?, + union: ^-> OpenAI::Internal::Type::Converter::input?, + api_name: Symbol + } + | ^-> OpenAI::Internal::Type::Converter::input + | OpenAI::Internal::Type::Converter::input, + spec: ::Hash[Symbol, top] + ) -> void + + def self.required: ( + Symbol name_sym, + ::Hash[Symbol, top] + | ^-> OpenAI::Internal::Type::Converter::input + | OpenAI::Internal::Type::Converter::input type_info, + ?::Hash[Symbol, top] spec + ) -> void + + def self.optional: ( + Symbol name_sym, + ::Hash[Symbol, top] + | ^-> OpenAI::Internal::Type::Converter::input + | OpenAI::Internal::Type::Converter::input type_info, + ?::Hash[Symbol, top] spec + ) -> void + + private def self.request_only: { -> void } -> void + + private def self.response_only: { -> void } -> void + + def self.==: (top other) -> bool + + def self.hash: -> Integer + + def ==: (top other) -> bool + + def hash: -> Integer + + def self.coerce: ( + OpenAI::Internal::Type::BaseModel | ::Hash[top, top] | top value, + state: OpenAI::Internal::Type::Converter::coerce_state + ) -> (instance | top) + + def self.dump: ( + instance | top value, + state: OpenAI::Internal::Type::Converter::dump_state + ) -> (::Hash[top, top] | top) + + def self.to_sorbet_type: -> top + + def self.recursively_to_h: ( + OpenAI::Internal::Type::BaseModel model, + convert: bool + ) -> ::Hash[Symbol, top] + + def []: (Symbol key) -> top? + + def to_h: -> ::Hash[Symbol, top] + + alias to_hash to_h + + def deep_to_h: -> ::Hash[Symbol, top] + + def deconstruct_keys: (::Array[Symbol]? 
keys) -> ::Hash[Symbol, top] + + def to_json: (*top a) -> String + + def to_yaml: (*top a) -> String + + def initialize: (?::Hash[Symbol, top] | instance data) -> void + + def self.inspect: (?depth: Integer) -> String + + def to_s: -> String + + def inspect: -> String + end + end + end +end diff --git a/sig/openai/internal/type/base_page.rbs b/sig/openai/internal/type/base_page.rbs new file mode 100644 index 00000000..b04062a6 --- /dev/null +++ b/sig/openai/internal/type/base_page.rbs @@ -0,0 +1,24 @@ +module OpenAI + module Internal + module Type + module BasePage[Elem] + def next_page?: -> bool + + def next_page: -> instance + + def auto_paging_each: { (Elem arg0) -> void } -> void + + def to_enum: -> Enumerable[Elem] + + alias enum_for to_enum + + def initialize: ( + client: OpenAI::Internal::Transport::BaseClient, + req: OpenAI::Internal::Transport::BaseClient::request_components, + headers: ::Hash[String, String], + page_data: top + ) -> void + end + end + end +end diff --git a/sig/openai/internal/type/base_stream.rbs b/sig/openai/internal/type/base_stream.rbs new file mode 100644 index 00000000..75f49297 --- /dev/null +++ b/sig/openai/internal/type/base_stream.rbs @@ -0,0 +1,37 @@ +module OpenAI + module Internal + module Type + module BaseStream[Message, Elem] + include Enumerable[Elem] + + def self.defer_closing: ( + Enumerable[top] stream + ) -> (^(Integer arg0) -> void) + + def close: -> void + + private def iterator: -> Enumerable[Elem] + + def each: { (Elem arg0) -> void } -> void + + def to_enum: -> Enumerator[Elem] + + alias enum_for to_enum + + def initialize: ( + model: Class | OpenAI::Internal::Type::Converter, + url: URI::Generic, + status: Integer, + response: top, + unwrap: Symbol + | Integer + | ::Array[Symbol | Integer] + | ^(top arg0) -> top, + stream: Enumerable[Message] + ) -> void + + def inspect: -> String + end + end + end +end diff --git a/sig/openai/internal/type/boolean.rbs b/sig/openai/internal/type/boolean.rbs new file mode 100644 index 00000000..04edea4b --- /dev/null +++ b/sig/openai/internal/type/boolean.rbs @@ -0,0 +1,26 @@ +module OpenAI + module Internal + module Type + class Boolean + extend OpenAI::Internal::Type::Converter + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + def self.===: (top other) -> bool + + def self.==: (top other) -> bool + + def self.coerce: ( + bool | top value, + state: OpenAI::Internal::Type::Converter::coerce_state + ) -> (bool | top) + + def self.dump: ( + bool | top value, + state: OpenAI::Internal::Type::Converter::dump_state + ) -> (bool | top) + + def self.to_sorbet_type: -> top + end + end + end +end diff --git a/sig/openai/internal/type/converter.rbs b/sig/openai/internal/type/converter.rbs new file mode 100644 index 00000000..3a4aa4a6 --- /dev/null +++ b/sig/openai/internal/type/converter.rbs @@ -0,0 +1,79 @@ +module OpenAI + module Internal + module Type + module Converter + extend OpenAI::Internal::Util::SorbetRuntimeSupport + + type input = OpenAI::Internal::Type::Converter | Class + + type coerce_state = + { + translate_names: bool, + strictness: bool, + exactness: { yes: Integer, no: Integer, maybe: Integer }, + error: Class, + branched: Integer + } + + type dump_state = { can_retry: bool } + + def coerce: ( + top value, + state: OpenAI::Internal::Type::Converter::coerce_state + ) -> top + + def dump: ( + top value, + state: OpenAI::Internal::Type::Converter::dump_state + ) -> top + + def inspect: (?depth: Integer) -> String + + def self.type_info: ( + { + const: (nil | bool | Integer | Float | 
Symbol)?, + enum: ^-> OpenAI::Internal::Type::Converter::input?, + union: ^-> OpenAI::Internal::Type::Converter::input? + } + | ^-> OpenAI::Internal::Type::Converter::input + | OpenAI::Internal::Type::Converter::input spec + ) -> (^-> top) + + def self.meta_info: ( + { + const: (nil | bool | Integer | Float | Symbol)?, + enum: ^-> OpenAI::Internal::Type::Converter::input?, + union: ^-> OpenAI::Internal::Type::Converter::input? + } + | ^-> OpenAI::Internal::Type::Converter::input + | OpenAI::Internal::Type::Converter::input type_info, + { + const: (nil | bool | Integer | Float | Symbol)?, + enum: ^-> OpenAI::Internal::Type::Converter::input?, + union: ^-> OpenAI::Internal::Type::Converter::input? + } + | ^-> OpenAI::Internal::Type::Converter::input + | OpenAI::Internal::Type::Converter::input spec + ) -> ::Hash[Symbol, top] + + def self.new_coerce_state: ( + ?translate_names: bool + ) -> OpenAI::Internal::Type::Converter::coerce_state + + def self.coerce: ( + OpenAI::Internal::Type::Converter::input target, + top value, + ?state: OpenAI::Internal::Type::Converter::coerce_state + ) -> top + + def self.dump: ( + OpenAI::Internal::Type::Converter::input target, + top value, + ?state: OpenAI::Internal::Type::Converter::dump_state + ) -> top + + def self.inspect: (top target, depth: Integer) -> String + end + end + end +end diff --git a/sig/openai/internal/type/enum.rbs b/sig/openai/internal/type/enum.rbs new file mode 100644 index 00000000..2b6f3fb2 --- /dev/null +++ b/sig/openai/internal/type/enum.rbs @@ -0,0 +1,32 @@ +module OpenAI + module Internal + module Type + module Enum + include OpenAI::Internal::Type::Converter + include OpenAI::Internal::Util::SorbetRuntimeSupport + + def self.values: -> ::Array[(nil | bool | Integer | Float | Symbol)] + + def ===: (top other) -> bool + + def ==: (top other) -> bool + + def hash: -> Integer + + def coerce: ( + String | Symbol | top value, + state: OpenAI::Internal::Type::Converter::coerce_state + ) -> (Symbol | top) + + def dump: ( + Symbol | top value, + state: OpenAI::Internal::Type::Converter::dump_state + ) -> (Symbol | top) + + def to_sorbet_type: -> top + + def inspect: (?depth: Integer) -> String + end + end + end +end diff --git a/sig/openai/internal/type/file_input.rbs b/sig/openai/internal/type/file_input.rbs new file mode 100644 index 00000000..db81644c --- /dev/null +++ b/sig/openai/internal/type/file_input.rbs @@ -0,0 +1,25 @@ +module OpenAI + module Internal + module Type + class FileInput + extend OpenAI::Internal::Type::Converter + + def self.===: (top other) -> bool + + def self.==: (top other) -> bool + + def self.coerce: ( + StringIO | String | top value, + state: OpenAI::Internal::Type::Converter::coerce_state + ) -> (StringIO | top) + + def self.dump: ( + Pathname | StringIO | IO | String | top value, + state: OpenAI::Internal::Type::Converter::dump_state + ) -> (Pathname | StringIO | IO | String | top) + + def self.to_sorbet_type: -> top + end + end + end +end diff --git a/sig/openai/internal/type/hash_of.rbs b/sig/openai/internal/type/hash_of.rbs new file mode 100644 index 00000000..1c9d1d58 --- /dev/null +++ b/sig/openai/internal/type/hash_of.rbs @@ -0,0 +1,48 @@ +module OpenAI + module Internal + module Type + class HashOf[Elem] + include OpenAI::Internal::Type::Converter + include OpenAI::Internal::Util::SorbetRuntimeSupport + + def self.[]: ( + ::Hash[Symbol, top] + | ^-> OpenAI::Internal::Type::Converter::input + | OpenAI::Internal::Type::Converter::input type_info, + ?::Hash[Symbol, top] spec + ) -> instance + + def ===: 
diff --git a/sig/openai/internal/type/hash_of.rbs b/sig/openai/internal/type/hash_of.rbs
new file mode 100644
index 00000000..1c9d1d58
--- /dev/null
+++ b/sig/openai/internal/type/hash_of.rbs
@@ -0,0 +1,48 @@
+module OpenAI
+  module Internal
+    module Type
+      class HashOf[Elem]
+        include OpenAI::Internal::Type::Converter
+        include OpenAI::Internal::Util::SorbetRuntimeSupport
+
+        def self.[]: (
+          ::Hash[Symbol, top]
+          | ^-> OpenAI::Internal::Type::Converter::input
+          | OpenAI::Internal::Type::Converter::input type_info,
+          ?::Hash[Symbol, top] spec
+        ) -> instance
+
+        def ===: (top other) -> bool
+
+        def ==: (top other) -> bool
+
+        def hash: -> Integer
+
+        def coerce: (
+          ::Hash[top, top] | top value,
+          state: OpenAI::Internal::Type::Converter::coerce_state
+        ) -> (::Hash[Symbol, top] | top)
+
+        def dump: (
+          ::Hash[top, top] | top value,
+          state: OpenAI::Internal::Type::Converter::dump_state
+        ) -> (::Hash[Symbol, top] | top)
+
+        def to_sorbet_type: -> top
+
+        def item_type: -> Elem
+
+        def nilable?: -> bool
+
+        def initialize: (
+          ::Hash[Symbol, top]
+          | ^-> OpenAI::Internal::Type::Converter::input
+          | OpenAI::Internal::Type::Converter::input type_info,
+          ?::Hash[Symbol, top] spec
+        ) -> void
+
+        def inspect: (?depth: Integer) -> String
+      end
+    end
+  end
+end
diff --git a/sig/openai/internal/type/request_parameters.rbs b/sig/openai/internal/type/request_parameters.rbs
new file mode 100644
index 00000000..6519af95
--- /dev/null
+++ b/sig/openai/internal/type/request_parameters.rbs
@@ -0,0 +1,17 @@
+module OpenAI
+  module Internal
+    module Type
+      type request_parameters = { request_options: OpenAI::request_opts }
+
+      module RequestParameters
+        attr_reader request_options: OpenAI::request_opts
+
+        def request_options=: (OpenAI::request_opts) -> OpenAI::request_opts
+
+        module Converter
+          def dump_request: (top params) -> [top, ::Hash[Symbol, top]]
+        end
+      end
+    end
+  end
+end
diff --git a/sig/openai/internal/type/union.rbs b/sig/openai/internal/type/union.rbs
new file mode 100644
index 00000000..6f209ac7
--- /dev/null
+++ b/sig/openai/internal/type/union.rbs
@@ -0,0 +1,52 @@
+module OpenAI
+  module Internal
+    module Type
+      module Union
+        include OpenAI::Internal::Type::Converter
+        include OpenAI::Internal::Util::SorbetRuntimeSupport
+
+        private def self.known_variants: -> ::Array[[Symbol?, (^-> OpenAI::Internal::Type::Converter::input), ::Hash[Symbol, top]]]
+
+        def self.derefed_variants: -> ::Array[[Symbol?, top, ::Hash[Symbol, top]]]
+
+        def self.variants: -> ::Array[top]
+
+        private def self.discriminator: (Symbol property) -> void
+
+        private def self.variant: (
+          Symbol
+          | ::Hash[Symbol, top]
+          | ^-> OpenAI::Internal::Type::Converter::input
+          | OpenAI::Internal::Type::Converter::input key,
+          ?::Hash[Symbol, top]
+          | ^-> OpenAI::Internal::Type::Converter::input
+          | OpenAI::Internal::Type::Converter::input spec
+        ) -> void
+
+        private def self.resolve_variant: (
+          top value
+        ) -> OpenAI::Internal::Type::Converter::input?
+
+        def ===: (top other) -> bool
+
+        def ==: (top other) -> bool
+
+        def hash: -> Integer
+
+        def coerce: (
+          top value,
+          state: OpenAI::Internal::Type::Converter::coerce_state
+        ) -> top
+
+        def dump: (
+          top value,
+          state: OpenAI::Internal::Type::Converter::dump_state
+        ) -> top
+
+        def to_sorbet_type: -> top
+
+        def inspect: (?depth: Integer) -> String
+      end
+    end
+  end
+end
diff --git a/sig/openai/internal/type/unknown.rbs b/sig/openai/internal/type/unknown.rbs
new file mode 100644
index 00000000..249f91dc
--- /dev/null
+++ b/sig/openai/internal/type/unknown.rbs
@@ -0,0 +1,26 @@
+module OpenAI
+  module Internal
+    module Type
+      class Unknown
+        extend OpenAI::Internal::Type::Converter
+        extend OpenAI::Internal::Util::SorbetRuntimeSupport
+
+        def self.===: (top other) -> bool
+
+        def self.==: (top other) -> bool
+
+        def self.coerce: (
+          top value,
+          state: OpenAI::Internal::Type::Converter::coerce_state
+        ) -> top
+
+        def self.dump: (
+          top value,
+          state: OpenAI::Internal::Type::Converter::dump_state
+        ) -> top
+
+        def self.to_sorbet_type: -> top
+      end
+    end
+  end
+end
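
The Union interface above is likewise clearer with a sketch. Everything below is hypothetical: Shape, Circle, and Square are illustrative stand-ins, and the behaviour comments assume the runtime semantics the signatures imply.

# Hypothetical sketch only. `discriminator` and `variant` are declared as
# private singleton methods above, so they are called inside the module body.
Circle = Class.new  # stand-in model classes for illustration
Square = Class.new

module Shape
  extend OpenAI::Internal::Type::Union

  discriminator :type

  variant :circle, -> { Circle }
  variant :square, -> { Square }
end

# Per the signatures, `variants` lists the resolved members, and `===`
# lets the union participate in `case` matching (assumed behaviour):
Shape.variants  # => [Circle, Square] (assumed)
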
diff --git a/sig/openai/internal/util.rbs b/sig/openai/internal/util.rbs
new file mode 100644
index 00000000..ec425e9f
--- /dev/null
+++ b/sig/openai/internal/util.rbs
@@ -0,0 +1,185 @@
+module OpenAI
+  module Internal
+    module Util
+      extend OpenAI::Internal::Util::SorbetRuntimeSupport
+
+      def self?.monotonic_secs: -> Float
+
+      def self?.walk_namespaces: (
+        Module | Class ns
+      ) -> Enumerable[(Module | Class)]
+
+      def self?.arch: -> String
+
+      def self?.os: -> String
+
+      def self?.primitive?: (top input) -> bool
+
+      def self?.coerce_boolean: (String | bool input) -> (bool | top)
+
+      def self?.coerce_boolean!: (String | bool input) -> bool?
+
+      def self?.coerce_integer: (String | Integer input) -> (Integer | top)
+
+      def self?.coerce_float: (String | Integer | Float input) -> (Float | top)
+
+      def self?.coerce_hash: (top input) -> (::Hash[top, top] | top)
+
+      def self?.coerce_hash!: (top input) -> ::Hash[top, top]?
+
+      def self?.deep_merge_lr: (top lhs, top rhs, ?concat: bool) -> top
+
+      def self?.deep_merge: (
+        *::Array[top] values,
+        ?sentinel: top?,
+        ?concat: bool
+      ) -> top
+
+      def self?.dig: (
+        ::Hash[Symbol, top] | ::Array[top] | top data,
+        (Symbol
+        | Integer
+        | ::Array[(Symbol | Integer)]
+        | (^(top arg0) -> top))? pick
+      ) {
+        -> top?
+      } -> top?
+
+      def self?.uri_origin: (URI::Generic uri) -> String
+
+      def self?.interpolate_path: (String | ::Array[String] path) -> String
+
+      def self?.decode_query: (String? query) -> ::Hash[String, ::Array[String]]
+
+      def self?.encode_query: (
+        ::Hash[String, (::Array[String] | String)?]? query
+      ) -> String?
+
+      type parsed_uri =
+        {
+          scheme: String?,
+          host: String?,
+          port: Integer?,
+          path: String?,
+          query: ::Hash[String, ::Array[String]]
+        }
+
+      def self?.parse_uri: (
+        URI::Generic | String url
+      ) -> OpenAI::Internal::Util::parsed_uri
+
+      def self?.unparse_uri: (
+        OpenAI::Internal::Util::parsed_uri parsed
+      ) -> URI::Generic
+
+      def self?.join_parsed_uri: (
+        OpenAI::Internal::Util::parsed_uri lhs,
+        OpenAI::Internal::Util::parsed_uri rhs
+      ) -> URI::Generic
+
+      def self?.normalized_headers: (
+        *::Hash[String, (String
+        | Integer
+        | ::Array[(String | Integer)?])?] headers
+      ) -> ::Hash[String, String]
+
+      class ReadIOAdapter
+        def close?: -> bool?
+
+        def close: -> void
+
+        private def read_enum: (Integer? max_len) -> String
+
+        def read: (?Integer? max_len, ?String? out_string) -> String?
+
+        def initialize: (
+          String | Pathname | StringIO | Enumerable[String] src
+        ) {
+          (String arg0) -> void
+        } -> void
+      end
+
+      def self?.writable_enum: {
+        (Enumerator::Yielder y) -> void
+      } -> Enumerable[String]
+
+      JSON_CONTENT: Regexp
+      JSONL_CONTENT: Regexp
+
+      def self?.write_multipart_content: (
+        Enumerator::Yielder y,
+        val: top,
+        closing: ::Array[^-> void],
+        ?content_type: String?
+      ) -> void
+
+      def self?.write_multipart_chunk: (
+        Enumerator::Yielder y,
+        boundary: String,
+        key: Symbol | String,
+        val: top,
+        closing: ::Array[^-> void]
+      ) -> void
+
+      def self?.encode_multipart_streaming: (
+        top body
+      ) -> [String, Enumerable[String]]
+
+      def self?.encode_content: (
+        ::Hash[String, String] headers,
+        top body
+      ) -> top
+
+      def self?.force_charset!: (String content_type, text: String) -> void
+
+      def self?.decode_content: (
+        ::Hash[String, String] headers,
+        stream: Enumerable[String],
+        ?suppress_error: bool
+      ) -> top
+
+      def self?.fused_enum: (
+        Enumerable[top] enum,
+        ?external: bool
+      ) {
+        -> void
+      } -> Enumerable[top]
+
+      def self?.close_fused!: (Enumerable[top]? enum) -> void
+
+      def self?.chain_fused: (
+        Enumerable[top]? enum
+      ) {
+        (Enumerator::Yielder arg0) -> void
+      } -> Enumerable[top]
+
+      type server_sent_event =
+        { event: String?, data: String?, id: String?, retry: Integer? }
+
+      def self?.decode_lines: (Enumerable[String] enum) -> Enumerable[String]
+
+      def self?.decode_sse: (
+        Enumerable[String] lines
+      ) -> Enumerable[OpenAI::Internal::Util::server_sent_event]
+
+      module SorbetRuntimeSupport
+        class MissingSorbetRuntimeError < ::RuntimeError
+        end
+
+        private def sorbet_runtime_constants: -> ::Hash[Symbol, top]
+
+        def const_missing: (Symbol name) -> void
+
+        def sorbet_constant_defined?: (Symbol name) -> bool
+
+        def define_sorbet_constant!: (Symbol name) { -> top } -> void
+
+        def to_sorbet_type: -> top
+
+        def self.to_sorbet_type: (
+          OpenAI::Internal::Util::SorbetRuntimeSupport | top `type`
+        ) -> top
+      end
+    end
+  end
+end
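
The `server_sent_event` type together with `decode_lines` and `decode_sse` above suggests a pipeline from raw byte chunks to parsed server-sent events. A minimal sketch under that assumption (field semantics follow the SSE spec; the exact output shape is inferred from the signatures, not confirmed here):

# Illustrative only: feed raw SSE text through the decode pipeline whose
# signatures appear in util.rbs above.
chunks = ["event: message\n", "data: {\"ok\":true}\n", "\n"]

lines = OpenAI::Internal::Util.decode_lines(chunks)
OpenAI::Internal::Util.decode_sse(lines).each do |sse|
  # sse is a Hash per `server_sent_event`:
  # { event: String?, data: String?, id: String?, retry: Integer? }
  puts sse[:event] # => "message" (assumed)
  puts sse[:data]  # => "{\"ok\":true}" (assumed)
end
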
diff --git a/sig/openai/models.rbs b/sig/openai/models.rbs
new file mode 100644
index 00000000..338313ae
--- /dev/null
+++ b/sig/openai/models.rbs
@@ -0,0 +1,217 @@
+module OpenAI
+  module AllModels = OpenAI::Models::AllModels
+
+  module Audio = OpenAI::Models::Audio
+
+  module AudioModel = OpenAI::Models::AudioModel
+
+  module AudioResponseFormat = OpenAI::Models::AudioResponseFormat
+
+  class AutoFileChunkingStrategyParam = OpenAI::Models::AutoFileChunkingStrategyParam
+
+  class Batch = OpenAI::Models::Batch
+
+  class BatchCancelParams = OpenAI::Models::BatchCancelParams
+
+  class BatchCreateParams = OpenAI::Models::BatchCreateParams
+
+  class BatchError = OpenAI::Models::BatchError
+
+  class BatchListParams = OpenAI::Models::BatchListParams
+
+  class BatchRequestCounts = OpenAI::Models::BatchRequestCounts
+
+  class BatchRetrieveParams = OpenAI::Models::BatchRetrieveParams
+
+  module Beta = OpenAI::Models::Beta
+
+  module Chat = OpenAI::Models::Chat
+
+  module ChatModel = OpenAI::Models::ChatModel
+
+  class ComparisonFilter = OpenAI::Models::ComparisonFilter
+
+  class Completion = OpenAI::Models::Completion
+
+  class CompletionChoice = OpenAI::Models::CompletionChoice
+
+  class CompletionCreateParams = OpenAI::Models::CompletionCreateParams
+
+  class CompletionUsage = OpenAI::Models::CompletionUsage
+
+  class CompoundFilter = OpenAI::Models::CompoundFilter
+
+  class ContainerCreateParams = OpenAI::Models::ContainerCreateParams
+
+  class ContainerDeleteParams = OpenAI::Models::ContainerDeleteParams
+
+  class ContainerListParams = OpenAI::Models::ContainerListParams
+
+  class ContainerRetrieveParams = OpenAI::Models::ContainerRetrieveParams
+
+  module Containers = OpenAI::Models::Containers
+
+  module Conversations = OpenAI::Models::Conversations
+
+  class CreateEmbeddingResponse = OpenAI::Models::CreateEmbeddingResponse
+
+  module CustomToolInputFormat = OpenAI::Models::CustomToolInputFormat
+
+  class Embedding = OpenAI::Models::Embedding
+
+  class EmbeddingCreateParams = OpenAI::Models::EmbeddingCreateParams
+
+  module EmbeddingModel = OpenAI::Models::EmbeddingModel
+
+  class ErrorObject = OpenAI::Models::ErrorObject
+
+  class EvalCreateParams = OpenAI::Models::EvalCreateParams
+
+  class EvalCustomDataSourceConfig = OpenAI::Models::EvalCustomDataSourceConfig
+
+  class EvalDeleteParams = OpenAI::Models::EvalDeleteParams
+
+  class EvalListParams = OpenAI::Models::EvalListParams
+
+  class EvalRetrieveParams = OpenAI::Models::EvalRetrieveParams
+
+  module Evals = OpenAI::Models::Evals
+
+  class EvalStoredCompletionsDataSourceConfig = OpenAI::Models::EvalStoredCompletionsDataSourceConfig
+
+  class EvalUpdateParams = OpenAI::Models::EvalUpdateParams
+
+  module FileChunkingStrategy = OpenAI::Models::FileChunkingStrategy
+
+  module FileChunkingStrategyParam = OpenAI::Models::FileChunkingStrategyParam
+
+  class FileContent = OpenAI::Models::FileContent
+
+  class FileContentParams = OpenAI::Models::FileContentParams
+
+  class FileCreateParams = OpenAI::Models::FileCreateParams
+
+  class FileDeleted = OpenAI::Models::FileDeleted
+
+  class FileDeleteParams = OpenAI::Models::FileDeleteParams
+
+  class FileListParams = OpenAI::Models::FileListParams
+
+  class FileObject = OpenAI::Models::FileObject
+
+  module FilePurpose = OpenAI::Models::FilePurpose
+
+  class FileRetrieveParams = OpenAI::Models::FileRetrieveParams
+
+  module FineTuning = OpenAI::Models::FineTuning
+
+  class FunctionDefinition = OpenAI::Models::FunctionDefinition
+
+  FunctionParameters: OpenAI::Internal::Type::Converter
+
+  module Graders = OpenAI::Models::Graders
+
+  class Image = OpenAI::Models::Image
+
+  class ImageCreateVariationParams = OpenAI::Models::ImageCreateVariationParams
+
+  class ImageEditCompletedEvent = OpenAI::Models::ImageEditCompletedEvent
+
+  class ImageEditParams = OpenAI::Models::ImageEditParams
+
+  class ImageEditPartialImageEvent = OpenAI::Models::ImageEditPartialImageEvent
+
+  module ImageEditStreamEvent = OpenAI::Models::ImageEditStreamEvent
+
+  class ImageGenCompletedEvent = OpenAI::Models::ImageGenCompletedEvent
+
+  class ImageGenerateParams = OpenAI::Models::ImageGenerateParams
+
+  class ImageGenPartialImageEvent = OpenAI::Models::ImageGenPartialImageEvent
+
+  module ImageGenStreamEvent = OpenAI::Models::ImageGenStreamEvent
+
+  module ImageModel = OpenAI::Models::ImageModel
+
+  class ImagesResponse = OpenAI::Models::ImagesResponse
+
+  Metadata: OpenAI::Internal::Type::Converter
+
+  class Model = OpenAI::Models::Model
+
+  class ModelDeleted = OpenAI::Models::ModelDeleted
+
+  class ModelDeleteParams = OpenAI::Models::ModelDeleteParams
+
+  class ModelListParams = OpenAI::Models::ModelListParams
+
+  class ModelRetrieveParams = OpenAI::Models::ModelRetrieveParams
+
+  class Moderation = OpenAI::Models::Moderation
+
+  class ModerationCreateParams = OpenAI::Models::ModerationCreateParams
+
+  class ModerationImageURLInput = OpenAI::Models::ModerationImageURLInput
+
+  module ModerationModel = OpenAI::Models::ModerationModel
+
+  module ModerationMultiModalInput = OpenAI::Models::ModerationMultiModalInput
+
+  class ModerationTextInput = OpenAI::Models::ModerationTextInput
+
+  class OtherFileChunkingStrategyObject = OpenAI::Models::OtherFileChunkingStrategyObject
+
+  class Reasoning = OpenAI::Models::Reasoning
+
+  module ReasoningEffort = OpenAI::Models::ReasoningEffort
+
+  class ResponseFormatJSONObject = OpenAI::Models::ResponseFormatJSONObject
+
+  class ResponseFormatJSONSchema = OpenAI::Models::ResponseFormatJSONSchema
+
+  class ResponseFormatText = OpenAI::Models::ResponseFormatText
+
+  class ResponseFormatTextGrammar = OpenAI::Models::ResponseFormatTextGrammar
+
+  class ResponseFormatTextPython = OpenAI::Models::ResponseFormatTextPython
+
+  module Responses = OpenAI::Models::Responses
+
+  module ResponsesModel = OpenAI::Models::ResponsesModel
+
+  class StaticFileChunkingStrategy = OpenAI::Models::StaticFileChunkingStrategy
+
+  class StaticFileChunkingStrategyObject = OpenAI::Models::StaticFileChunkingStrategyObject
+
+  class StaticFileChunkingStrategyObjectParam = OpenAI::Models::StaticFileChunkingStrategyObjectParam
+
+  class Upload = OpenAI::Models::Upload
+
+  class UploadCancelParams = OpenAI::Models::UploadCancelParams
+
+  class UploadCompleteParams = OpenAI::Models::UploadCompleteParams
+
+  class UploadCreateParams = OpenAI::Models::UploadCreateParams
+
+  module Uploads = OpenAI::Models::Uploads
+
+  class VectorStore = OpenAI::Models::VectorStore
+
+  class VectorStoreCreateParams = OpenAI::Models::VectorStoreCreateParams
+
+  class VectorStoreDeleted = OpenAI::Models::VectorStoreDeleted
+
+  class VectorStoreDeleteParams = OpenAI::Models::VectorStoreDeleteParams
+
+  class VectorStoreListParams = OpenAI::Models::VectorStoreListParams
+
+  class VectorStoreRetrieveParams = OpenAI::Models::VectorStoreRetrieveParams
+
+  module VectorStores = OpenAI::Models::VectorStores
+
+  class VectorStoreSearchParams = OpenAI::Models::VectorStoreSearchParams
+
+  class VectorStoreUpdateParams = OpenAI::Models::VectorStoreUpdateParams
+
+  module Webhooks = OpenAI::Models::Webhooks
+end
diff --git a/sig/openai/models/all_models.rbs b/sig/openai/models/all_models.rbs
new file mode 100644
index 00000000..7b1f8e0a
--- /dev/null
+++ b/sig/openai/models/all_models.rbs
@@ -0,0 +1,43 @@
+module OpenAI
+  module Models
+    type all_models =
+      String
+      | OpenAI::Models::chat_model
+      | OpenAI::Models::AllModels::responses_only_model
+
+    module AllModels
+      extend OpenAI::Internal::Type::Union
+
+      type responses_only_model =
+        :"o1-pro"
+        | :"o1-pro-2025-03-19"
+        | :"o3-pro"
+        | :"o3-pro-2025-06-10"
+        | :"o3-deep-research"
+        | :"o3-deep-research-2025-06-26"
+        | :"o4-mini-deep-research"
+        | :"o4-mini-deep-research-2025-06-26"
+        | :"computer-use-preview"
+        | :"computer-use-preview-2025-03-11"
+
+      module ResponsesOnlyModel
+        extend OpenAI::Internal::Type::Enum
+
+        O1_PRO: :"o1-pro"
+        O1_PRO_2025_03_19: :"o1-pro-2025-03-19"
+        O3_PRO: :"o3-pro"
+        O3_PRO_2025_06_10: :"o3-pro-2025-06-10"
+        O3_DEEP_RESEARCH: :"o3-deep-research"
+        O3_DEEP_RESEARCH_2025_06_26: :"o3-deep-research-2025-06-26"
+        O4_MINI_DEEP_RESEARCH: :"o4-mini-deep-research"
+        O4_MINI_DEEP_RESEARCH_2025_06_26: :"o4-mini-deep-research-2025-06-26"
+        COMPUTER_USE_PREVIEW: :"computer-use-preview"
+        COMPUTER_USE_PREVIEW_2025_03_11: :"computer-use-preview-2025-03-11"
+
+        def self?.values: -> ::Array[OpenAI::Models::AllModels::responses_only_model]
+      end
+
+      def self?.variants: -> ::Array[OpenAI::Models::all_models]
+    end
+  end
+end
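
The `self?.values` convention on enum modules like ResponsesOnlyModel implies membership checks are plain array lookups. A short sketch under that assumption:

# Illustrative only: `values` returns the declared members per the
# signature above, so filtering and membership tests are ordinary Array ops.
only = OpenAI::Models::AllModels::ResponsesOnlyModel.values
only.include?(:"o3-pro")       # => true
only.include?(:"gpt-4o-mini")  # => false; chat-model ids live in ChatModel
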
diff --git a/sig/openai/models/audio/speech_create_params.rbs b/sig/openai/models/audio/speech_create_params.rbs
index 4a89f863..e01b1b9e 100644
--- a/sig/openai/models/audio/speech_create_params.rbs
+++ b/sig/openai/models/audio/speech_create_params.rbs
@@ -6,14 +6,16 @@ module OpenAI
           input: String,
           model: OpenAI::Models::Audio::SpeechCreateParams::model,
           voice: OpenAI::Models::Audio::SpeechCreateParams::voice,
+          instructions: String,
           response_format: OpenAI::Models::Audio::SpeechCreateParams::response_format,
-          speed: Float
+          speed: Float,
+          stream_format: OpenAI::Models::Audio::SpeechCreateParams::stream_format
         }
-        & OpenAI::request_parameters
+        & OpenAI::Internal::Type::request_parameters

-      class SpeechCreateParams < OpenAI::BaseModel
-        extend OpenAI::RequestParameters::Converter
-        include OpenAI::RequestParameters
+      class SpeechCreateParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters

         attr_accessor input: String

@@ -21,6 +23,10 @@ module OpenAI

         attr_accessor voice: OpenAI::Models::Audio::SpeechCreateParams::voice

+        attr_reader instructions: String?
+
+        def instructions=: (String) -> String
+
         attr_reader response_format: OpenAI::Models::Audio::SpeechCreateParams::response_format?

         def response_format=: (
@@ -31,56 +37,73 @@ module OpenAI

         def speed=: (Float) -> Float

-        def initialize:
-          (
-            input: String,
-            model: OpenAI::Models::Audio::SpeechCreateParams::model,
-            voice: OpenAI::Models::Audio::SpeechCreateParams::voice,
-            response_format: OpenAI::Models::Audio::SpeechCreateParams::response_format,
-            speed: Float,
-            request_options: OpenAI::request_opts
-          ) -> void
-          | (
-            ?OpenAI::Models::Audio::speech_create_params
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Audio::speech_create_params
+        attr_reader stream_format: OpenAI::Models::Audio::SpeechCreateParams::stream_format?
+
+        def stream_format=: (
+          OpenAI::Models::Audio::SpeechCreateParams::stream_format
+        ) -> OpenAI::Models::Audio::SpeechCreateParams::stream_format
+
+        def initialize: (
+          input: String,
+          model: OpenAI::Models::Audio::SpeechCreateParams::model,
+          voice: OpenAI::Models::Audio::SpeechCreateParams::voice,
+          ?instructions: String,
+          ?response_format: OpenAI::Models::Audio::SpeechCreateParams::response_format,
+          ?speed: Float,
+          ?stream_format: OpenAI::Models::Audio::SpeechCreateParams::stream_format,
+          ?request_options: OpenAI::request_opts
+        ) -> void
+
+        def to_hash: -> {
+          input: String,
+          model: OpenAI::Models::Audio::SpeechCreateParams::model,
+          voice: OpenAI::Models::Audio::SpeechCreateParams::voice,
+          instructions: String,
+          response_format: OpenAI::Models::Audio::SpeechCreateParams::response_format,
+          speed: Float,
+          stream_format: OpenAI::Models::Audio::SpeechCreateParams::stream_format,
+          request_options: OpenAI::RequestOptions
+        }

         type model = String | OpenAI::Models::Audio::speech_model

-        class Model < OpenAI::Union
-          private def self.variants: -> [[nil, String], [nil, OpenAI::Models::Audio::speech_model]]
+        module Model
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Audio::SpeechCreateParams::model]
         end

         type voice =
-          :alloy
+          String
+          | :alloy
           | :ash
+          | :ballad
           | :coral
           | :echo
-          | :fable
-          | :onyx
-          | :nova
           | :sage
           | :shimmer
+          | :verse
+
+        module Voice
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Audio::SpeechCreateParams::voice]

-        class Voice < OpenAI::Enum
           ALLOY: :alloy
           ASH: :ash
+          BALLAD: :ballad
           CORAL: :coral
           ECHO: :echo
-          FABLE: :fable
-          ONYX: :onyx
-          NOVA: :nova
           SAGE: :sage
           SHIMMER: :shimmer
-
-          def self.values: -> ::Array[OpenAI::Models::Audio::SpeechCreateParams::voice]
+          VERSE: :verse
         end

         type response_format = :mp3 | :opus | :aac | :flac | :wav | :pcm

-        class ResponseFormat < OpenAI::Enum
+        module ResponseFormat
+          extend OpenAI::Internal::Type::Enum
+
           MP3: :mp3
           OPUS: :opus
           AAC: :aac
@@ -88,7 +111,18 @@ module OpenAI
           WAV: :wav
           PCM: :pcm

-          def self.values: -> ::Array[OpenAI::Models::Audio::SpeechCreateParams::response_format]
+          def self?.values: -> ::Array[OpenAI::Models::Audio::SpeechCreateParams::response_format]
+        end
+
+        type stream_format = :sse | :audio
+
+        module StreamFormat
+          extend OpenAI::Internal::Type::Enum
+
+          SSE: :sse
+          AUDIO: :audio
+
+          def self?.values: -> ::Array[OpenAI::Models::Audio::SpeechCreateParams::stream_format]
         end
       end
     end
   end
diff --git a/sig/openai/models/audio/speech_model.rbs b/sig/openai/models/audio/speech_model.rbs
index 7ab47f1f..1dfa36a8 100644
--- a/sig/openai/models/audio/speech_model.rbs
+++ b/sig/openai/models/audio/speech_model.rbs
@@ -1,13 +1,16 @@
 module OpenAI
   module Models
     module Audio
-      type speech_model = :"tts-1" | :"tts-1-hd"
+      type speech_model = :"tts-1" | :"tts-1-hd" | :"gpt-4o-mini-tts"
+
+      module SpeechModel
+        extend OpenAI::Internal::Type::Enum

-      class SpeechModel < OpenAI::Enum
         TTS_1: :"tts-1"
         TTS_1_HD: :"tts-1-hd"
+        GPT_4O_MINI_TTS: :"gpt-4o-mini-tts"

-        def self.values: -> ::Array[OpenAI::Models::Audio::speech_model]
+        def self?.values: -> ::Array[OpenAI::Models::Audio::speech_model]
       end
     end
   end
diff --git a/sig/openai/models/audio/transcription.rbs b/sig/openai/models/audio/transcription.rbs
index 6f7b7c9f..bacd5aa7 100644
--- a/sig/openai/models/audio/transcription.rbs
+++ b/sig/openai/models/audio/transcription.rbs
@@ -1,18 +1,150 @@
 module OpenAI
   module Models
     module Audio
-      type transcription = { text: String }
+      type transcription =
+        {
+          text: String,
+          logprobs: ::Array[OpenAI::Audio::Transcription::Logprob],
+          usage: OpenAI::Models::Audio::Transcription::usage
+        }

-      class Transcription < OpenAI::BaseModel
+      class Transcription < OpenAI::Internal::Type::BaseModel
         attr_accessor text: String

-        def initialize:
-          (text: String) -> void
-          | (
-            ?OpenAI::Models::Audio::transcription | OpenAI::BaseModel data
+        attr_reader logprobs: ::Array[OpenAI::Audio::Transcription::Logprob]?
+
+        def logprobs=: (
+          ::Array[OpenAI::Audio::Transcription::Logprob]
+        ) -> ::Array[OpenAI::Audio::Transcription::Logprob]
+
+        attr_reader usage: OpenAI::Models::Audio::Transcription::usage?
+
+        def usage=: (
+          OpenAI::Models::Audio::Transcription::usage
+        ) -> OpenAI::Models::Audio::Transcription::usage
+
+        def initialize: (
+          text: String,
+          ?logprobs: ::Array[OpenAI::Audio::Transcription::Logprob],
+          ?usage: OpenAI::Models::Audio::Transcription::usage
+        ) -> void
+
+        def to_hash: -> {
+          text: String,
+          logprobs: ::Array[OpenAI::Audio::Transcription::Logprob],
+          usage: OpenAI::Models::Audio::Transcription::usage
+        }
+
+        type logprob = { token: String, bytes: ::Array[Float], logprob: Float }
+
+        class Logprob < OpenAI::Internal::Type::BaseModel
+          attr_reader token: String?
+
+          def token=: (String) -> String
+
+          attr_reader bytes: ::Array[Float]?
+
+          def bytes=: (::Array[Float]) -> ::Array[Float]
+
+          attr_reader logprob: Float?
+
+          def logprob=: (Float) -> Float
+
+          def initialize: (
+            ?token: String,
+            ?bytes: ::Array[Float],
+            ?logprob: Float
           ) -> void

-        def to_hash: -> OpenAI::Models::Audio::transcription
+          def to_hash: -> {
+            token: String,
+            bytes: ::Array[Float],
+            logprob: Float
+          }
+        end
+
+        type usage =
+          OpenAI::Audio::Transcription::Usage::Tokens
+          | OpenAI::Audio::Transcription::Usage::Duration
+
+        module Usage
+          extend OpenAI::Internal::Type::Union
+
+          type tokens =
+            {
+              input_tokens: Integer,
+              output_tokens: Integer,
+              total_tokens: Integer,
+              type: :tokens,
+              input_token_details: OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails
+            }
+
+          class Tokens < OpenAI::Internal::Type::BaseModel
+            attr_accessor input_tokens: Integer
+
+            attr_accessor output_tokens: Integer
+
+            attr_accessor total_tokens: Integer
+
+            attr_accessor type: :tokens
+
+            attr_reader input_token_details: OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails?
+
+            def input_token_details=: (
+              OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails
+            ) -> OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails
+
+            def initialize: (
+              input_tokens: Integer,
+              output_tokens: Integer,
+              total_tokens: Integer,
+              ?input_token_details: OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails,
+              ?type: :tokens
+            ) -> void
+
+            def to_hash: -> {
+              input_tokens: Integer,
+              output_tokens: Integer,
+              total_tokens: Integer,
+              type: :tokens,
+              input_token_details: OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails
+            }
+
+            type input_token_details =
+              { audio_tokens: Integer, text_tokens: Integer }
+
+            class InputTokenDetails < OpenAI::Internal::Type::BaseModel
+              attr_reader audio_tokens: Integer?
+
+              def audio_tokens=: (Integer) -> Integer
+
+              attr_reader text_tokens: Integer?
+
+              def text_tokens=: (Integer) -> Integer
+
+              def initialize: (
+                ?audio_tokens: Integer,
+                ?text_tokens: Integer
+              ) -> void
+
+              def to_hash: -> { audio_tokens: Integer, text_tokens: Integer }
+            end
+          end
+
+          type duration = { seconds: Float, type: :duration }
+
+          class Duration < OpenAI::Internal::Type::BaseModel
+            attr_accessor seconds: Float
+
+            attr_accessor type: :duration
+
+            def initialize: (seconds: Float, ?type: :duration) -> void
+
+            def to_hash: -> { seconds: Float, type: :duration }
+          end
+
+          def self?.variants: -> ::Array[OpenAI::Models::Audio::Transcription::usage]
+        end
       end
     end
   end
diff --git a/sig/openai/models/audio/transcription_create_params.rbs b/sig/openai/models/audio/transcription_create_params.rbs
index 3f154e80..28e08060 100644
--- a/sig/openai/models/audio/transcription_create_params.rbs
+++ b/sig/openai/models/audio/transcription_create_params.rbs
@@ -3,24 +3,34 @@ module OpenAI
     module Audio
       type transcription_create_params =
         {
-          file: (IO | StringIO),
+          file: OpenAI::Internal::file_input,
           model: OpenAI::Models::Audio::TranscriptionCreateParams::model,
+          chunking_strategy: OpenAI::Models::Audio::TranscriptionCreateParams::chunking_strategy?,
+          include: ::Array[OpenAI::Models::Audio::transcription_include],
           language: String,
           prompt: String,
           response_format: OpenAI::Models::audio_response_format,
           temperature: Float,
           timestamp_granularities: ::Array[OpenAI::Models::Audio::TranscriptionCreateParams::timestamp_granularity]
         }
-        & OpenAI::request_parameters
+        & OpenAI::Internal::Type::request_parameters

-      class TranscriptionCreateParams < OpenAI::BaseModel
-        extend OpenAI::RequestParameters::Converter
-        include OpenAI::RequestParameters
+      class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters

-        attr_accessor file: IO | StringIO
+        attr_accessor file: OpenAI::Internal::file_input

         attr_accessor model: OpenAI::Models::Audio::TranscriptionCreateParams::model

+        attr_accessor chunking_strategy: OpenAI::Models::Audio::TranscriptionCreateParams::chunking_strategy?
+
+        attr_reader include: ::Array[OpenAI::Models::Audio::transcription_include]?
+
+        def include=: (
+          ::Array[OpenAI::Models::Audio::transcription_include]
+        ) -> ::Array[OpenAI::Models::Audio::transcription_include]
+
         attr_reader language: String?
         def language=: (String) -> String

@@ -45,37 +55,107 @@ module OpenAI
           ::Array[OpenAI::Models::Audio::TranscriptionCreateParams::timestamp_granularity]
         ) -> ::Array[OpenAI::Models::Audio::TranscriptionCreateParams::timestamp_granularity]

-        def initialize:
-          (
-            file: IO | StringIO,
-            model: OpenAI::Models::Audio::TranscriptionCreateParams::model,
-            language: String,
-            prompt: String,
-            response_format: OpenAI::Models::audio_response_format,
-            temperature: Float,
-            timestamp_granularities: ::Array[OpenAI::Models::Audio::TranscriptionCreateParams::timestamp_granularity],
-            request_options: OpenAI::request_opts
-          ) -> void
-          | (
-            ?OpenAI::Models::Audio::transcription_create_params
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Audio::transcription_create_params
+        def initialize: (
+          file: OpenAI::Internal::file_input,
+          model: OpenAI::Models::Audio::TranscriptionCreateParams::model,
+          ?chunking_strategy: OpenAI::Models::Audio::TranscriptionCreateParams::chunking_strategy?,
+          ?include: ::Array[OpenAI::Models::Audio::transcription_include],
+          ?language: String,
+          ?prompt: String,
+          ?response_format: OpenAI::Models::audio_response_format,
+          ?temperature: Float,
+          ?timestamp_granularities: ::Array[OpenAI::Models::Audio::TranscriptionCreateParams::timestamp_granularity],
+          ?request_options: OpenAI::request_opts
+        ) -> void
+
+        def to_hash: -> {
+          file: OpenAI::Internal::file_input,
+          model: OpenAI::Models::Audio::TranscriptionCreateParams::model,
+          chunking_strategy: OpenAI::Models::Audio::TranscriptionCreateParams::chunking_strategy?,
+          include: ::Array[OpenAI::Models::Audio::transcription_include],
+          language: String,
+          prompt: String,
+          response_format: OpenAI::Models::audio_response_format,
+          temperature: Float,
+          timestamp_granularities: ::Array[OpenAI::Models::Audio::TranscriptionCreateParams::timestamp_granularity],
+          request_options: OpenAI::RequestOptions
+        }

         type model = String | OpenAI::Models::audio_model

-        class Model < OpenAI::Union
-          private def self.variants: -> [[nil, String], [nil, OpenAI::Models::audio_model]]
+        module Model
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Audio::TranscriptionCreateParams::model]
+        end
+
+        type chunking_strategy =
+          :auto
+          | OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig
+
+        module ChunkingStrategy
+          extend OpenAI::Internal::Type::Union
+
+          type vad_config =
+            {
+              type: OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::type_,
+              prefix_padding_ms: Integer,
+              silence_duration_ms: Integer,
+              threshold: Float
+            }
+
+          class VadConfig < OpenAI::Internal::Type::BaseModel
+            attr_accessor type: OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::type_
+
+            attr_reader prefix_padding_ms: Integer?
+
+            def prefix_padding_ms=: (Integer) -> Integer
+
+            attr_reader silence_duration_ms: Integer?
+
+            def silence_duration_ms=: (Integer) -> Integer
+
+            attr_reader threshold: Float?
+
+            def threshold=: (Float) -> Float
+
+            def initialize: (
+              type: OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::type_,
+              ?prefix_padding_ms: Integer,
+              ?silence_duration_ms: Integer,
+              ?threshold: Float
+            ) -> void
+
+            def to_hash: -> {
+              type: OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::type_,
+              prefix_padding_ms: Integer,
+              silence_duration_ms: Integer,
+              threshold: Float
+            }
+
+            type type_ = :server_vad
+
+            module Type
+              extend OpenAI::Internal::Type::Enum
+
+              SERVER_VAD: :server_vad
+
+              def self?.values: -> ::Array[OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::type_]
+            end
+          end
+
+          def self?.variants: -> ::Array[OpenAI::Models::Audio::TranscriptionCreateParams::chunking_strategy]
         end

         type timestamp_granularity = :word | :segment

-        class TimestampGranularity < OpenAI::Enum
+        module TimestampGranularity
+          extend OpenAI::Internal::Type::Enum
+
           WORD: :word
           SEGMENT: :segment

-          def self.values: -> ::Array[OpenAI::Models::Audio::TranscriptionCreateParams::timestamp_granularity]
+          def self?.values: -> ::Array[OpenAI::Models::Audio::TranscriptionCreateParams::timestamp_granularity]
         end
       end
     end
diff --git a/sig/openai/models/audio/transcription_create_response.rbs b/sig/openai/models/audio/transcription_create_response.rbs
index bae11946..5e18958f 100644
--- a/sig/openai/models/audio/transcription_create_response.rbs
+++ b/sig/openai/models/audio/transcription_create_response.rbs
@@ -2,11 +2,12 @@ module OpenAI
   module Models
     module Audio
       type transcription_create_response =
-        OpenAI::Models::Audio::Transcription
-        | OpenAI::Models::Audio::TranscriptionVerbose
+        OpenAI::Audio::Transcription | OpenAI::Audio::TranscriptionVerbose

-      class TranscriptionCreateResponse < OpenAI::Union
-        private def self.variants: -> [[nil, OpenAI::Models::Audio::Transcription], [nil, OpenAI::Models::Audio::TranscriptionVerbose]]
+      module TranscriptionCreateResponse
+        extend OpenAI::Internal::Type::Union
+
+        def self?.variants: -> ::Array[OpenAI::Models::Audio::transcription_create_response]
       end
     end
   end
diff --git a/sig/openai/models/audio/transcription_include.rbs b/sig/openai/models/audio/transcription_include.rbs
new file mode 100644
index 00000000..de5322fa
--- /dev/null
+++ b/sig/openai/models/audio/transcription_include.rbs
@@ -0,0 +1,15 @@
+module OpenAI
+  module Models
+    module Audio
+      type transcription_include = :logprobs
+
+      module TranscriptionInclude
+        extend OpenAI::Internal::Type::Enum
+
+        LOGPROBS: :logprobs
+
+        def self?.values: -> ::Array[OpenAI::Models::Audio::transcription_include]
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/audio/transcription_segment.rbs b/sig/openai/models/audio/transcription_segment.rbs
index a721f545..82da7095 100644
--- a/sig/openai/models/audio/transcription_segment.rbs
+++ b/sig/openai/models/audio/transcription_segment.rbs
@@ -15,7 +15,7 @@ module OpenAI
           tokens: ::Array[Integer]
         }

-      class TranscriptionSegment < OpenAI::BaseModel
+      class TranscriptionSegment < OpenAI::Internal::Type::BaseModel
         attr_accessor id: Integer

         attr_accessor avg_logprob: Float
@@ -36,25 +36,31 @@ module OpenAI

         attr_accessor tokens: ::Array[Integer]

-        def initialize:
-          (
-            id: Integer,
-            avg_logprob: Float,
-            compression_ratio: Float,
-            end_: Float,
-            no_speech_prob: Float,
-            seek: Integer,
-            start: Float,
-            temperature: Float,
-            text: String,
-            tokens: ::Array[Integer]
-          ) -> void
-          | (
-            ?OpenAI::Models::Audio::transcription_segment
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Audio::transcription_segment
+        def initialize: (
+          id: Integer,
+          avg_logprob: Float,
+          compression_ratio: Float,
+          end_: Float,
+          no_speech_prob: Float,
+          seek: Integer,
+          start: Float,
+          temperature: Float,
+          text: String,
+          tokens: ::Array[Integer]
+        ) -> void
+
+        def to_hash: -> {
+          id: Integer,
+          avg_logprob: Float,
+          compression_ratio: Float,
+          end_: Float,
+          no_speech_prob: Float,
+          seek: Integer,
+          start: Float,
+          temperature: Float,
+          text: String,
+          tokens: ::Array[Integer]
+        }
       end
     end
   end
diff --git a/sig/openai/models/audio/transcription_stream_event.rbs b/sig/openai/models/audio/transcription_stream_event.rbs
new file mode 100644
index 00000000..f6c55919
--- /dev/null
+++ b/sig/openai/models/audio/transcription_stream_event.rbs
@@ -0,0 +1,15 @@
+module OpenAI
+  module Models
+    module Audio
+      type transcription_stream_event =
+        OpenAI::Audio::TranscriptionTextDeltaEvent
+        | OpenAI::Audio::TranscriptionTextDoneEvent
+
+      module TranscriptionStreamEvent
+        extend OpenAI::Internal::Type::Union
+
+        def self?.variants: -> ::Array[OpenAI::Models::Audio::transcription_stream_event]
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/audio/transcription_text_delta_event.rbs b/sig/openai/models/audio/transcription_text_delta_event.rbs
new file mode 100644
index 00000000..08280006
--- /dev/null
+++ b/sig/openai/models/audio/transcription_text_delta_event.rbs
@@ -0,0 +1,65 @@
+module OpenAI
+  module Models
+    module Audio
+      type transcription_text_delta_event =
+        {
+          delta: String,
+          type: :"transcript.text.delta",
+          logprobs: ::Array[OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob]
+        }
+
+      class TranscriptionTextDeltaEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor delta: String
+
+        attr_accessor type: :"transcript.text.delta"
+
+        attr_reader logprobs: ::Array[OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob]?
+
+        def logprobs=: (
+          ::Array[OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob]
+        ) -> ::Array[OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob]
+
+        def initialize: (
+          delta: String,
+          ?logprobs: ::Array[OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob],
+          ?type: :"transcript.text.delta"
+        ) -> void
+
+        def to_hash: -> {
+          delta: String,
+          type: :"transcript.text.delta",
+          logprobs: ::Array[OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob]
+        }
+
+        type logprob =
+          { token: String, bytes: ::Array[Integer], logprob: Float }
+
+        class Logprob < OpenAI::Internal::Type::BaseModel
+          attr_reader token: String?
+
+          def token=: (String) -> String
+
+          attr_reader bytes: ::Array[Integer]?
+
+          def bytes=: (::Array[Integer]) -> ::Array[Integer]
+
+          attr_reader logprob: Float?
+
+          def logprob=: (Float) -> Float
+
+          def initialize: (
+            ?token: String,
+            ?bytes: ::Array[Integer],
+            ?logprob: Float
+          ) -> void
+
+          def to_hash: -> {
+            token: String,
+            bytes: ::Array[Integer],
+            logprob: Float
+          }
+        end
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/audio/transcription_text_done_event.rbs b/sig/openai/models/audio/transcription_text_done_event.rbs
new file mode 100644
index 00000000..b2a14db4
--- /dev/null
+++ b/sig/openai/models/audio/transcription_text_done_event.rbs
@@ -0,0 +1,135 @@
+module OpenAI
+  module Models
+    module Audio
+      type transcription_text_done_event =
+        {
+          text: String,
+          type: :"transcript.text.done",
+          logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob],
+          usage: OpenAI::Audio::TranscriptionTextDoneEvent::Usage
+        }
+
+      class TranscriptionTextDoneEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor text: String
+
+        attr_accessor type: :"transcript.text.done"
+
+        attr_reader logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob]?
+
+        def logprobs=: (
+          ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob]
+        ) -> ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob]
+
+        attr_reader usage: OpenAI::Audio::TranscriptionTextDoneEvent::Usage?
+
+        def usage=: (
+          OpenAI::Audio::TranscriptionTextDoneEvent::Usage
+        ) -> OpenAI::Audio::TranscriptionTextDoneEvent::Usage
+
+        def initialize: (
+          text: String,
+          ?logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob],
+          ?usage: OpenAI::Audio::TranscriptionTextDoneEvent::Usage,
+          ?type: :"transcript.text.done"
+        ) -> void
+
+        def to_hash: -> {
+          text: String,
+          type: :"transcript.text.done",
+          logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob],
+          usage: OpenAI::Audio::TranscriptionTextDoneEvent::Usage
+        }
+
+        type logprob =
+          { token: String, bytes: ::Array[Integer], logprob: Float }
+
+        class Logprob < OpenAI::Internal::Type::BaseModel
+          attr_reader token: String?
+
+          def token=: (String) -> String
+
+          attr_reader bytes: ::Array[Integer]?
+
+          def bytes=: (::Array[Integer]) -> ::Array[Integer]
+
+          attr_reader logprob: Float?
+
+          def logprob=: (Float) -> Float
+
+          def initialize: (
+            ?token: String,
+            ?bytes: ::Array[Integer],
+            ?logprob: Float
+          ) -> void
+
+          def to_hash: -> {
+            token: String,
+            bytes: ::Array[Integer],
+            logprob: Float
+          }
+        end
+
+        type usage =
+          {
+            input_tokens: Integer,
+            output_tokens: Integer,
+            total_tokens: Integer,
+            type: :tokens,
+            input_token_details: OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails
+          }
+
+        class Usage < OpenAI::Internal::Type::BaseModel
+          attr_accessor input_tokens: Integer
+
+          attr_accessor output_tokens: Integer
+
+          attr_accessor total_tokens: Integer
+
+          attr_accessor type: :tokens
+
+          attr_reader input_token_details: OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails?
+
+          def input_token_details=: (
+            OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails
+          ) -> OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails
+
+          def initialize: (
+            input_tokens: Integer,
+            output_tokens: Integer,
+            total_tokens: Integer,
+            ?input_token_details: OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails,
+            ?type: :tokens
+          ) -> void
+
+          def to_hash: -> {
+            input_tokens: Integer,
+            output_tokens: Integer,
+            total_tokens: Integer,
+            type: :tokens,
+            input_token_details: OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails
+          }
+
+          type input_token_details =
+            { audio_tokens: Integer, text_tokens: Integer }
+
+          class InputTokenDetails < OpenAI::Internal::Type::BaseModel
+            attr_reader audio_tokens: Integer?
+
+            def audio_tokens=: (Integer) -> Integer
+
+            attr_reader text_tokens: Integer?
+
+            def text_tokens=: (Integer) -> Integer
+
+            def initialize: (
+              ?audio_tokens: Integer,
+              ?text_tokens: Integer
+            ) -> void
+
+            def to_hash: -> { audio_tokens: Integer, text_tokens: Integer }
+          end
+        end
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/audio/transcription_verbose.rbs b/sig/openai/models/audio/transcription_verbose.rbs
index 268831d6..c84b3539 100644
--- a/sig/openai/models/audio/transcription_verbose.rbs
+++ b/sig/openai/models/audio/transcription_verbose.rbs
@@ -6,43 +6,65 @@ module OpenAI
           duration: Float,
           language: String,
           text: String,
-          segments: ::Array[OpenAI::Models::Audio::TranscriptionSegment],
-          words: ::Array[OpenAI::Models::Audio::TranscriptionWord]
+          segments: ::Array[OpenAI::Audio::TranscriptionSegment],
+          usage: OpenAI::Audio::TranscriptionVerbose::Usage,
+          words: ::Array[OpenAI::Audio::TranscriptionWord]
         }

-      class TranscriptionVerbose < OpenAI::BaseModel
+      class TranscriptionVerbose < OpenAI::Internal::Type::BaseModel
         attr_accessor duration: Float

         attr_accessor language: String

         attr_accessor text: String

-        attr_reader segments: ::Array[OpenAI::Models::Audio::TranscriptionSegment]?
+        attr_reader segments: ::Array[OpenAI::Audio::TranscriptionSegment]?

         def segments=: (
-          ::Array[OpenAI::Models::Audio::TranscriptionSegment]
-        ) -> ::Array[OpenAI::Models::Audio::TranscriptionSegment]
+          ::Array[OpenAI::Audio::TranscriptionSegment]
+        ) -> ::Array[OpenAI::Audio::TranscriptionSegment]

-        attr_reader words: ::Array[OpenAI::Models::Audio::TranscriptionWord]?
+        attr_reader usage: OpenAI::Audio::TranscriptionVerbose::Usage?
+
+        def usage=: (
+          OpenAI::Audio::TranscriptionVerbose::Usage
+        ) -> OpenAI::Audio::TranscriptionVerbose::Usage
+
+        attr_reader words: ::Array[OpenAI::Audio::TranscriptionWord]?
         def words=: (
-          ::Array[OpenAI::Models::Audio::TranscriptionWord]
-        ) -> ::Array[OpenAI::Models::Audio::TranscriptionWord]
-
-        def initialize:
-          (
-            duration: Float,
-            language: String,
-            text: String,
-            segments: ::Array[OpenAI::Models::Audio::TranscriptionSegment],
-            words: ::Array[OpenAI::Models::Audio::TranscriptionWord]
-          ) -> void
-          | (
-            ?OpenAI::Models::Audio::transcription_verbose
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Audio::transcription_verbose
+          ::Array[OpenAI::Audio::TranscriptionWord]
+        ) -> ::Array[OpenAI::Audio::TranscriptionWord]
+
+        def initialize: (
+          duration: Float,
+          language: String,
+          text: String,
+          ?segments: ::Array[OpenAI::Audio::TranscriptionSegment],
+          ?usage: OpenAI::Audio::TranscriptionVerbose::Usage,
+          ?words: ::Array[OpenAI::Audio::TranscriptionWord]
+        ) -> void
+
+        def to_hash: -> {
+          duration: Float,
+          language: String,
+          text: String,
+          segments: ::Array[OpenAI::Audio::TranscriptionSegment],
+          usage: OpenAI::Audio::TranscriptionVerbose::Usage,
+          words: ::Array[OpenAI::Audio::TranscriptionWord]
+        }
+
+        type usage = { seconds: Float, type: :duration }
+
+        class Usage < OpenAI::Internal::Type::BaseModel
+          attr_accessor seconds: Float
+
+          attr_accessor type: :duration
+
+          def initialize: (seconds: Float, ?type: :duration) -> void
+
+          def to_hash: -> { seconds: Float, type: :duration }
+        end
       end
     end
   end
diff --git a/sig/openai/models/audio/transcription_word.rbs b/sig/openai/models/audio/transcription_word.rbs
index 17a2ca32..1bd7d752 100644
--- a/sig/openai/models/audio/transcription_word.rbs
+++ b/sig/openai/models/audio/transcription_word.rbs
@@ -3,20 +3,16 @@ module OpenAI
     module Audio
       type transcription_word = { end_: Float, start: Float, word: String }

-      class TranscriptionWord < OpenAI::BaseModel
+      class TranscriptionWord < OpenAI::Internal::Type::BaseModel
         attr_accessor end_: Float

         attr_accessor start: Float

         attr_accessor word: String

-        def initialize:
-          (end_: Float, start: Float, word: String) -> void
-          | (
-            ?OpenAI::Models::Audio::transcription_word | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (end_: Float, start: Float, word: String) -> void

-        def to_hash: -> OpenAI::Models::Audio::transcription_word
+        def to_hash: -> { end_: Float, start: Float, word: String }
       end
     end
   end
diff --git a/sig/openai/models/audio/translation.rbs b/sig/openai/models/audio/translation.rbs
index e16987a3..832a27d3 100644
--- a/sig/openai/models/audio/translation.rbs
+++ b/sig/openai/models/audio/translation.rbs
@@ -3,16 +3,12 @@ module OpenAI
     module Audio
       type translation = { text: String }

-      class Translation < OpenAI::BaseModel
+      class Translation < OpenAI::Internal::Type::BaseModel
         attr_accessor text: String

-        def initialize:
-          (text: String) -> void
-          | (
-            ?OpenAI::Models::Audio::translation | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (text: String) -> void

-        def to_hash: -> OpenAI::Models::Audio::translation
+        def to_hash: -> { text: String }
       end
     end
   end
diff --git a/sig/openai/models/audio/translation_create_params.rbs b/sig/openai/models/audio/translation_create_params.rbs
index 3dd3ee9c..da4cbb3c 100644
--- a/sig/openai/models/audio/translation_create_params.rbs
+++ b/sig/openai/models/audio/translation_create_params.rbs
@@ -3,19 +3,19 @@ module OpenAI
     module Audio
       type translation_create_params =
         {
-          file: (IO | StringIO),
+          file: OpenAI::Internal::file_input,
           model: OpenAI::Models::Audio::TranslationCreateParams::model,
           prompt: String,
-          response_format: OpenAI::Models::audio_response_format,
+          response_format: OpenAI::Models::Audio::TranslationCreateParams::response_format,
           temperature: Float
         }
-        & OpenAI::request_parameters
+        & OpenAI::Internal::Type::request_parameters

-      class TranslationCreateParams < OpenAI::BaseModel
-        extend OpenAI::RequestParameters::Converter
-        include OpenAI::RequestParameters
+      class TranslationCreateParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters

-        attr_accessor file: IO | StringIO
+        attr_accessor file: OpenAI::Internal::file_input

         attr_accessor model: OpenAI::Models::Audio::TranslationCreateParams::model

@@ -23,36 +23,54 @@ module OpenAI

         def prompt=: (String) -> String

-        attr_reader response_format: OpenAI::Models::audio_response_format?
+        attr_reader response_format: OpenAI::Models::Audio::TranslationCreateParams::response_format?

         def response_format=: (
-          OpenAI::Models::audio_response_format
-        ) -> OpenAI::Models::audio_response_format
+          OpenAI::Models::Audio::TranslationCreateParams::response_format
+        ) -> OpenAI::Models::Audio::TranslationCreateParams::response_format

         attr_reader temperature: Float?

         def temperature=: (Float) -> Float

-        def initialize:
-          (
-            file: IO | StringIO,
-            model: OpenAI::Models::Audio::TranslationCreateParams::model,
-            prompt: String,
-            response_format: OpenAI::Models::audio_response_format,
-            temperature: Float,
-            request_options: OpenAI::request_opts
-          ) -> void
-          | (
-            ?OpenAI::Models::Audio::translation_create_params
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Audio::translation_create_params
+        def initialize: (
+          file: OpenAI::Internal::file_input,
+          model: OpenAI::Models::Audio::TranslationCreateParams::model,
+          ?prompt: String,
+          ?response_format: OpenAI::Models::Audio::TranslationCreateParams::response_format,
+          ?temperature: Float,
+          ?request_options: OpenAI::request_opts
+        ) -> void
+
+        def to_hash: -> {
+          file: OpenAI::Internal::file_input,
+          model: OpenAI::Models::Audio::TranslationCreateParams::model,
+          prompt: String,
+          response_format: OpenAI::Models::Audio::TranslationCreateParams::response_format,
+          temperature: Float,
+          request_options: OpenAI::RequestOptions
+        }

         type model = String | OpenAI::Models::audio_model

-        class Model < OpenAI::Union
-          private def self.variants: -> [[nil, String], [nil, OpenAI::Models::audio_model]]
+        module Model
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Audio::TranslationCreateParams::model]
+        end
+
+        type response_format = :json | :text | :srt | :verbose_json | :vtt
+
+        module ResponseFormat
+          extend OpenAI::Internal::Type::Enum
+
+          JSON: :json
+          TEXT: :text
+          SRT: :srt
+          VERBOSE_JSON: :verbose_json
+          VTT: :vtt
+
+          def self?.values: -> ::Array[OpenAI::Models::Audio::TranslationCreateParams::response_format]
         end
       end
     end
diff --git a/sig/openai/models/audio/translation_create_response.rbs b/sig/openai/models/audio/translation_create_response.rbs
index a8516dab..a792f349 100644
--- a/sig/openai/models/audio/translation_create_response.rbs
+++ b/sig/openai/models/audio/translation_create_response.rbs
@@ -2,11 +2,12 @@ module OpenAI
   module Models
     module Audio
       type translation_create_response =
-        OpenAI::Models::Audio::Translation
-        | OpenAI::Models::Audio::TranslationVerbose
+        OpenAI::Audio::Translation | OpenAI::Audio::TranslationVerbose

-      class TranslationCreateResponse < OpenAI::Union
-        private def self.variants: -> [[nil, OpenAI::Models::Audio::Translation], [nil, OpenAI::Models::Audio::TranslationVerbose]]
+      module TranslationCreateResponse
+        extend OpenAI::Internal::Type::Union
+
+        def self?.variants: -> ::Array[OpenAI::Models::Audio::translation_create_response]
       end
     end
   end
diff --git a/sig/openai/models/audio/translation_verbose.rbs b/sig/openai/models/audio/translation_verbose.rbs
index 0b2aeae7..cac25f84 100644
--- a/sig/openai/models/audio/translation_verbose.rbs
+++ b/sig/openai/models/audio/translation_verbose.rbs
@@ -6,34 +6,35 @@ module OpenAI
           duration: Float,
           language: String,
           text: String,
-          segments: ::Array[OpenAI::Models::Audio::TranscriptionSegment]
+          segments: ::Array[OpenAI::Audio::TranscriptionSegment]
         }

-      class TranslationVerbose < OpenAI::BaseModel
+      class TranslationVerbose < OpenAI::Internal::Type::BaseModel
         attr_accessor duration: Float

         attr_accessor language: String

         attr_accessor text: String

-        attr_reader segments: ::Array[OpenAI::Models::Audio::TranscriptionSegment]?
+        attr_reader segments: ::Array[OpenAI::Audio::TranscriptionSegment]?

         def segments=: (
-          ::Array[OpenAI::Models::Audio::TranscriptionSegment]
-        ) -> ::Array[OpenAI::Models::Audio::TranscriptionSegment]
+          ::Array[OpenAI::Audio::TranscriptionSegment]
+        ) -> ::Array[OpenAI::Audio::TranscriptionSegment]

-        def initialize:
-          (
-            duration: Float,
-            language: String,
-            text: String,
-            segments: ::Array[OpenAI::Models::Audio::TranscriptionSegment]
-          ) -> void
-          | (
-            ?OpenAI::Models::Audio::translation_verbose | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          duration: Float,
+          language: String,
+          text: String,
+          ?segments: ::Array[OpenAI::Audio::TranscriptionSegment]
+        ) -> void

-        def to_hash: -> OpenAI::Models::Audio::translation_verbose
+        def to_hash: -> {
+          duration: Float,
+          language: String,
+          text: String,
+          segments: ::Array[OpenAI::Audio::TranscriptionSegment]
+        }
       end
     end
   end
diff --git a/sig/openai/models/audio_model.rbs b/sig/openai/models/audio_model.rbs
index f9841d31..4a294e19 100644
--- a/sig/openai/models/audio_model.rbs
+++ b/sig/openai/models/audio_model.rbs
@@ -1,11 +1,16 @@
 module OpenAI
   module Models
-    type audio_model = :"whisper-1"
+    type audio_model =
+      :"whisper-1" | :"gpt-4o-transcribe" | :"gpt-4o-mini-transcribe"
+
+    module AudioModel
+      extend OpenAI::Internal::Type::Enum

-    class AudioModel < OpenAI::Enum
       WHISPER_1: :"whisper-1"
+      GPT_4O_TRANSCRIBE: :"gpt-4o-transcribe"
+      GPT_4O_MINI_TRANSCRIBE: :"gpt-4o-mini-transcribe"

-      def self.values: -> ::Array[OpenAI::Models::audio_model]
+      def self?.values: -> ::Array[OpenAI::Models::audio_model]
     end
   end
 end
diff --git a/sig/openai/models/audio_response_format.rbs b/sig/openai/models/audio_response_format.rbs
index e91a52b8..39091918 100644
--- a/sig/openai/models/audio_response_format.rbs
+++ b/sig/openai/models/audio_response_format.rbs
@@ -2,14 +2,16 @@ module OpenAI
   module Models
     type audio_response_format = :json | :text | :srt | :verbose_json | :vtt

-    class AudioResponseFormat < OpenAI::Enum
+    module AudioResponseFormat
+      extend OpenAI::Internal::Type::Enum
+
       JSON: :json
       TEXT: :text
       SRT: :srt
       VERBOSE_JSON: :verbose_json
       VTT: :vtt

-      def self.values: -> ::Array[OpenAI::Models::audio_response_format]
+      def self?.values: -> ::Array[OpenAI::Models::audio_response_format]
     end
   end
 end
diff --git a/sig/openai/models/auto_file_chunking_strategy_param.rbs b/sig/openai/models/auto_file_chunking_strategy_param.rbs
index 85817088..54aeed24 100644
--- a/sig/openai/models/auto_file_chunking_strategy_param.rbs
+++ b/sig/openai/models/auto_file_chunking_strategy_param.rbs
@@ -2,17 +2,12 @@ module OpenAI
   module Models
     type auto_file_chunking_strategy_param = { type: :auto }

-    class AutoFileChunkingStrategyParam < OpenAI::BaseModel
+    class AutoFileChunkingStrategyParam < OpenAI::Internal::Type::BaseModel
       attr_accessor type: :auto

-      def initialize:
-        (type: :auto) -> void
-        | (
-          ?OpenAI::Models::auto_file_chunking_strategy_param
-          | OpenAI::BaseModel data
-        ) -> void
+      def initialize: (?type: :auto) -> void

-      def to_hash: -> OpenAI::Models::auto_file_chunking_strategy_param
+      def to_hash: -> { type: :auto }
     end
   end
 end
diff --git a/sig/openai/models/batch.rbs b/sig/openai/models/batch.rbs
index 97e29c54..cdba6cdc 100644
--- a/sig/openai/models/batch.rbs
+++ b/sig/openai/models/batch.rbs
@@ -13,7 +13,7 @@ module OpenAI
         cancelling_at: Integer,
         completed_at: Integer,
         error_file_id: String,
-        errors: OpenAI::Models::Batch::Errors,
+        errors: OpenAI::Batch::Errors,
         expired_at: Integer,
         expires_at: Integer,
         failed_at: Integer,
@@ -21,10 +21,10 @@ module OpenAI
         in_progress_at: Integer,
         metadata: OpenAI::Models::metadata?,
         output_file_id: String,
-        request_counts: OpenAI::Models::BatchRequestCounts
+        request_counts: OpenAI::BatchRequestCounts
       }

-    class Batch < OpenAI::BaseModel
+    class Batch < OpenAI::Internal::Type::BaseModel
       attr_accessor id: String

       attr_accessor completion_window: String
@@ -55,11 +55,9 @@ module OpenAI

       def error_file_id=: (String) -> String

-      attr_reader errors: OpenAI::Models::Batch::Errors?
+      attr_reader errors: OpenAI::Batch::Errors?

-      def errors=: (
-        OpenAI::Models::Batch::Errors
-      ) -> OpenAI::Models::Batch::Errors
+      def errors=: (OpenAI::Batch::Errors) -> OpenAI::Batch::Errors

       attr_reader expired_at: Integer?

@@ -87,38 +85,57 @@ module OpenAI

       def output_file_id=: (String) -> String

-      attr_reader request_counts: OpenAI::Models::BatchRequestCounts?
+      attr_reader request_counts: OpenAI::BatchRequestCounts?
       def request_counts=: (
-        OpenAI::Models::BatchRequestCounts
-      ) -> OpenAI::Models::BatchRequestCounts
-
-      def initialize:
-        (
-          id: String,
-          completion_window: String,
-          created_at: Integer,
-          endpoint: String,
-          input_file_id: String,
-          status: OpenAI::Models::Batch::status,
-          cancelled_at: Integer,
-          cancelling_at: Integer,
-          completed_at: Integer,
-          error_file_id: String,
-          errors: OpenAI::Models::Batch::Errors,
-          expired_at: Integer,
-          expires_at: Integer,
-          failed_at: Integer,
-          finalizing_at: Integer,
-          in_progress_at: Integer,
-          metadata: OpenAI::Models::metadata?,
-          output_file_id: String,
-          request_counts: OpenAI::Models::BatchRequestCounts,
-          object: :batch
-        ) -> void
-        | (?OpenAI::Models::batch | OpenAI::BaseModel data) -> void
+        OpenAI::BatchRequestCounts
+      ) -> OpenAI::BatchRequestCounts

-      def to_hash: -> OpenAI::Models::batch
+      def initialize: (
+        id: String,
+        completion_window: String,
+        created_at: Integer,
+        endpoint: String,
+        input_file_id: String,
+        status: OpenAI::Models::Batch::status,
+        ?cancelled_at: Integer,
+        ?cancelling_at: Integer,
+        ?completed_at: Integer,
+        ?error_file_id: String,
+        ?errors: OpenAI::Batch::Errors,
+        ?expired_at: Integer,
+        ?expires_at: Integer,
+        ?failed_at: Integer,
+        ?finalizing_at: Integer,
+        ?in_progress_at: Integer,
+        ?metadata: OpenAI::Models::metadata?,
+        ?output_file_id: String,
+        ?request_counts: OpenAI::BatchRequestCounts,
+        ?object: :batch
+      ) -> void
+
+      def to_hash: -> {
+        id: String,
+        completion_window: String,
+        created_at: Integer,
+        endpoint: String,
+        input_file_id: String,
+        object: :batch,
+        status: OpenAI::Models::Batch::status,
+        cancelled_at: Integer,
+        cancelling_at: Integer,
+        completed_at: Integer,
+        error_file_id: String,
+        errors: OpenAI::Batch::Errors,
+        expired_at: Integer,
+        expires_at: Integer,
+        failed_at: Integer,
+        finalizing_at: Integer,
+        in_progress_at: Integer,
+        metadata: OpenAI::Models::metadata?,
+        output_file_id: String,
+        request_counts: OpenAI::BatchRequestCounts
+      }

       type status =
         :validating
@@ -130,7 +147,9 @@ module OpenAI
         | :cancelling
         | :cancelled

-      class Status < OpenAI::Enum
+      module Status
+        extend OpenAI::Internal::Type::Enum
+
         VALIDATING: :validating
         FAILED: :failed
         IN_PROGRESS: :in_progress
@@ -140,28 +159,26 @@ module OpenAI
         CANCELLING: :cancelling
         CANCELLED: :cancelled

-        def self.values: -> ::Array[OpenAI::Models::Batch::status]
+        def self?.values: -> ::Array[OpenAI::Models::Batch::status]
       end

-      type errors =
-        { data: ::Array[OpenAI::Models::BatchError], object: String }
+      type errors = { data: ::Array[OpenAI::BatchError], object: String }

-      class Errors < OpenAI::BaseModel
-        attr_reader data: ::Array[OpenAI::Models::BatchError]?
+      class Errors < OpenAI::Internal::Type::BaseModel
+        attr_reader data: ::Array[OpenAI::BatchError]?

-        def data=: (
-          ::Array[OpenAI::Models::BatchError]
-        ) -> ::Array[OpenAI::Models::BatchError]
+        def data=: (::Array[OpenAI::BatchError]) -> ::Array[OpenAI::BatchError]

         attr_reader object: String?
def object=: (String) -> String - def initialize: - (data: ::Array[OpenAI::Models::BatchError], object: String) -> void - | (?OpenAI::Models::Batch::errors | OpenAI::BaseModel data) -> void + def initialize: ( + ?data: ::Array[OpenAI::BatchError], + ?object: String + ) -> void - def to_hash: -> OpenAI::Models::Batch::errors + def to_hash: -> { data: ::Array[OpenAI::BatchError], object: String } end end end diff --git a/sig/openai/models/batch_cancel_params.rbs b/sig/openai/models/batch_cancel_params.rbs index 793ea0a9..944d9c29 100644 --- a/sig/openai/models/batch_cancel_params.rbs +++ b/sig/openai/models/batch_cancel_params.rbs @@ -1,18 +1,14 @@ module OpenAI module Models - type batch_cancel_params = { } & OpenAI::request_parameters + type batch_cancel_params = { } & OpenAI::Internal::Type::request_parameters - class BatchCancelParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class BatchCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::batch_cancel_params | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::batch_cancel_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/batch_create_params.rbs b/sig/openai/models/batch_create_params.rbs index 2f317b07..3b5ff7a8 100644 --- a/sig/openai/models/batch_create_params.rbs +++ b/sig/openai/models/batch_create_params.rbs @@ -5,13 +5,14 @@ module OpenAI completion_window: OpenAI::Models::BatchCreateParams::completion_window, endpoint: OpenAI::Models::BatchCreateParams::endpoint, input_file_id: String, - metadata: OpenAI::Models::metadata? + metadata: OpenAI::Models::metadata?, + output_expires_after: OpenAI::BatchCreateParams::OutputExpiresAfter } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class BatchCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class BatchCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor completion_window: OpenAI::Models::BatchCreateParams::completion_window @@ -21,37 +22,67 @@ module OpenAI attr_accessor metadata: OpenAI::Models::metadata? - def initialize: - ( - completion_window: OpenAI::Models::BatchCreateParams::completion_window, - endpoint: OpenAI::Models::BatchCreateParams::endpoint, - input_file_id: String, - metadata: OpenAI::Models::metadata?, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::batch_create_params | OpenAI::BaseModel data - ) -> void + attr_reader output_expires_after: OpenAI::BatchCreateParams::OutputExpiresAfter? 
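Note: request-params classes now declare every keyword optional, including `request_options`, so the empty-bodied params (`BatchCancelParams`, `BatchRetrieveParams`, ...) need no arguments at all, while `BatchCreateParams` gains the new `output_expires_after` field and `/v1/responses` endpoint variant. A sketch, with the per-request options shape assumed and nested hashes assumed to coerce into their model classes:

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    client.batches.cancel("batch_abc123") # hypothetical id
    # Per-request options ride along as an ordinary optional keyword:
    client.batches.cancel("batch_abc123", request_options: { timeout: 30 })

    client.batches.create(
      completion_window: :"24h",
      endpoint: :"/v1/responses",   # newly added endpoint variant
      input_file_id: "file-abc123", # hypothetical id
      # `seconds` is required; `anchor` defaults to :created_at per the sig.
      output_expires_after: { anchor: :created_at, seconds: 86_400 }
    )
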
- def to_hash: -> OpenAI::Models::batch_create_params + def output_expires_after=: ( + OpenAI::BatchCreateParams::OutputExpiresAfter + ) -> OpenAI::BatchCreateParams::OutputExpiresAfter + + def initialize: ( + completion_window: OpenAI::Models::BatchCreateParams::completion_window, + endpoint: OpenAI::Models::BatchCreateParams::endpoint, + input_file_id: String, + ?metadata: OpenAI::Models::metadata?, + ?output_expires_after: OpenAI::BatchCreateParams::OutputExpiresAfter, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + completion_window: OpenAI::Models::BatchCreateParams::completion_window, + endpoint: OpenAI::Models::BatchCreateParams::endpoint, + input_file_id: String, + metadata: OpenAI::Models::metadata?, + output_expires_after: OpenAI::BatchCreateParams::OutputExpiresAfter, + request_options: OpenAI::RequestOptions + } type completion_window = :"24h" - class CompletionWindow < OpenAI::Enum - NUMBER_24H: :"24h" + module CompletionWindow + extend OpenAI::Internal::Type::Enum - def self.values: -> ::Array[OpenAI::Models::BatchCreateParams::completion_window] + COMPLETION_WINDOW_24H: :"24h" + + def self?.values: -> ::Array[OpenAI::Models::BatchCreateParams::completion_window] end type endpoint = - :"/v1/chat/completions" | :"/v1/embeddings" | :"/v1/completions" + :"/v1/responses" + | :"/v1/chat/completions" + | :"/v1/embeddings" + | :"/v1/completions" + + module Endpoint + extend OpenAI::Internal::Type::Enum - class Endpoint < OpenAI::Enum + V1_RESPONSES: :"/v1/responses" V1_CHAT_COMPLETIONS: :"/v1/chat/completions" V1_EMBEDDINGS: :"/v1/embeddings" V1_COMPLETIONS: :"/v1/completions" - def self.values: -> ::Array[OpenAI::Models::BatchCreateParams::endpoint] + def self?.values: -> ::Array[OpenAI::Models::BatchCreateParams::endpoint] + end + + type output_expires_after = { anchor: :created_at, seconds: Integer } + + class OutputExpiresAfter < OpenAI::Internal::Type::BaseModel + attr_accessor anchor: :created_at + + attr_accessor seconds: Integer + + def initialize: (seconds: Integer, ?anchor: :created_at) -> void + + def to_hash: -> { anchor: :created_at, seconds: Integer } end end end diff --git a/sig/openai/models/batch_error.rbs b/sig/openai/models/batch_error.rbs index 4803b3cd..028bc05a 100644 --- a/sig/openai/models/batch_error.rbs +++ b/sig/openai/models/batch_error.rbs @@ -3,7 +3,7 @@ module OpenAI type batch_error = { code: String, line: Integer?, message: String, param: String? } - class BatchError < OpenAI::BaseModel + class BatchError < OpenAI::Internal::Type::BaseModel attr_reader code: String? def code=: (String) -> String @@ -16,11 +16,19 @@ module OpenAI attr_accessor param: String? - def initialize: - (code: String, line: Integer?, message: String, param: String?) -> void - | (?OpenAI::Models::batch_error | OpenAI::BaseModel data) -> void - - def to_hash: -> OpenAI::Models::batch_error + def initialize: ( + ?code: String, + ?line: Integer?, + ?message: String, + ?param: String? + ) -> void + + def to_hash: -> { + code: String, + line: Integer?, + message: String, + param: String? 
+ } end end end diff --git a/sig/openai/models/batch_list_params.rbs b/sig/openai/models/batch_list_params.rbs index e1265113..59ce8fb7 100644 --- a/sig/openai/models/batch_list_params.rbs +++ b/sig/openai/models/batch_list_params.rbs @@ -1,11 +1,12 @@ module OpenAI module Models type batch_list_params = - { after: String, limit: Integer } & OpenAI::request_parameters + { after: String, limit: Integer } + & OpenAI::Internal::Type::request_parameters - class BatchListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class BatchListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_reader after: String? @@ -15,15 +16,17 @@ module OpenAI def limit=: (Integer) -> Integer - def initialize: - ( - after: String, - limit: Integer, - request_options: OpenAI::request_opts - ) -> void - | (?OpenAI::Models::batch_list_params | OpenAI::BaseModel data) -> void + def initialize: ( + ?after: String, + ?limit: Integer, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::batch_list_params + def to_hash: -> { + after: String, + limit: Integer, + request_options: OpenAI::RequestOptions + } end end end diff --git a/sig/openai/models/batch_request_counts.rbs b/sig/openai/models/batch_request_counts.rbs index d2a56b3b..d8013508 100644 --- a/sig/openai/models/batch_request_counts.rbs +++ b/sig/openai/models/batch_request_counts.rbs @@ -3,20 +3,20 @@ module OpenAI type batch_request_counts = { completed: Integer, failed: Integer, total: Integer } - class BatchRequestCounts < OpenAI::BaseModel + class BatchRequestCounts < OpenAI::Internal::Type::BaseModel attr_accessor completed: Integer attr_accessor failed: Integer attr_accessor total: Integer - def initialize: - (completed: Integer, failed: Integer, total: Integer) -> void - | ( - ?OpenAI::Models::batch_request_counts | OpenAI::BaseModel data - ) -> void + def initialize: ( + completed: Integer, + failed: Integer, + total: Integer + ) -> void - def to_hash: -> OpenAI::Models::batch_request_counts + def to_hash: -> { completed: Integer, failed: Integer, total: Integer } end end end diff --git a/sig/openai/models/batch_retrieve_params.rbs b/sig/openai/models/batch_retrieve_params.rbs index e3af551a..232d9ed7 100644 --- a/sig/openai/models/batch_retrieve_params.rbs +++ b/sig/openai/models/batch_retrieve_params.rbs @@ -1,18 +1,15 @@ module OpenAI module Models - type batch_retrieve_params = { } & OpenAI::request_parameters + type batch_retrieve_params = + { } & OpenAI::Internal::Type::request_parameters - class BatchRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class BatchRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::batch_retrieve_params | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::batch_retrieve_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/beta/assistant.rbs b/sig/openai/models/beta/assistant.rbs index 5e7d12d3..3128e9ba 100644 --- a/sig/openai/models/beta/assistant.rbs +++ b/sig/openai/models/beta/assistant.rbs @@ -14,11 
+14,11 @@ module OpenAI tools: ::Array[OpenAI::Models::Beta::assistant_tool], response_format: OpenAI::Models::Beta::assistant_response_format_option?, temperature: Float?, - tool_resources: OpenAI::Models::Beta::Assistant::ToolResources?, + tool_resources: OpenAI::Beta::Assistant::ToolResources?, top_p: Float? } - class Assistant < OpenAI::BaseModel + class Assistant < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor created_at: Integer @@ -41,93 +41,93 @@ module OpenAI attr_accessor temperature: Float? - attr_accessor tool_resources: OpenAI::Models::Beta::Assistant::ToolResources? + attr_accessor tool_resources: OpenAI::Beta::Assistant::ToolResources? attr_accessor top_p: Float? - def initialize: - ( - id: String, - created_at: Integer, - description: String?, - instructions: String?, - metadata: OpenAI::Models::metadata?, - model: String, - name: String?, - tools: ::Array[OpenAI::Models::Beta::assistant_tool], - response_format: OpenAI::Models::Beta::assistant_response_format_option?, - temperature: Float?, - tool_resources: OpenAI::Models::Beta::Assistant::ToolResources?, - top_p: Float?, - object: :assistant - ) -> void - | (?OpenAI::Models::Beta::assistant | OpenAI::BaseModel data) -> void - - def to_hash: -> OpenAI::Models::Beta::assistant + def initialize: ( + id: String, + created_at: Integer, + description: String?, + instructions: String?, + metadata: OpenAI::Models::metadata?, + model: String, + name: String?, + tools: ::Array[OpenAI::Models::Beta::assistant_tool], + ?response_format: OpenAI::Models::Beta::assistant_response_format_option?, + ?temperature: Float?, + ?tool_resources: OpenAI::Beta::Assistant::ToolResources?, + ?top_p: Float?, + ?object: :assistant + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + description: String?, + instructions: String?, + metadata: OpenAI::Models::metadata?, + model: String, + name: String?, + object: :assistant, + tools: ::Array[OpenAI::Models::Beta::assistant_tool], + response_format: OpenAI::Models::Beta::assistant_response_format_option?, + temperature: Float?, + tool_resources: OpenAI::Beta::Assistant::ToolResources?, + top_p: Float? + } type tool_resources = { - code_interpreter: OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::Assistant::ToolResources::FileSearch + code_interpreter: OpenAI::Beta::Assistant::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::Assistant::ToolResources::FileSearch } - class ToolResources < OpenAI::BaseModel - attr_reader code_interpreter: OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter? + class ToolResources < OpenAI::Internal::Type::BaseModel + attr_reader code_interpreter: OpenAI::Beta::Assistant::ToolResources::CodeInterpreter? def code_interpreter=: ( - OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter - ) -> OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter + OpenAI::Beta::Assistant::ToolResources::CodeInterpreter + ) -> OpenAI::Beta::Assistant::ToolResources::CodeInterpreter - attr_reader file_search: OpenAI::Models::Beta::Assistant::ToolResources::FileSearch? + attr_reader file_search: OpenAI::Beta::Assistant::ToolResources::FileSearch? 
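Note: throughout these files the type references move from `OpenAI::Models::...` to the shorter `OpenAI::...` paths, which only type-checks if the gem aliases the model classes at the top level (an inference from the signatures; this diff does not show the aliases themselves). Construction follows the same optional-keyword pattern:

    # Both keywords are optional (`?code_interpreter`, `?file_search`).
    resources = OpenAI::Beta::Assistant::ToolResources.new(
      code_interpreter: OpenAI::Beta::Assistant::ToolResources::CodeInterpreter.new(
        file_ids: ["file-abc123"] # hypothetical file id
      )
    )
    resources.to_hash
    # => { code_interpreter: { file_ids: ["file-abc123"] } }, presumably with unset keys omitted
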
def file_search=: ( - OpenAI::Models::Beta::Assistant::ToolResources::FileSearch - ) -> OpenAI::Models::Beta::Assistant::ToolResources::FileSearch + OpenAI::Beta::Assistant::ToolResources::FileSearch + ) -> OpenAI::Beta::Assistant::ToolResources::FileSearch - def initialize: - ( - code_interpreter: OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::Assistant::ToolResources::FileSearch - ) -> void - | ( - ?OpenAI::Models::Beta::Assistant::tool_resources - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?code_interpreter: OpenAI::Beta::Assistant::ToolResources::CodeInterpreter, + ?file_search: OpenAI::Beta::Assistant::ToolResources::FileSearch + ) -> void - def to_hash: -> OpenAI::Models::Beta::Assistant::tool_resources + def to_hash: -> { + code_interpreter: OpenAI::Beta::Assistant::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::Assistant::ToolResources::FileSearch + } type code_interpreter = { file_ids: ::Array[String] } - class CodeInterpreter < OpenAI::BaseModel + class CodeInterpreter < OpenAI::Internal::Type::BaseModel attr_reader file_ids: ::Array[String]? def file_ids=: (::Array[String]) -> ::Array[String] - def initialize: - (file_ids: ::Array[String]) -> void - | ( - ?OpenAI::Models::Beta::Assistant::ToolResources::code_interpreter - | OpenAI::BaseModel data - ) -> void + def initialize: (?file_ids: ::Array[String]) -> void - def to_hash: -> OpenAI::Models::Beta::Assistant::ToolResources::code_interpreter + def to_hash: -> { file_ids: ::Array[String] } end type file_search = { vector_store_ids: ::Array[String] } - class FileSearch < OpenAI::BaseModel + class FileSearch < OpenAI::Internal::Type::BaseModel attr_reader vector_store_ids: ::Array[String]? def vector_store_ids=: (::Array[String]) -> ::Array[String] - def initialize: - (vector_store_ids: ::Array[String]) -> void - | ( - ?OpenAI::Models::Beta::Assistant::ToolResources::file_search - | OpenAI::BaseModel data - ) -> void + def initialize: (?vector_store_ids: ::Array[String]) -> void - def to_hash: -> OpenAI::Models::Beta::Assistant::ToolResources::file_search + def to_hash: -> { vector_store_ids: ::Array[String] } end end end diff --git a/sig/openai/models/beta/assistant_create_params.rbs b/sig/openai/models/beta/assistant_create_params.rbs index 63ed4e29..f4bdd80f 100644 --- a/sig/openai/models/beta/assistant_create_params.rbs +++ b/sig/openai/models/beta/assistant_create_params.rbs @@ -11,15 +11,15 @@ module OpenAI reasoning_effort: OpenAI::Models::reasoning_effort?, response_format: OpenAI::Models::Beta::assistant_response_format_option?, temperature: Float?, - tool_resources: OpenAI::Models::Beta::AssistantCreateParams::ToolResources?, + tool_resources: OpenAI::Beta::AssistantCreateParams::ToolResources?, tools: ::Array[OpenAI::Models::Beta::assistant_tool], top_p: Float? } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class AssistantCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class AssistantCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor model: OpenAI::Models::Beta::AssistantCreateParams::model @@ -37,7 +37,7 @@ module OpenAI attr_accessor temperature: Float? - attr_accessor tool_resources: OpenAI::Models::Beta::AssistantCreateParams::ToolResources? 
+ attr_accessor tool_resources: OpenAI::Beta::AssistantCreateParams::ToolResources? attr_reader tools: ::Array[OpenAI::Models::Beta::assistant_tool]? @@ -47,110 +47,111 @@ module OpenAI attr_accessor top_p: Float? - def initialize: - ( - model: OpenAI::Models::Beta::AssistantCreateParams::model, - description: String?, - instructions: String?, - metadata: OpenAI::Models::metadata?, - name: String?, - reasoning_effort: OpenAI::Models::reasoning_effort?, - response_format: OpenAI::Models::Beta::assistant_response_format_option?, - temperature: Float?, - tool_resources: OpenAI::Models::Beta::AssistantCreateParams::ToolResources?, - tools: ::Array[OpenAI::Models::Beta::assistant_tool], - top_p: Float?, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::Beta::assistant_create_params - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::assistant_create_params + def initialize: ( + model: OpenAI::Models::Beta::AssistantCreateParams::model, + ?description: String?, + ?instructions: String?, + ?metadata: OpenAI::Models::metadata?, + ?name: String?, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, + ?response_format: OpenAI::Models::Beta::assistant_response_format_option?, + ?temperature: Float?, + ?tool_resources: OpenAI::Beta::AssistantCreateParams::ToolResources?, + ?tools: ::Array[OpenAI::Models::Beta::assistant_tool], + ?top_p: Float?, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + model: OpenAI::Models::Beta::AssistantCreateParams::model, + description: String?, + instructions: String?, + metadata: OpenAI::Models::metadata?, + name: String?, + reasoning_effort: OpenAI::Models::reasoning_effort?, + response_format: OpenAI::Models::Beta::assistant_response_format_option?, + temperature: Float?, + tool_resources: OpenAI::Beta::AssistantCreateParams::ToolResources?, + tools: ::Array[OpenAI::Models::Beta::assistant_tool], + top_p: Float?, + request_options: OpenAI::RequestOptions + } type model = String | OpenAI::Models::chat_model - class Model < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, OpenAI::Models::chat_model]] + module Model + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::Beta::AssistantCreateParams::model] end type tool_resources = { - code_interpreter: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch + code_interpreter: OpenAI::Beta::AssistantCreateParams::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch } - class ToolResources < OpenAI::BaseModel - attr_reader code_interpreter: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter? + class ToolResources < OpenAI::Internal::Type::BaseModel + attr_reader code_interpreter: OpenAI::Beta::AssistantCreateParams::ToolResources::CodeInterpreter? def code_interpreter=: ( - OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter - ) -> OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter + OpenAI::Beta::AssistantCreateParams::ToolResources::CodeInterpreter + ) -> OpenAI::Beta::AssistantCreateParams::ToolResources::CodeInterpreter - attr_reader file_search: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch? + attr_reader file_search: OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch? 
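Note: `AssistantCreateParams::Model` becomes a union module, and the old private `self.variants` tuple list is replaced by a public `self?.variants` returning the member array, so either arm of the union should be accepted for `model`:

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # String arm of the union:
    client.beta.assistants.create(model: "gpt-4o")
    # chat_model enum arm in symbol form, assuming the enum exposes this member:
    client.beta.assistants.create(model: :"gpt-4o")

    # Union membership is now introspectable:
    OpenAI::Models::Beta::AssistantCreateParams::Model.variants
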
def file_search=: ( - OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch - ) -> OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch + ) -> OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch - def initialize: - ( - code_interpreter: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantCreateParams::tool_resources - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?code_interpreter: OpenAI::Beta::AssistantCreateParams::ToolResources::CodeInterpreter, + ?file_search: OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch + ) -> void - def to_hash: -> OpenAI::Models::Beta::AssistantCreateParams::tool_resources + def to_hash: -> { + code_interpreter: OpenAI::Beta::AssistantCreateParams::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch + } type code_interpreter = { file_ids: ::Array[String] } - class CodeInterpreter < OpenAI::BaseModel + class CodeInterpreter < OpenAI::Internal::Type::BaseModel attr_reader file_ids: ::Array[String]? def file_ids=: (::Array[String]) -> ::Array[String] - def initialize: - (file_ids: ::Array[String]) -> void - | ( - ?OpenAI::Models::Beta::AssistantCreateParams::ToolResources::code_interpreter - | OpenAI::BaseModel data - ) -> void + def initialize: (?file_ids: ::Array[String]) -> void - def to_hash: -> OpenAI::Models::Beta::AssistantCreateParams::ToolResources::code_interpreter + def to_hash: -> { file_ids: ::Array[String] } end type file_search = { vector_store_ids: ::Array[String], - vector_stores: ::Array[OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore] + vector_stores: ::Array[OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore] } - class FileSearch < OpenAI::BaseModel + class FileSearch < OpenAI::Internal::Type::BaseModel attr_reader vector_store_ids: ::Array[String]? def vector_store_ids=: (::Array[String]) -> ::Array[String] - attr_reader vector_stores: ::Array[OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore]? + attr_reader vector_stores: ::Array[OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore]? 
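Note: the nested `FileSearch`/`VectorStore`/`ChunkingStrategy` types follow the same pattern: unions become modules and literal discriminators gain `?` defaults (`?type: :auto`). A sketch of building a vector store with an explicit auto chunking strategy (ids hypothetical):

    Params = OpenAI::Beta::AssistantCreateParams

    file_search = Params::ToolResources::FileSearch.new(
      vector_stores: [
        Params::ToolResources::FileSearch::VectorStore.new(
          file_ids: ["file-abc123"],
          # Auto.new needs no arguments; `type` defaults to :auto.
          chunking_strategy: Params::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto.new
        )
      ]
    )
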
def vector_stores=: ( - ::Array[OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore] - ) -> ::Array[OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore] + ::Array[OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore] + ) -> ::Array[OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore] - def initialize: - ( - vector_store_ids: ::Array[String], - vector_stores: ::Array[OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore] - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantCreateParams::ToolResources::file_search - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?vector_store_ids: ::Array[String], + ?vector_stores: ::Array[OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore] + ) -> void - def to_hash: -> OpenAI::Models::Beta::AssistantCreateParams::ToolResources::file_search + def to_hash: -> { + vector_store_ids: ::Array[String], + vector_stores: ::Array[OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore] + } type vector_store = { @@ -159,7 +160,7 @@ module OpenAI metadata: OpenAI::Models::metadata? } - class VectorStore < OpenAI::BaseModel + class VectorStore < OpenAI::Internal::Type::BaseModel attr_reader chunking_strategy: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::chunking_strategy? def chunking_strategy=: ( @@ -172,61 +173,55 @@ module OpenAI attr_accessor metadata: OpenAI::Models::metadata? - def initialize: - ( - chunking_strategy: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::chunking_strategy, - file_ids: ::Array[String], - metadata: OpenAI::Models::metadata? - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::vector_store - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?chunking_strategy: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::chunking_strategy, + ?file_ids: ::Array[String], + ?metadata: OpenAI::Models::metadata? + ) -> void - def to_hash: -> OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::vector_store + def to_hash: -> { + chunking_strategy: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::chunking_strategy, + file_ids: ::Array[String], + metadata: OpenAI::Models::metadata? 
+ } type chunking_strategy = - OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto - | OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto + | OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + + module ChunkingStrategy + extend OpenAI::Internal::Type::Union - class ChunkingStrategy < OpenAI::Union type auto = { type: :auto } - class Auto < OpenAI::BaseModel + class Auto < OpenAI::Internal::Type::BaseModel attr_accessor type: :auto - def initialize: - (type: :auto) -> void - | ( - ?OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::auto - | OpenAI::BaseModel data - ) -> void + def initialize: (?type: :auto) -> void - def to_hash: -> OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::auto + def to_hash: -> { type: :auto } end type static = { - static: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, + static: OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, type: :static } - class Static < OpenAI::BaseModel - attr_accessor static: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static + class Static < OpenAI::Internal::Type::BaseModel + attr_accessor static: OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static attr_accessor type: :static - def initialize: - ( - static: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, - type: :static - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::static - | OpenAI::BaseModel data - ) -> void + def initialize: ( + static: OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, + ?type: :static + ) -> void - def to_hash: -> OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::static + def to_hash: -> { + static: OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, + type: :static + } type static = { @@ -234,26 +229,24 @@ module OpenAI max_chunk_size_tokens: Integer } - class Static < OpenAI::BaseModel + class Static < OpenAI::Internal::Type::BaseModel attr_accessor chunk_overlap_tokens: Integer attr_accessor max_chunk_size_tokens: Integer - def initialize: - ( - chunk_overlap_tokens: Integer, - max_chunk_size_tokens: Integer - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::static - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::static + def initialize: ( + chunk_overlap_tokens: Integer, + max_chunk_size_tokens: Integer + ) -> void + + def to_hash: -> { + chunk_overlap_tokens: Integer, + max_chunk_size_tokens: Integer + } end end - private def self.variants: -> [[:auto, 
OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto], [:static, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]] + def self?.variants: -> ::Array[OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::chunking_strategy] end end end diff --git a/sig/openai/models/beta/assistant_delete_params.rbs b/sig/openai/models/beta/assistant_delete_params.rbs index fae651c6..e476dced 100644 --- a/sig/openai/models/beta/assistant_delete_params.rbs +++ b/sig/openai/models/beta/assistant_delete_params.rbs @@ -1,20 +1,16 @@ module OpenAI module Models module Beta - type assistant_delete_params = { } & OpenAI::request_parameters + type assistant_delete_params = + { } & OpenAI::Internal::Type::request_parameters - class AssistantDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class AssistantDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::Beta::assistant_delete_params - | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::Beta::assistant_delete_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/beta/assistant_deleted.rbs b/sig/openai/models/beta/assistant_deleted.rbs index 655b5bdc..f5fe869a 100644 --- a/sig/openai/models/beta/assistant_deleted.rbs +++ b/sig/openai/models/beta/assistant_deleted.rbs @@ -4,20 +4,24 @@ module OpenAI type assistant_deleted = { id: String, deleted: bool, object: :"assistant.deleted" } - class AssistantDeleted < OpenAI::BaseModel + class AssistantDeleted < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor deleted: bool attr_accessor object: :"assistant.deleted" - def initialize: - (id: String, deleted: bool, object: :"assistant.deleted") -> void - | ( - ?OpenAI::Models::Beta::assistant_deleted | OpenAI::BaseModel data - ) -> void + def initialize: ( + id: String, + deleted: bool, + ?object: :"assistant.deleted" + ) -> void - def to_hash: -> OpenAI::Models::Beta::assistant_deleted + def to_hash: -> { + id: String, + deleted: bool, + object: :"assistant.deleted" + } end end end diff --git a/sig/openai/models/beta/assistant_list_params.rbs b/sig/openai/models/beta/assistant_list_params.rbs index c2254036..d9fea65e 100644 --- a/sig/openai/models/beta/assistant_list_params.rbs +++ b/sig/openai/models/beta/assistant_list_params.rbs @@ -8,11 +8,11 @@ module OpenAI limit: Integer, order: OpenAI::Models::Beta::AssistantListParams::order } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class AssistantListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class AssistantListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_reader after: String? 
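Note: `AssistantListParams` gets the same treatment: every keyword becomes optional and `Order` becomes an enum module. Sketch, with the list method's return shape assumed to be an enumerable page:

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # Cursor, page size, and sort order are all optional keywords now.
    page = client.beta.assistants.list(limit: 20, order: :desc)

    OpenAI::Models::Beta::AssistantListParams::Order.values # => [:asc, :desc]
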
@@ -32,28 +32,31 @@ module OpenAI OpenAI::Models::Beta::AssistantListParams::order ) -> OpenAI::Models::Beta::AssistantListParams::order - def initialize: - ( - after: String, - before: String, - limit: Integer, - order: OpenAI::Models::Beta::AssistantListParams::order, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::Beta::assistant_list_params - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::assistant_list_params + def initialize: ( + ?after: String, + ?before: String, + ?limit: Integer, + ?order: OpenAI::Models::Beta::AssistantListParams::order, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + after: String, + before: String, + limit: Integer, + order: OpenAI::Models::Beta::AssistantListParams::order, + request_options: OpenAI::RequestOptions + } type order = :asc | :desc - class Order < OpenAI::Enum + module Order + extend OpenAI::Internal::Type::Enum + ASC: :asc DESC: :desc - def self.values: -> ::Array[OpenAI::Models::Beta::AssistantListParams::order] + def self?.values: -> ::Array[OpenAI::Models::Beta::AssistantListParams::order] end end end diff --git a/sig/openai/models/beta/assistant_response_format_option.rbs b/sig/openai/models/beta/assistant_response_format_option.rbs index c9efaa33..7ae082e6 100644 --- a/sig/openai/models/beta/assistant_response_format_option.rbs +++ b/sig/openai/models/beta/assistant_response_format_option.rbs @@ -3,12 +3,14 @@ module OpenAI module Beta type assistant_response_format_option = :auto - | OpenAI::Models::ResponseFormatText - | OpenAI::Models::ResponseFormatJSONObject - | OpenAI::Models::ResponseFormatJSONSchema + | OpenAI::ResponseFormatText + | OpenAI::ResponseFormatJSONObject + | OpenAI::ResponseFormatJSONSchema - class AssistantResponseFormatOption < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, OpenAI::Models::ResponseFormatText], [nil, OpenAI::Models::ResponseFormatJSONObject], [nil, OpenAI::Models::ResponseFormatJSONSchema]] + module AssistantResponseFormatOption + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::Beta::assistant_response_format_option] end end end diff --git a/sig/openai/models/beta/assistant_retrieve_params.rbs b/sig/openai/models/beta/assistant_retrieve_params.rbs index a1148ac9..be66a758 100644 --- a/sig/openai/models/beta/assistant_retrieve_params.rbs +++ b/sig/openai/models/beta/assistant_retrieve_params.rbs @@ -1,20 +1,16 @@ module OpenAI module Models module Beta - type assistant_retrieve_params = { } & OpenAI::request_parameters + type assistant_retrieve_params = + { } & OpenAI::Internal::Type::request_parameters - class AssistantRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class AssistantRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::Beta::assistant_retrieve_params - | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::Beta::assistant_retrieve_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/beta/assistant_stream_event.rbs b/sig/openai/models/beta/assistant_stream_event.rbs index a22d0f87..0852c3c3 100644 --- 
a/sig/openai/models/beta/assistant_stream_event.rbs +++ b/sig/openai/models/beta/assistant_stream_event.rbs @@ -2,41 +2,43 @@ module OpenAI module Models module Beta type assistant_stream_event = - OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted - | OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete - | OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent - - class AssistantStreamEvent < OpenAI::Union + OpenAI::Beta::AssistantStreamEvent::ThreadCreated + | OpenAI::Beta::AssistantStreamEvent::ThreadRunCreated + | OpenAI::Beta::AssistantStreamEvent::ThreadRunQueued + | OpenAI::Beta::AssistantStreamEvent::ThreadRunInProgress + | OpenAI::Beta::AssistantStreamEvent::ThreadRunRequiresAction + | OpenAI::Beta::AssistantStreamEvent::ThreadRunCompleted + | OpenAI::Beta::AssistantStreamEvent::ThreadRunIncomplete + | OpenAI::Beta::AssistantStreamEvent::ThreadRunFailed + | OpenAI::Beta::AssistantStreamEvent::ThreadRunCancelling + | OpenAI::Beta::AssistantStreamEvent::ThreadRunCancelled + | OpenAI::Beta::AssistantStreamEvent::ThreadRunExpired + | OpenAI::Beta::AssistantStreamEvent::ThreadRunStepCreated + | OpenAI::Beta::AssistantStreamEvent::ThreadRunStepInProgress + | OpenAI::Beta::AssistantStreamEvent::ThreadRunStepDelta + | OpenAI::Beta::AssistantStreamEvent::ThreadRunStepCompleted + | OpenAI::Beta::AssistantStreamEvent::ThreadRunStepFailed + | OpenAI::Beta::AssistantStreamEvent::ThreadRunStepCancelled + | OpenAI::Beta::AssistantStreamEvent::ThreadRunStepExpired + | OpenAI::Beta::AssistantStreamEvent::ThreadMessageCreated + | OpenAI::Beta::AssistantStreamEvent::ThreadMessageInProgress + | OpenAI::Beta::AssistantStreamEvent::ThreadMessageDelta + | OpenAI::Beta::AssistantStreamEvent::ThreadMessageCompleted + | OpenAI::Beta::AssistantStreamEvent::ThreadMessageIncomplete + | OpenAI::Beta::AssistantStreamEvent::ErrorEvent + + module AssistantStreamEvent + extend OpenAI::Internal::Type::Union + type thread_created = { - data: OpenAI::Models::Beta::Thread, + data: OpenAI::Beta::Thread, event: :"thread.created", enabled: bool } - class ThreadCreated < OpenAI::BaseModel - 
attr_accessor data: OpenAI::Models::Beta::Thread + class ThreadCreated < OpenAI::Internal::Type::BaseModel + attr_accessor data: OpenAI::Beta::Thread attr_accessor event: :"thread.created" @@ -44,566 +46,489 @@ module OpenAI def enabled=: (bool) -> bool - def initialize: - ( - data: OpenAI::Models::Beta::Thread, - enabled: bool, - event: :"thread.created" - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantStreamEvent::thread_created - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_created + def initialize: ( + data: OpenAI::Beta::Thread, + ?enabled: bool, + ?event: :"thread.created" + ) -> void + + def to_hash: -> { + data: OpenAI::Beta::Thread, + event: :"thread.created", + enabled: bool + } end type thread_run_created = - { - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.created" - } + { data: OpenAI::Beta::Threads::Run, event: :"thread.run.created" } - class ThreadRunCreated < OpenAI::BaseModel - attr_accessor data: OpenAI::Models::Beta::Threads::Run + class ThreadRunCreated < OpenAI::Internal::Type::BaseModel + attr_accessor data: OpenAI::Beta::Threads::Run attr_accessor event: :"thread.run.created" - def initialize: - ( - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.created" - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_created - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_created + def initialize: ( + data: OpenAI::Beta::Threads::Run, + ?event: :"thread.run.created" + ) -> void + + def to_hash: -> { + data: OpenAI::Beta::Threads::Run, + event: :"thread.run.created" + } end type thread_run_queued = - { - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.queued" - } + { data: OpenAI::Beta::Threads::Run, event: :"thread.run.queued" } - class ThreadRunQueued < OpenAI::BaseModel - attr_accessor data: OpenAI::Models::Beta::Threads::Run + class ThreadRunQueued < OpenAI::Internal::Type::BaseModel + attr_accessor data: OpenAI::Beta::Threads::Run attr_accessor event: :"thread.run.queued" - def initialize: - ( - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.queued" - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_queued - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_queued + def initialize: ( + data: OpenAI::Beta::Threads::Run, + ?event: :"thread.run.queued" + ) -> void + + def to_hash: -> { + data: OpenAI::Beta::Threads::Run, + event: :"thread.run.queued" + } end type thread_run_in_progress = - { - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.in_progress" - } + { data: OpenAI::Beta::Threads::Run, event: :"thread.run.in_progress" } - class ThreadRunInProgress < OpenAI::BaseModel - attr_accessor data: OpenAI::Models::Beta::Threads::Run + class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel + attr_accessor data: OpenAI::Beta::Threads::Run attr_accessor event: :"thread.run.in_progress" - def initialize: - ( - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.in_progress" - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_in_progress - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_in_progress + def initialize: ( + data: OpenAI::Beta::Threads::Run, + ?event: :"thread.run.in_progress" + ) -> void + + def to_hash: -> { + data: OpenAI::Beta::Threads::Run, 
+ event: :"thread.run.in_progress" + } end type thread_run_requires_action = { - data: OpenAI::Models::Beta::Threads::Run, + data: OpenAI::Beta::Threads::Run, event: :"thread.run.requires_action" } - class ThreadRunRequiresAction < OpenAI::BaseModel - attr_accessor data: OpenAI::Models::Beta::Threads::Run + class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel + attr_accessor data: OpenAI::Beta::Threads::Run attr_accessor event: :"thread.run.requires_action" - def initialize: - ( - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.requires_action" - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_requires_action - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_requires_action + def initialize: ( + data: OpenAI::Beta::Threads::Run, + ?event: :"thread.run.requires_action" + ) -> void + + def to_hash: -> { + data: OpenAI::Beta::Threads::Run, + event: :"thread.run.requires_action" + } end type thread_run_completed = - { - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.completed" - } + { data: OpenAI::Beta::Threads::Run, event: :"thread.run.completed" } - class ThreadRunCompleted < OpenAI::BaseModel - attr_accessor data: OpenAI::Models::Beta::Threads::Run + class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel + attr_accessor data: OpenAI::Beta::Threads::Run attr_accessor event: :"thread.run.completed" - def initialize: - ( - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.completed" - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_completed - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_completed + def initialize: ( + data: OpenAI::Beta::Threads::Run, + ?event: :"thread.run.completed" + ) -> void + + def to_hash: -> { + data: OpenAI::Beta::Threads::Run, + event: :"thread.run.completed" + } end type thread_run_incomplete = - { - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.incomplete" - } + { data: OpenAI::Beta::Threads::Run, event: :"thread.run.incomplete" } - class ThreadRunIncomplete < OpenAI::BaseModel - attr_accessor data: OpenAI::Models::Beta::Threads::Run + class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel + attr_accessor data: OpenAI::Beta::Threads::Run attr_accessor event: :"thread.run.incomplete" - def initialize: - ( - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.incomplete" - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_incomplete - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_incomplete + def initialize: ( + data: OpenAI::Beta::Threads::Run, + ?event: :"thread.run.incomplete" + ) -> void + + def to_hash: -> { + data: OpenAI::Beta::Threads::Run, + event: :"thread.run.incomplete" + } end type thread_run_failed = - { - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.failed" - } + { data: OpenAI::Beta::Threads::Run, event: :"thread.run.failed" } - class ThreadRunFailed < OpenAI::BaseModel - attr_accessor data: OpenAI::Models::Beta::Threads::Run + class ThreadRunFailed < OpenAI::Internal::Type::BaseModel + attr_accessor data: OpenAI::Beta::Threads::Run attr_accessor event: :"thread.run.failed" - def initialize: - ( - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.failed" - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_failed - | 
OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_failed + def initialize: ( + data: OpenAI::Beta::Threads::Run, + ?event: :"thread.run.failed" + ) -> void + + def to_hash: -> { + data: OpenAI::Beta::Threads::Run, + event: :"thread.run.failed" + } end type thread_run_cancelling = - { - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.cancelling" - } + { data: OpenAI::Beta::Threads::Run, event: :"thread.run.cancelling" } - class ThreadRunCancelling < OpenAI::BaseModel - attr_accessor data: OpenAI::Models::Beta::Threads::Run + class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel + attr_accessor data: OpenAI::Beta::Threads::Run attr_accessor event: :"thread.run.cancelling" - def initialize: - ( - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.cancelling" - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_cancelling - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_cancelling + def initialize: ( + data: OpenAI::Beta::Threads::Run, + ?event: :"thread.run.cancelling" + ) -> void + + def to_hash: -> { + data: OpenAI::Beta::Threads::Run, + event: :"thread.run.cancelling" + } end type thread_run_cancelled = - { - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.cancelled" - } + { data: OpenAI::Beta::Threads::Run, event: :"thread.run.cancelled" } - class ThreadRunCancelled < OpenAI::BaseModel - attr_accessor data: OpenAI::Models::Beta::Threads::Run + class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel + attr_accessor data: OpenAI::Beta::Threads::Run attr_accessor event: :"thread.run.cancelled" - def initialize: - ( - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.cancelled" - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_cancelled - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_cancelled + def initialize: ( + data: OpenAI::Beta::Threads::Run, + ?event: :"thread.run.cancelled" + ) -> void + + def to_hash: -> { + data: OpenAI::Beta::Threads::Run, + event: :"thread.run.cancelled" + } end type thread_run_expired = - { - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.expired" - } + { data: OpenAI::Beta::Threads::Run, event: :"thread.run.expired" } - class ThreadRunExpired < OpenAI::BaseModel - attr_accessor data: OpenAI::Models::Beta::Threads::Run + class ThreadRunExpired < OpenAI::Internal::Type::BaseModel + attr_accessor data: OpenAI::Beta::Threads::Run attr_accessor event: :"thread.run.expired" - def initialize: - ( - data: OpenAI::Models::Beta::Threads::Run, - event: :"thread.run.expired" - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_expired - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_expired + def initialize: ( + data: OpenAI::Beta::Threads::Run, + ?event: :"thread.run.expired" + ) -> void + + def to_hash: -> { + data: OpenAI::Beta::Threads::Run, + event: :"thread.run.expired" + } end type thread_run_step_created = { - data: OpenAI::Models::Beta::Threads::Runs::RunStep, + data: OpenAI::Beta::Threads::Runs::RunStep, event: :"thread.run.step.created" } - class ThreadRunStepCreated < OpenAI::BaseModel - attr_accessor data: OpenAI::Models::Beta::Threads::Runs::RunStep + class ThreadRunStepCreated < OpenAI::Internal::Type::BaseModel + attr_accessor data: 
OpenAI::Beta::Threads::Runs::RunStep attr_accessor event: :"thread.run.step.created" - def initialize: - ( - data: OpenAI::Models::Beta::Threads::Runs::RunStep, - event: :"thread.run.step.created" - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_step_created - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_step_created + def initialize: ( + data: OpenAI::Beta::Threads::Runs::RunStep, + ?event: :"thread.run.step.created" + ) -> void + + def to_hash: -> { + data: OpenAI::Beta::Threads::Runs::RunStep, + event: :"thread.run.step.created" + } end type thread_run_step_in_progress = { - data: OpenAI::Models::Beta::Threads::Runs::RunStep, + data: OpenAI::Beta::Threads::Runs::RunStep, event: :"thread.run.step.in_progress" } - class ThreadRunStepInProgress < OpenAI::BaseModel - attr_accessor data: OpenAI::Models::Beta::Threads::Runs::RunStep + class ThreadRunStepInProgress < OpenAI::Internal::Type::BaseModel + attr_accessor data: OpenAI::Beta::Threads::Runs::RunStep attr_accessor event: :"thread.run.step.in_progress" - def initialize: - ( - data: OpenAI::Models::Beta::Threads::Runs::RunStep, - event: :"thread.run.step.in_progress" - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_step_in_progress - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_step_in_progress + def initialize: ( + data: OpenAI::Beta::Threads::Runs::RunStep, + ?event: :"thread.run.step.in_progress" + ) -> void + + def to_hash: -> { + data: OpenAI::Beta::Threads::Runs::RunStep, + event: :"thread.run.step.in_progress" + } end type thread_run_step_delta = { - data: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent, + data: OpenAI::Beta::Threads::Runs::RunStepDeltaEvent, event: :"thread.run.step.delta" } - class ThreadRunStepDelta < OpenAI::BaseModel - attr_accessor data: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent + class ThreadRunStepDelta < OpenAI::Internal::Type::BaseModel + attr_accessor data: OpenAI::Beta::Threads::Runs::RunStepDeltaEvent attr_accessor event: :"thread.run.step.delta" - def initialize: - ( - data: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent, - event: :"thread.run.step.delta" - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_step_delta - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_step_delta + def initialize: ( + data: OpenAI::Beta::Threads::Runs::RunStepDeltaEvent, + ?event: :"thread.run.step.delta" + ) -> void + + def to_hash: -> { + data: OpenAI::Beta::Threads::Runs::RunStepDeltaEvent, + event: :"thread.run.step.delta" + } end type thread_run_step_completed = { - data: OpenAI::Models::Beta::Threads::Runs::RunStep, + data: OpenAI::Beta::Threads::Runs::RunStep, event: :"thread.run.step.completed" } - class ThreadRunStepCompleted < OpenAI::BaseModel - attr_accessor data: OpenAI::Models::Beta::Threads::Runs::RunStep + class ThreadRunStepCompleted < OpenAI::Internal::Type::BaseModel + attr_accessor data: OpenAI::Beta::Threads::Runs::RunStep attr_accessor event: :"thread.run.step.completed" - def initialize: - ( - data: OpenAI::Models::Beta::Threads::Runs::RunStep, - event: :"thread.run.step.completed" - ) -> void - | ( - ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_step_completed - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_step_completed + 
+          def initialize: (
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            ?event: :"thread.run.step.completed"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            event: :"thread.run.step.completed"
+          }
         end

         type thread_run_step_failed =
           {
-            data: OpenAI::Models::Beta::Threads::Runs::RunStep,
+            data: OpenAI::Beta::Threads::Runs::RunStep,
             event: :"thread.run.step.failed"
           }

-        class ThreadRunStepFailed < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Runs::RunStep
+        class ThreadRunStepFailed < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Runs::RunStep

           attr_accessor event: :"thread.run.step.failed"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Runs::RunStep,
-              event: :"thread.run.step.failed"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_step_failed
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_step_failed
+          def initialize: (
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            ?event: :"thread.run.step.failed"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            event: :"thread.run.step.failed"
+          }
         end

         type thread_run_step_cancelled =
           {
-            data: OpenAI::Models::Beta::Threads::Runs::RunStep,
+            data: OpenAI::Beta::Threads::Runs::RunStep,
             event: :"thread.run.step.cancelled"
           }

-        class ThreadRunStepCancelled < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Runs::RunStep
+        class ThreadRunStepCancelled < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Runs::RunStep

           attr_accessor event: :"thread.run.step.cancelled"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Runs::RunStep,
-              event: :"thread.run.step.cancelled"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_step_cancelled
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_step_cancelled
+          def initialize: (
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            ?event: :"thread.run.step.cancelled"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            event: :"thread.run.step.cancelled"
+          }
         end

         type thread_run_step_expired =
           {
-            data: OpenAI::Models::Beta::Threads::Runs::RunStep,
+            data: OpenAI::Beta::Threads::Runs::RunStep,
             event: :"thread.run.step.expired"
           }

-        class ThreadRunStepExpired < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Runs::RunStep
+        class ThreadRunStepExpired < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Runs::RunStep

           attr_accessor event: :"thread.run.step.expired"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Runs::RunStep,
-              event: :"thread.run.step.expired"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::AssistantStreamEvent::thread_run_step_expired
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_run_step_expired
+          def initialize: (
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            ?event: :"thread.run.step.expired"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            event: :"thread.run.step.expired"
+          }
         end

         type thread_message_created =
           {
-            data: OpenAI::Models::Beta::Threads::Message,
+            data: OpenAI::Beta::Threads::Message,
             event: :"thread.message.created"
           }

-        class ThreadMessageCreated < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Message
+        class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Message

           attr_accessor event: :"thread.message.created"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Message,
-              event: :"thread.message.created"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::AssistantStreamEvent::thread_message_created
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_message_created
+          def initialize: (
+            data: OpenAI::Beta::Threads::Message,
+            ?event: :"thread.message.created"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Message,
+            event: :"thread.message.created"
+          }
         end

         type thread_message_in_progress =
           {
-            data: OpenAI::Models::Beta::Threads::Message,
+            data: OpenAI::Beta::Threads::Message,
             event: :"thread.message.in_progress"
           }

-        class ThreadMessageInProgress < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Message
+        class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Message

           attr_accessor event: :"thread.message.in_progress"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Message,
-              event: :"thread.message.in_progress"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::AssistantStreamEvent::thread_message_in_progress
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_message_in_progress
+          def initialize: (
+            data: OpenAI::Beta::Threads::Message,
+            ?event: :"thread.message.in_progress"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Message,
+            event: :"thread.message.in_progress"
+          }
         end

         type thread_message_delta =
           {
-            data: OpenAI::Models::Beta::Threads::MessageDeltaEvent,
+            data: OpenAI::Beta::Threads::MessageDeltaEvent,
             event: :"thread.message.delta"
           }

-        class ThreadMessageDelta < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::MessageDeltaEvent
+        class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::MessageDeltaEvent

           attr_accessor event: :"thread.message.delta"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::MessageDeltaEvent,
-              event: :"thread.message.delta"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::AssistantStreamEvent::thread_message_delta
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_message_delta
+          def initialize: (
+            data: OpenAI::Beta::Threads::MessageDeltaEvent,
+            ?event: :"thread.message.delta"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::MessageDeltaEvent,
+            event: :"thread.message.delta"
+          }
         end

         type thread_message_completed =
           {
-            data: OpenAI::Models::Beta::Threads::Message,
+            data: OpenAI::Beta::Threads::Message,
             event: :"thread.message.completed"
           }

-        class ThreadMessageCompleted < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Message
+        class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Message

           attr_accessor event: :"thread.message.completed"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Message,
-              event: :"thread.message.completed"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::AssistantStreamEvent::thread_message_completed
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_message_completed
+          def initialize: (
+            data: OpenAI::Beta::Threads::Message,
+            ?event: :"thread.message.completed"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Message,
+            event: :"thread.message.completed"
+          }
         end

         type thread_message_incomplete =
           {
-            data: OpenAI::Models::Beta::Threads::Message,
+            data: OpenAI::Beta::Threads::Message,
             event: :"thread.message.incomplete"
           }

-        class ThreadMessageIncomplete < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Message
+        class ThreadMessageIncomplete < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Message

           attr_accessor event: :"thread.message.incomplete"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Message,
-              event: :"thread.message.incomplete"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::AssistantStreamEvent::thread_message_incomplete
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::thread_message_incomplete
+          def initialize: (
+            data: OpenAI::Beta::Threads::Message,
+            ?event: :"thread.message.incomplete"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Message,
+            event: :"thread.message.incomplete"
+          }
         end

-        type error_event = { data: OpenAI::Models::ErrorObject, event: :error }
+        type error_event = { data: OpenAI::ErrorObject, event: :error }

-        class ErrorEvent < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::ErrorObject
+        class ErrorEvent < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::ErrorObject

           attr_accessor event: :error

-          def initialize:
-            (data: OpenAI::Models::ErrorObject, event: :error) -> void
-            | (
-              ?OpenAI::Models::Beta::AssistantStreamEvent::error_event
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (data: OpenAI::ErrorObject, ?event: :error) -> void

-          def to_hash: -> OpenAI::Models::Beta::AssistantStreamEvent::error_event
+          def to_hash: -> { data: OpenAI::ErrorObject, event: :error }
         end

-        private def self.variants: -> [[:"thread.created", OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated], [:"thread.run.created", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated], [:"thread.run.queued", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued], [:"thread.run.in_progress", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress], [:"thread.run.requires_action", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction], [:"thread.run.completed", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted], [:"thread.run.incomplete", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete], [:"thread.run.failed", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed], [:"thread.run.cancelling", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling], [:"thread.run.cancelled", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled], [:"thread.run.expired", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired], [:"thread.run.step.created", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated], [:"thread.run.step.in_progress", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress], [:"thread.run.step.delta", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta], [:"thread.run.step.completed", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted], [:"thread.run.step.failed", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed], [:"thread.run.step.cancelled", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled], [:"thread.run.step.expired", OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired], [:"thread.message.created", OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated], [:"thread.message.in_progress", OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress], [:"thread.message.delta", OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta], [:"thread.message.completed", OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted], [:"thread.message.incomplete", OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete], [:error, OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent]]
+        def self?.variants: -> ::Array[OpenAI::Models::Beta::assistant_stream_event]
       end
     end
   end
diff --git a/sig/openai/models/beta/assistant_tool.rbs b/sig/openai/models/beta/assistant_tool.rbs
index 054c260d..6a65bb27 100644
--- a/sig/openai/models/beta/assistant_tool.rbs
+++ b/sig/openai/models/beta/assistant_tool.rbs
@@ -2,12 +2,14 @@ module OpenAI
   module Models
     module Beta
       type assistant_tool =
-        OpenAI::Models::Beta::CodeInterpreterTool
-        | OpenAI::Models::Beta::FileSearchTool
-        | OpenAI::Models::Beta::FunctionTool
+        OpenAI::Beta::CodeInterpreterTool
+        | OpenAI::Beta::FileSearchTool
+        | OpenAI::Beta::FunctionTool

-      class AssistantTool < OpenAI::Union
-        private def self.variants: -> [[:code_interpreter, OpenAI::Models::Beta::CodeInterpreterTool], [:file_search, OpenAI::Models::Beta::FileSearchTool], [:function, OpenAI::Models::Beta::FunctionTool]]
+      module AssistantTool
+        extend OpenAI::Internal::Type::Union
+
+        def self?.variants: -> ::Array[OpenAI::Models::Beta::assistant_tool]
       end
     end
   end
diff --git a/sig/openai/models/beta/assistant_tool_choice.rbs b/sig/openai/models/beta/assistant_tool_choice.rbs
index e3234bbe..1d80009b 100644
--- a/sig/openai/models/beta/assistant_tool_choice.rbs
+++ b/sig/openai/models/beta/assistant_tool_choice.rbs
@@ -4,38 +4,38 @@ module OpenAI
       type assistant_tool_choice =
         {
           type: OpenAI::Models::Beta::AssistantToolChoice::type_,
-          function: OpenAI::Models::Beta::AssistantToolChoiceFunction
+          function: OpenAI::Beta::AssistantToolChoiceFunction
         }

-      class AssistantToolChoice < OpenAI::BaseModel
+      class AssistantToolChoice < OpenAI::Internal::Type::BaseModel
         attr_accessor type: OpenAI::Models::Beta::AssistantToolChoice::type_

-        attr_reader function: OpenAI::Models::Beta::AssistantToolChoiceFunction?
+        attr_reader function: OpenAI::Beta::AssistantToolChoiceFunction?

         def function=: (
-          OpenAI::Models::Beta::AssistantToolChoiceFunction
-        ) -> OpenAI::Models::Beta::AssistantToolChoiceFunction
+          OpenAI::Beta::AssistantToolChoiceFunction
+        ) -> OpenAI::Beta::AssistantToolChoiceFunction

-        def initialize:
-          (
-            type: OpenAI::Models::Beta::AssistantToolChoice::type_,
-            function: OpenAI::Models::Beta::AssistantToolChoiceFunction
-          ) -> void
-          | (
-            ?OpenAI::Models::Beta::assistant_tool_choice
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          type: OpenAI::Models::Beta::AssistantToolChoice::type_,
+          ?function: OpenAI::Beta::AssistantToolChoiceFunction
+        ) -> void

-        def to_hash: -> OpenAI::Models::Beta::assistant_tool_choice
+        def to_hash: -> {
+          type: OpenAI::Models::Beta::AssistantToolChoice::type_,
+          function: OpenAI::Beta::AssistantToolChoiceFunction
+        }

         type type_ = :function | :code_interpreter | :file_search

-        class Type < OpenAI::Enum
+        module Type
+          extend OpenAI::Internal::Type::Enum
+
           FUNCTION: :function
           CODE_INTERPRETER: :code_interpreter
           FILE_SEARCH: :file_search

-          def self.values: -> ::Array[OpenAI::Models::Beta::AssistantToolChoice::type_]
+          def self?.values: -> ::Array[OpenAI::Models::Beta::AssistantToolChoice::type_]
         end
       end
     end
   end
diff --git a/sig/openai/models/beta/assistant_tool_choice_function.rbs b/sig/openai/models/beta/assistant_tool_choice_function.rbs
index f10e8dd0..b97b9891 100644
--- a/sig/openai/models/beta/assistant_tool_choice_function.rbs
+++ b/sig/openai/models/beta/assistant_tool_choice_function.rbs
@@ -3,17 +3,12 @@ module OpenAI
     module Beta
       type assistant_tool_choice_function = { name: String }

-      class AssistantToolChoiceFunction < OpenAI::BaseModel
+      class AssistantToolChoiceFunction < OpenAI::Internal::Type::BaseModel
         attr_accessor name: String

-        def initialize:
-          (name: String) -> void
-          | (
-            ?OpenAI::Models::Beta::assistant_tool_choice_function
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (name: String) -> void

-        def to_hash: -> OpenAI::Models::Beta::assistant_tool_choice_function
+        def to_hash: -> { name: String }
       end
     end
   end
diff --git a/sig/openai/models/beta/assistant_tool_choice_option.rbs b/sig/openai/models/beta/assistant_tool_choice_option.rbs
index e8243ffd..e879d0d5 100644
--- a/sig/openai/models/beta/assistant_tool_choice_option.rbs
+++ b/sig/openai/models/beta/assistant_tool_choice_option.rbs
@@ -3,20 +3,24 @@ module OpenAI
     module Beta
       type assistant_tool_choice_option =
         OpenAI::Models::Beta::AssistantToolChoiceOption::auto
-        | OpenAI::Models::Beta::AssistantToolChoice
+        | OpenAI::Beta::AssistantToolChoice
+
+      module AssistantToolChoiceOption
+        extend OpenAI::Internal::Type::Union

-      class AssistantToolChoiceOption < OpenAI::Union
         type auto = :none | :auto | :required

-        class Auto < OpenAI::Enum
+        module Auto
+          extend OpenAI::Internal::Type::Enum
+
           NONE: :none
           AUTO: :auto
           REQUIRED: :required

-          def self.values: -> ::Array[OpenAI::Models::Beta::AssistantToolChoiceOption::auto]
+          def self?.values: -> ::Array[OpenAI::Models::Beta::AssistantToolChoiceOption::auto]
         end

-        private def self.variants: -> [[nil, OpenAI::Models::Beta::AssistantToolChoiceOption::auto], [nil, OpenAI::Models::Beta::AssistantToolChoice]]
+        def self?.variants: -> ::Array[OpenAI::Models::Beta::assistant_tool_choice_option]
       end
     end
   end
diff --git a/sig/openai/models/beta/assistant_update_params.rbs b/sig/openai/models/beta/assistant_update_params.rbs
index 8e5079c9..9d1254f3 100644
--- a/sig/openai/models/beta/assistant_update_params.rbs
+++ b/sig/openai/models/beta/assistant_update_params.rbs
@@ -11,15 +11,15 @@ module OpenAI
           reasoning_effort: OpenAI::Models::reasoning_effort?,
           response_format: OpenAI::Models::Beta::assistant_response_format_option?,
           temperature: Float?,
-          tool_resources: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources?,
+          tool_resources: OpenAI::Beta::AssistantUpdateParams::ToolResources?,
           tools: ::Array[OpenAI::Models::Beta::assistant_tool],
           top_p: Float?
         }
-        & OpenAI::request_parameters
+        & OpenAI::Internal::Type::request_parameters

-      class AssistantUpdateParams < OpenAI::BaseModel
-        extend OpenAI::RequestParameters::Converter
-        include OpenAI::RequestParameters
+      class AssistantUpdateParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters

         attr_accessor description: String?

@@ -41,7 +41,7 @@ module OpenAI

         attr_accessor temperature: Float?

-        attr_accessor tool_resources: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources?
+        attr_accessor tool_resources: OpenAI::Beta::AssistantUpdateParams::ToolResources?

         attr_reader tools: ::Array[OpenAI::Models::Beta::assistant_tool]?

@@ -51,166 +51,181 @@ module OpenAI

         attr_accessor top_p: Float?

-        def initialize:
-          (
-            description: String?,
-            instructions: String?,
-            metadata: OpenAI::Models::metadata?,
-            model: OpenAI::Models::Beta::AssistantUpdateParams::model,
-            name: String?,
-            reasoning_effort: OpenAI::Models::reasoning_effort?,
-            response_format: OpenAI::Models::Beta::assistant_response_format_option?,
-            temperature: Float?,
-            tool_resources: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources?,
-            tools: ::Array[OpenAI::Models::Beta::assistant_tool],
-            top_p: Float?,
-            request_options: OpenAI::request_opts
-          ) -> void
-          | (
-            ?OpenAI::Models::Beta::assistant_update_params
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Beta::assistant_update_params
+        def initialize: (
+          ?description: String?,
+          ?instructions: String?,
+          ?metadata: OpenAI::Models::metadata?,
+          ?model: OpenAI::Models::Beta::AssistantUpdateParams::model,
+          ?name: String?,
+          ?reasoning_effort: OpenAI::Models::reasoning_effort?,
+          ?response_format: OpenAI::Models::Beta::assistant_response_format_option?,
+          ?temperature: Float?,
+          ?tool_resources: OpenAI::Beta::AssistantUpdateParams::ToolResources?,
+          ?tools: ::Array[OpenAI::Models::Beta::assistant_tool],
+          ?top_p: Float?,
+          ?request_options: OpenAI::request_opts
+        ) -> void
+
+        def to_hash: -> {
+          description: String?,
+          instructions: String?,
+          metadata: OpenAI::Models::metadata?,
+          model: OpenAI::Models::Beta::AssistantUpdateParams::model,
+          name: String?,
+          reasoning_effort: OpenAI::Models::reasoning_effort?,
+          response_format: OpenAI::Models::Beta::assistant_response_format_option?,
+          temperature: Float?,
+          tool_resources: OpenAI::Beta::AssistantUpdateParams::ToolResources?,
+          tools: ::Array[OpenAI::Models::Beta::assistant_tool],
+          top_p: Float?,
+          request_options: OpenAI::RequestOptions
+        }

         type model =
           String
-          | OpenAI::Models::Beta::AssistantUpdateParams::Model::assistant_supported_models
-
-        class Model < OpenAI::Union
-          type assistant_supported_models =
-            :"o3-mini"
-            | :"o3-mini-2025-01-31"
-            | :o1
-            | :"o1-2024-12-17"
-            | :"gpt-4o"
-            | :"gpt-4o-2024-11-20"
-            | :"gpt-4o-2024-08-06"
-            | :"gpt-4o-2024-05-13"
-            | :"gpt-4o-mini"
-            | :"gpt-4o-mini-2024-07-18"
-            | :"gpt-4.5-preview"
-            | :"gpt-4.5-preview-2025-02-27"
-            | :"gpt-4-turbo"
-            | :"gpt-4-turbo-2024-04-09"
-            | :"gpt-4-0125-preview"
-            | :"gpt-4-turbo-preview"
-            | :"gpt-4-1106-preview"
-            | :"gpt-4-vision-preview"
-            | :"gpt-4"
-            | :"gpt-4-0314"
-            | :"gpt-4-0613"
-            | :"gpt-4-32k"
-            | :"gpt-4-32k-0314"
-            | :"gpt-4-32k-0613"
-            | :"gpt-3.5-turbo"
-            | :"gpt-3.5-turbo-16k"
-            | :"gpt-3.5-turbo-0613"
-            | :"gpt-3.5-turbo-1106"
-            | :"gpt-3.5-turbo-0125"
-            | :"gpt-3.5-turbo-16k-0613"
-
-          class AssistantSupportedModels < OpenAI::Enum
-            O3_MINI: :"o3-mini"
-            O3_MINI_2025_01_31: :"o3-mini-2025-01-31"
-            O1: :o1
-            O1_2024_12_17: :"o1-2024-12-17"
-            GPT_4O: :"gpt-4o"
-            GPT_4O_2024_11_20: :"gpt-4o-2024-11-20"
-            GPT_4O_2024_08_06: :"gpt-4o-2024-08-06"
-            GPT_4O_2024_05_13: :"gpt-4o-2024-05-13"
-            GPT_4O_MINI: :"gpt-4o-mini"
-            GPT_4O_MINI_2024_07_18: :"gpt-4o-mini-2024-07-18"
-            GPT_4_5_PREVIEW: :"gpt-4.5-preview"
-            GPT_4_5_PREVIEW_2025_02_27: :"gpt-4.5-preview-2025-02-27"
-            GPT_4_TURBO: :"gpt-4-turbo"
-            GPT_4_TURBO_2024_04_09: :"gpt-4-turbo-2024-04-09"
-            GPT_4_0125_PREVIEW: :"gpt-4-0125-preview"
-            GPT_4_TURBO_PREVIEW: :"gpt-4-turbo-preview"
-            GPT_4_1106_PREVIEW: :"gpt-4-1106-preview"
-            GPT_4_VISION_PREVIEW: :"gpt-4-vision-preview"
-            GPT_4: :"gpt-4"
-            GPT_4_0314: :"gpt-4-0314"
-            GPT_4_0613: :"gpt-4-0613"
-            GPT_4_32K: :"gpt-4-32k"
-            GPT_4_32K_0314: :"gpt-4-32k-0314"
-            GPT_4_32K_0613: :"gpt-4-32k-0613"
-            GPT_3_5_TURBO: :"gpt-3.5-turbo"
-            GPT_3_5_TURBO_16K: :"gpt-3.5-turbo-16k"
-            GPT_3_5_TURBO_0613: :"gpt-3.5-turbo-0613"
-            GPT_3_5_TURBO_1106: :"gpt-3.5-turbo-1106"
-            GPT_3_5_TURBO_0125: :"gpt-3.5-turbo-0125"
-            GPT_3_5_TURBO_16K_0613: :"gpt-3.5-turbo-16k-0613"
-
-            def self.values: -> ::Array[OpenAI::Models::Beta::AssistantUpdateParams::Model::assistant_supported_models]
-          end
-
-          private def self.variants: -> [[nil, String], [nil, OpenAI::Models::Beta::AssistantUpdateParams::Model::assistant_supported_models]]
+          | :"gpt-5"
+          | :"gpt-5-mini"
+          | :"gpt-5-nano"
+          | :"gpt-5-2025-08-07"
+          | :"gpt-5-mini-2025-08-07"
+          | :"gpt-5-nano-2025-08-07"
+          | :"gpt-4.1"
+          | :"gpt-4.1-mini"
+          | :"gpt-4.1-nano"
+          | :"gpt-4.1-2025-04-14"
+          | :"gpt-4.1-mini-2025-04-14"
+          | :"gpt-4.1-nano-2025-04-14"
+          | :"o3-mini"
+          | :"o3-mini-2025-01-31"
+          | :o1
+          | :"o1-2024-12-17"
+          | :"gpt-4o"
+          | :"gpt-4o-2024-11-20"
+          | :"gpt-4o-2024-08-06"
+          | :"gpt-4o-2024-05-13"
+          | :"gpt-4o-mini"
+          | :"gpt-4o-mini-2024-07-18"
+          | :"gpt-4.5-preview"
+          | :"gpt-4.5-preview-2025-02-27"
+          | :"gpt-4-turbo"
+          | :"gpt-4-turbo-2024-04-09"
+          | :"gpt-4-0125-preview"
+          | :"gpt-4-turbo-preview"
+          | :"gpt-4-1106-preview"
+          | :"gpt-4-vision-preview"
+          | :"gpt-4"
+          | :"gpt-4-0314"
+          | :"gpt-4-0613"
+          | :"gpt-4-32k"
+          | :"gpt-4-32k-0314"
+          | :"gpt-4-32k-0613"
+          | :"gpt-3.5-turbo"
+          | :"gpt-3.5-turbo-16k"
+          | :"gpt-3.5-turbo-0613"
+          | :"gpt-3.5-turbo-1106"
+          | :"gpt-3.5-turbo-0125"
+          | :"gpt-3.5-turbo-16k-0613"
+
+        module Model
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Beta::AssistantUpdateParams::model]
+
+          GPT_5: :"gpt-5"
+          GPT_5_MINI: :"gpt-5-mini"
+          GPT_5_NANO: :"gpt-5-nano"
+          GPT_5_2025_08_07: :"gpt-5-2025-08-07"
+          GPT_5_MINI_2025_08_07: :"gpt-5-mini-2025-08-07"
+          GPT_5_NANO_2025_08_07: :"gpt-5-nano-2025-08-07"
+          GPT_4_1: :"gpt-4.1"
+          GPT_4_1_MINI: :"gpt-4.1-mini"
+          GPT_4_1_NANO: :"gpt-4.1-nano"
+          GPT_4_1_2025_04_14: :"gpt-4.1-2025-04-14"
+          GPT_4_1_MINI_2025_04_14: :"gpt-4.1-mini-2025-04-14"
+          GPT_4_1_NANO_2025_04_14: :"gpt-4.1-nano-2025-04-14"
+          O3_MINI: :"o3-mini"
+          O3_MINI_2025_01_31: :"o3-mini-2025-01-31"
+          O1: :o1
+          O1_2024_12_17: :"o1-2024-12-17"
+          GPT_4O: :"gpt-4o"
+          GPT_4O_2024_11_20: :"gpt-4o-2024-11-20"
+          GPT_4O_2024_08_06: :"gpt-4o-2024-08-06"
+          GPT_4O_2024_05_13: :"gpt-4o-2024-05-13"
+          GPT_4O_MINI: :"gpt-4o-mini"
+          GPT_4O_MINI_2024_07_18: :"gpt-4o-mini-2024-07-18"
+          GPT_4_5_PREVIEW: :"gpt-4.5-preview"
+          GPT_4_5_PREVIEW_2025_02_27: :"gpt-4.5-preview-2025-02-27"
+          GPT_4_TURBO: :"gpt-4-turbo"
+          GPT_4_TURBO_2024_04_09: :"gpt-4-turbo-2024-04-09"
+          GPT_4_0125_PREVIEW: :"gpt-4-0125-preview"
+          GPT_4_TURBO_PREVIEW: :"gpt-4-turbo-preview"
+          GPT_4_1106_PREVIEW: :"gpt-4-1106-preview"
+          GPT_4_VISION_PREVIEW: :"gpt-4-vision-preview"
+          GPT_4: :"gpt-4"
+          GPT_4_0314: :"gpt-4-0314"
+          GPT_4_0613: :"gpt-4-0613"
+          GPT_4_32K: :"gpt-4-32k"
+          GPT_4_32K_0314: :"gpt-4-32k-0314"
+          GPT_4_32K_0613: :"gpt-4-32k-0613"
+          GPT_3_5_TURBO: :"gpt-3.5-turbo"
+          GPT_3_5_TURBO_16K: :"gpt-3.5-turbo-16k"
+          GPT_3_5_TURBO_0613: :"gpt-3.5-turbo-0613"
+          GPT_3_5_TURBO_1106: :"gpt-3.5-turbo-1106"
+          GPT_3_5_TURBO_0125: :"gpt-3.5-turbo-0125"
+          GPT_3_5_TURBO_16K_0613: :"gpt-3.5-turbo-16k-0613"
         end

         type tool_resources =
           {
-            code_interpreter: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter,
-            file_search: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch
+            code_interpreter: OpenAI::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter,
+            file_search: OpenAI::Beta::AssistantUpdateParams::ToolResources::FileSearch
           }

-        class ToolResources < OpenAI::BaseModel
-          attr_reader code_interpreter: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter?
+        class ToolResources < OpenAI::Internal::Type::BaseModel
+          attr_reader code_interpreter: OpenAI::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter?

           def code_interpreter=: (
-            OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter
-          ) -> OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter
+            OpenAI::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter
+          ) -> OpenAI::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter

-          attr_reader file_search: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch?
+          attr_reader file_search: OpenAI::Beta::AssistantUpdateParams::ToolResources::FileSearch?

           def file_search=: (
-            OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch
-          ) -> OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch
+            OpenAI::Beta::AssistantUpdateParams::ToolResources::FileSearch
+          ) -> OpenAI::Beta::AssistantUpdateParams::ToolResources::FileSearch

-          def initialize:
-            (
-              code_interpreter: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter,
-              file_search: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::AssistantUpdateParams::tool_resources
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            ?code_interpreter: OpenAI::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter,
+            ?file_search: OpenAI::Beta::AssistantUpdateParams::ToolResources::FileSearch
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::AssistantUpdateParams::tool_resources
+          def to_hash: -> {
+            code_interpreter: OpenAI::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter,
+            file_search: OpenAI::Beta::AssistantUpdateParams::ToolResources::FileSearch
+          }

           type code_interpreter = { file_ids: ::Array[String] }

-          class CodeInterpreter < OpenAI::BaseModel
+          class CodeInterpreter < OpenAI::Internal::Type::BaseModel
             attr_reader file_ids: ::Array[String]?

             def file_ids=: (::Array[String]) -> ::Array[String]

-            def initialize:
-              (file_ids: ::Array[String]) -> void
-              | (
-                ?OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::code_interpreter
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (?file_ids: ::Array[String]) -> void

-            def to_hash: -> OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::code_interpreter
+            def to_hash: -> { file_ids: ::Array[String] }
           end

           type file_search = { vector_store_ids: ::Array[String] }

-          class FileSearch < OpenAI::BaseModel
+          class FileSearch < OpenAI::Internal::Type::BaseModel
             attr_reader vector_store_ids: ::Array[String]?

             def vector_store_ids=: (::Array[String]) -> ::Array[String]

-            def initialize:
-              (vector_store_ids: ::Array[String]) -> void
-              | (
-                ?OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::file_search
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (?vector_store_ids: ::Array[String]) -> void

-            def to_hash: -> OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::file_search
+            def to_hash: -> { vector_store_ids: ::Array[String] }
           end
         end
       end
diff --git a/sig/openai/models/beta/code_interpreter_tool.rbs b/sig/openai/models/beta/code_interpreter_tool.rbs
index 31372e23..bd9a6760 100644
--- a/sig/openai/models/beta/code_interpreter_tool.rbs
+++ b/sig/openai/models/beta/code_interpreter_tool.rbs
@@ -3,17 +3,12 @@ module OpenAI
     module Beta
       type code_interpreter_tool = { type: :code_interpreter }

-      class CodeInterpreterTool < OpenAI::BaseModel
+      class CodeInterpreterTool < OpenAI::Internal::Type::BaseModel
         attr_accessor type: :code_interpreter

-        def initialize:
-          (type: :code_interpreter) -> void
-          | (
-            ?OpenAI::Models::Beta::code_interpreter_tool
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (?type: :code_interpreter) -> void

-        def to_hash: -> OpenAI::Models::Beta::code_interpreter_tool
+        def to_hash: -> { type: :code_interpreter }
       end
     end
   end
diff --git a/sig/openai/models/beta/file_search_tool.rbs b/sig/openai/models/beta/file_search_tool.rbs
index 731166b6..85f54d10 100644
--- a/sig/openai/models/beta/file_search_tool.rbs
+++ b/sig/openai/models/beta/file_search_tool.rbs
@@ -4,57 +4,54 @@ module OpenAI
       type file_search_tool =
         {
           type: :file_search,
-          file_search: OpenAI::Models::Beta::FileSearchTool::FileSearch
+          file_search: OpenAI::Beta::FileSearchTool::FileSearch
         }

-      class FileSearchTool < OpenAI::BaseModel
+      class FileSearchTool < OpenAI::Internal::Type::BaseModel
         attr_accessor type: :file_search

-        attr_reader file_search: OpenAI::Models::Beta::FileSearchTool::FileSearch?
+        attr_reader file_search: OpenAI::Beta::FileSearchTool::FileSearch?

         def file_search=: (
-          OpenAI::Models::Beta::FileSearchTool::FileSearch
-        ) -> OpenAI::Models::Beta::FileSearchTool::FileSearch
+          OpenAI::Beta::FileSearchTool::FileSearch
+        ) -> OpenAI::Beta::FileSearchTool::FileSearch

-        def initialize:
-          (
-            file_search: OpenAI::Models::Beta::FileSearchTool::FileSearch,
-            type: :file_search
-          ) -> void
-          | (
-            ?OpenAI::Models::Beta::file_search_tool | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          ?file_search: OpenAI::Beta::FileSearchTool::FileSearch,
+          ?type: :file_search
+        ) -> void

-        def to_hash: -> OpenAI::Models::Beta::file_search_tool
+        def to_hash: -> {
+          type: :file_search,
+          file_search: OpenAI::Beta::FileSearchTool::FileSearch
+        }

         type file_search =
           {
             max_num_results: Integer,
-            ranking_options: OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions
+            ranking_options: OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions
           }

-        class FileSearch < OpenAI::BaseModel
+        class FileSearch < OpenAI::Internal::Type::BaseModel
           attr_reader max_num_results: Integer?

           def max_num_results=: (Integer) -> Integer

-          attr_reader ranking_options: OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions?
+          attr_reader ranking_options: OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions?

           def ranking_options=: (
-            OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions
-          ) -> OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions
+            OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions
+          ) -> OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions

-          def initialize:
-            (
-              max_num_results: Integer,
-              ranking_options: OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::FileSearchTool::file_search
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            ?max_num_results: Integer,
+            ?ranking_options: OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::FileSearchTool::file_search
+          def to_hash: -> {
+            max_num_results: Integer,
+            ranking_options: OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions
+          }

           type ranking_options =
             {
@@ -62,7 +59,7 @@ module OpenAI
               ranker: OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::ranker
             }

-          class RankingOptions < OpenAI::BaseModel
+          class RankingOptions < OpenAI::Internal::Type::BaseModel
             attr_accessor score_threshold: Float

             attr_reader ranker: OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::ranker?
@@ -71,25 +68,25 @@ module OpenAI
               OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::ranker
             ) -> OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::ranker

-            def initialize:
-              (
-                score_threshold: Float,
-                ranker: OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::ranker
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::FileSearchTool::FileSearch::ranking_options
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              score_threshold: Float,
+              ?ranker: OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::ranker
+            ) -> void

-            def to_hash: -> OpenAI::Models::Beta::FileSearchTool::FileSearch::ranking_options
+            def to_hash: -> {
+              score_threshold: Float,
+              ranker: OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::ranker
+            }

             type ranker = :auto | :default_2024_08_21

-            class Ranker < OpenAI::Enum
+            module Ranker
+              extend OpenAI::Internal::Type::Enum
+
               AUTO: :auto
               DEFAULT_2024_08_21: :default_2024_08_21

-              def self.values: -> ::Array[OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::ranker]
+              def self?.values: -> ::Array[OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::ranker]
             end
           end
         end
diff --git a/sig/openai/models/beta/function_tool.rbs b/sig/openai/models/beta/function_tool.rbs
index 43a63e37..8798cc58 100644
--- a/sig/openai/models/beta/function_tool.rbs
+++ b/sig/openai/models/beta/function_tool.rbs
@@ -2,23 +2,22 @@ module OpenAI
   module Models
     module Beta
       type function_tool =
-        { function: OpenAI::Models::FunctionDefinition, type: :function }
+        { function: OpenAI::FunctionDefinition, type: :function }

-      class FunctionTool < OpenAI::BaseModel
-        attr_accessor function: OpenAI::Models::FunctionDefinition
+      class FunctionTool < OpenAI::Internal::Type::BaseModel
+        attr_accessor function: OpenAI::FunctionDefinition

         attr_accessor type: :function

-        def initialize:
-          (
-            function: OpenAI::Models::FunctionDefinition,
-            type: :function
-          ) -> void
-          | (
-            ?OpenAI::Models::Beta::function_tool | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          function: OpenAI::FunctionDefinition,
+          ?type: :function
+        ) -> void

-        def to_hash: -> OpenAI::Models::Beta::function_tool
+        def to_hash: -> {
+          function: OpenAI::FunctionDefinition,
+          type: :function
+        }
       end
     end
   end
diff --git a/sig/openai/models/beta/message_stream_event.rbs b/sig/openai/models/beta/message_stream_event.rbs
index 376e54bc..eb4aed3e 100644
--- a/sig/openai/models/beta/message_stream_event.rbs
+++ b/sig/openai/models/beta/message_stream_event.rbs
@@ -2,134 +2,126 @@ module OpenAI
   module Models
     module Beta
       type message_stream_event =
-        OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCreated
-        | OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageInProgress
-        | OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageDelta
-        | OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCompleted
-        | OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageIncomplete
+        OpenAI::Beta::MessageStreamEvent::ThreadMessageCreated
+        | OpenAI::Beta::MessageStreamEvent::ThreadMessageInProgress
+        | OpenAI::Beta::MessageStreamEvent::ThreadMessageDelta
+        | OpenAI::Beta::MessageStreamEvent::ThreadMessageCompleted
+        | OpenAI::Beta::MessageStreamEvent::ThreadMessageIncomplete
+
+      module MessageStreamEvent
+        extend OpenAI::Internal::Type::Union

-      class MessageStreamEvent < OpenAI::Union
         type thread_message_created =
           {
-            data: OpenAI::Models::Beta::Threads::Message,
+            data: OpenAI::Beta::Threads::Message,
             event: :"thread.message.created"
           }

-        class ThreadMessageCreated < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Message
+        class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Message

           attr_accessor event: :"thread.message.created"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Message,
-              event: :"thread.message.created"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::MessageStreamEvent::thread_message_created
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::MessageStreamEvent::thread_message_created
+          def initialize: (
+            data: OpenAI::Beta::Threads::Message,
+            ?event: :"thread.message.created"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Message,
+            event: :"thread.message.created"
+          }
         end

         type thread_message_in_progress =
           {
-            data: OpenAI::Models::Beta::Threads::Message,
+            data: OpenAI::Beta::Threads::Message,
             event: :"thread.message.in_progress"
           }

-        class ThreadMessageInProgress < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Message
+        class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Message

           attr_accessor event: :"thread.message.in_progress"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Message,
-              event: :"thread.message.in_progress"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::MessageStreamEvent::thread_message_in_progress
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::MessageStreamEvent::thread_message_in_progress
+          def initialize: (
+            data: OpenAI::Beta::Threads::Message,
+            ?event: :"thread.message.in_progress"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Message,
+            event: :"thread.message.in_progress"
+          }
         end

         type thread_message_delta =
           {
-            data: OpenAI::Models::Beta::Threads::MessageDeltaEvent,
+            data: OpenAI::Beta::Threads::MessageDeltaEvent,
             event: :"thread.message.delta"
           }

-        class ThreadMessageDelta < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::MessageDeltaEvent
+        class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::MessageDeltaEvent

           attr_accessor event: :"thread.message.delta"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::MessageDeltaEvent,
-              event: :"thread.message.delta"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::MessageStreamEvent::thread_message_delta
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::MessageStreamEvent::thread_message_delta
+          def initialize: (
+            data: OpenAI::Beta::Threads::MessageDeltaEvent,
+            ?event: :"thread.message.delta"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::MessageDeltaEvent,
+            event: :"thread.message.delta"
+          }
         end

         type thread_message_completed =
           {
-            data: OpenAI::Models::Beta::Threads::Message,
+            data: OpenAI::Beta::Threads::Message,
             event: :"thread.message.completed"
           }

-        class ThreadMessageCompleted < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Message
+        class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Message

           attr_accessor event: :"thread.message.completed"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Message,
-              event: :"thread.message.completed"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::MessageStreamEvent::thread_message_completed
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::MessageStreamEvent::thread_message_completed
+          def initialize: (
+            data: OpenAI::Beta::Threads::Message,
+            ?event: :"thread.message.completed"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Message,
+            event: :"thread.message.completed"
+          }
         end

         type thread_message_incomplete =
           {
-            data: OpenAI::Models::Beta::Threads::Message,
+            data: OpenAI::Beta::Threads::Message,
             event: :"thread.message.incomplete"
           }

-        class ThreadMessageIncomplete < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Message
+        class ThreadMessageIncomplete < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Message

           attr_accessor event: :"thread.message.incomplete"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Message,
-              event: :"thread.message.incomplete"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::MessageStreamEvent::thread_message_incomplete
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::MessageStreamEvent::thread_message_incomplete
+          def initialize: (
+            data: OpenAI::Beta::Threads::Message,
+            ?event: :"thread.message.incomplete"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Message,
+            event: :"thread.message.incomplete"
+          }
         end

-        private def self.variants: -> [[:"thread.message.created", OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCreated], [:"thread.message.in_progress", OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageInProgress], [:"thread.message.delta", OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageDelta], [:"thread.message.completed", OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCompleted], [:"thread.message.incomplete", OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageIncomplete]]
+        def self?.variants: -> ::Array[OpenAI::Models::Beta::message_stream_event]
       end
     end
   end
diff --git a/sig/openai/models/beta/run_step_stream_event.rbs b/sig/openai/models/beta/run_step_stream_event.rbs
index d8fa10c9..cf3454d9 100644
--- a/sig/openai/models/beta/run_step_stream_event.rbs
+++ b/sig/openai/models/beta/run_step_stream_event.rbs
@@ -2,184 +2,172 @@ module OpenAI
   module Models
     module Beta
       type run_step_stream_event =
-        OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCreated
-        | OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepInProgress
-        | OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepDelta
-        | OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCompleted
-        | OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepFailed
-        | OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCancelled
-        | OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepExpired
-
-      class RunStepStreamEvent < OpenAI::Union
+        OpenAI::Beta::RunStepStreamEvent::ThreadRunStepCreated
+        | OpenAI::Beta::RunStepStreamEvent::ThreadRunStepInProgress
+        | OpenAI::Beta::RunStepStreamEvent::ThreadRunStepDelta
+        | OpenAI::Beta::RunStepStreamEvent::ThreadRunStepCompleted
+        | OpenAI::Beta::RunStepStreamEvent::ThreadRunStepFailed
+        | OpenAI::Beta::RunStepStreamEvent::ThreadRunStepCancelled
+        | OpenAI::Beta::RunStepStreamEvent::ThreadRunStepExpired
+
+      module RunStepStreamEvent
+        extend OpenAI::Internal::Type::Union
+
        type thread_run_step_created =
           {
-            data: OpenAI::Models::Beta::Threads::Runs::RunStep,
+            data: OpenAI::Beta::Threads::Runs::RunStep,
             event: :"thread.run.step.created"
           }

-        class ThreadRunStepCreated < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Runs::RunStep
+        class ThreadRunStepCreated < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Runs::RunStep

           attr_accessor event: :"thread.run.step.created"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Runs::RunStep,
-              event: :"thread.run.step.created"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStepStreamEvent::thread_run_step_created
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStepStreamEvent::thread_run_step_created
+          def initialize: (
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            ?event: :"thread.run.step.created"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            event: :"thread.run.step.created"
+          }
         end

         type thread_run_step_in_progress =
           {
-            data: OpenAI::Models::Beta::Threads::Runs::RunStep,
+            data: OpenAI::Beta::Threads::Runs::RunStep,
             event: :"thread.run.step.in_progress"
           }

-        class ThreadRunStepInProgress < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Runs::RunStep
+        class ThreadRunStepInProgress < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Runs::RunStep

           attr_accessor event: :"thread.run.step.in_progress"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Runs::RunStep,
-              event: :"thread.run.step.in_progress"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStepStreamEvent::thread_run_step_in_progress
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStepStreamEvent::thread_run_step_in_progress
+          def initialize: (
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            ?event: :"thread.run.step.in_progress"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            event: :"thread.run.step.in_progress"
+          }
         end

         type thread_run_step_delta =
           {
-            data: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent,
+            data: OpenAI::Beta::Threads::Runs::RunStepDeltaEvent,
             event: :"thread.run.step.delta"
           }

-        class ThreadRunStepDelta < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent
+        class ThreadRunStepDelta < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Runs::RunStepDeltaEvent

           attr_accessor event: :"thread.run.step.delta"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent,
-              event: :"thread.run.step.delta"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStepStreamEvent::thread_run_step_delta
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStepStreamEvent::thread_run_step_delta
+          def initialize: (
+            data: OpenAI::Beta::Threads::Runs::RunStepDeltaEvent,
+            ?event: :"thread.run.step.delta"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Runs::RunStepDeltaEvent,
+            event: :"thread.run.step.delta"
+          }
         end

         type thread_run_step_completed =
           {
-            data: OpenAI::Models::Beta::Threads::Runs::RunStep,
+            data: OpenAI::Beta::Threads::Runs::RunStep,
             event: :"thread.run.step.completed"
           }

-        class ThreadRunStepCompleted < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Runs::RunStep
+        class ThreadRunStepCompleted < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Runs::RunStep

           attr_accessor event: :"thread.run.step.completed"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Runs::RunStep,
-              event: :"thread.run.step.completed"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStepStreamEvent::thread_run_step_completed
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStepStreamEvent::thread_run_step_completed
+          def initialize: (
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            ?event: :"thread.run.step.completed"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            event: :"thread.run.step.completed"
+          }
         end

         type thread_run_step_failed =
           {
-            data: OpenAI::Models::Beta::Threads::Runs::RunStep,
+            data: OpenAI::Beta::Threads::Runs::RunStep,
             event: :"thread.run.step.failed"
           }

-        class ThreadRunStepFailed < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Runs::RunStep
+        class ThreadRunStepFailed < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Runs::RunStep

           attr_accessor event: :"thread.run.step.failed"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Runs::RunStep,
-              event: :"thread.run.step.failed"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStepStreamEvent::thread_run_step_failed
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStepStreamEvent::thread_run_step_failed
+          def initialize: (
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            ?event: :"thread.run.step.failed"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            event: :"thread.run.step.failed"
+          }
         end

         type thread_run_step_cancelled =
           {
-            data: OpenAI::Models::Beta::Threads::Runs::RunStep,
+            data: OpenAI::Beta::Threads::Runs::RunStep,
             event: :"thread.run.step.cancelled"
           }

-        class ThreadRunStepCancelled < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Runs::RunStep
+        class ThreadRunStepCancelled < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Runs::RunStep

           attr_accessor event: :"thread.run.step.cancelled"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Runs::RunStep,
-              event: :"thread.run.step.cancelled"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStepStreamEvent::thread_run_step_cancelled
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStepStreamEvent::thread_run_step_cancelled
+          def initialize: (
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            ?event: :"thread.run.step.cancelled"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            event: :"thread.run.step.cancelled"
+          }
         end

         type thread_run_step_expired =
           {
-            data: OpenAI::Models::Beta::Threads::Runs::RunStep,
+            data: OpenAI::Beta::Threads::Runs::RunStep,
             event: :"thread.run.step.expired"
           }

-        class ThreadRunStepExpired < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Runs::RunStep
+        class ThreadRunStepExpired < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Runs::RunStep

           attr_accessor event: :"thread.run.step.expired"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Runs::RunStep,
-              event: :"thread.run.step.expired"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStepStreamEvent::thread_run_step_expired
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStepStreamEvent::thread_run_step_expired
+          def initialize: (
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            ?event: :"thread.run.step.expired"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Runs::RunStep,
+            event: :"thread.run.step.expired"
+          }
         end

-        private def self.variants: -> [[:"thread.run.step.created", OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCreated], [:"thread.run.step.in_progress", OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepInProgress], [:"thread.run.step.delta", OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepDelta], [:"thread.run.step.completed", OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCompleted], [:"thread.run.step.failed", OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepFailed], [:"thread.run.step.cancelled", OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCancelled], [:"thread.run.step.expired", OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepExpired]]
+        def self?.variants: -> ::Array[OpenAI::Models::Beta::run_step_stream_event]
       end
     end
   end
diff --git a/sig/openai/models/beta/run_stream_event.rbs b/sig/openai/models/beta/run_stream_event.rbs
index ae28ae5e..4bffb3ac 100644
--- a/sig/openai/models/beta/run_stream_event.rbs
+++ b/sig/openai/models/beta/run_stream_event.rbs
@@ -2,259 +2,214 @@ module OpenAI
   module Models
     module Beta
       type run_stream_event =
-        OpenAI::Models::Beta::RunStreamEvent::ThreadRunCreated
-        | OpenAI::Models::Beta::RunStreamEvent::ThreadRunQueued
-        | OpenAI::Models::Beta::RunStreamEvent::ThreadRunInProgress
-        | OpenAI::Models::Beta::RunStreamEvent::ThreadRunRequiresAction
-        | OpenAI::Models::Beta::RunStreamEvent::ThreadRunCompleted
-        | OpenAI::Models::Beta::RunStreamEvent::ThreadRunIncomplete
-        | OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed
-        | OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelling
-        | OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelled
-        | OpenAI::Models::Beta::RunStreamEvent::ThreadRunExpired
-
-      class RunStreamEvent < OpenAI::Union
+        OpenAI::Beta::RunStreamEvent::ThreadRunCreated
+        | OpenAI::Beta::RunStreamEvent::ThreadRunQueued
+        | OpenAI::Beta::RunStreamEvent::ThreadRunInProgress
+        | OpenAI::Beta::RunStreamEvent::ThreadRunRequiresAction
+        | OpenAI::Beta::RunStreamEvent::ThreadRunCompleted
+        | OpenAI::Beta::RunStreamEvent::ThreadRunIncomplete
+        | OpenAI::Beta::RunStreamEvent::ThreadRunFailed
+        | OpenAI::Beta::RunStreamEvent::ThreadRunCancelling
+        | OpenAI::Beta::RunStreamEvent::ThreadRunCancelled
+        | OpenAI::Beta::RunStreamEvent::ThreadRunExpired
+
+      module RunStreamEvent
+        extend OpenAI::Internal::Type::Union
+
        type thread_run_created =
-          {
-            data: OpenAI::Models::Beta::Threads::Run,
-            event: :"thread.run.created"
-          }
+          { data: OpenAI::Beta::Threads::Run, event: :"thread.run.created" }

-        class ThreadRunCreated < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Run
+        class ThreadRunCreated < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Run

           attr_accessor event: :"thread.run.created"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Run,
-              event: :"thread.run.created"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStreamEvent::thread_run_created
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStreamEvent::thread_run_created
+          def initialize: (
+            data: OpenAI::Beta::Threads::Run,
+            ?event: :"thread.run.created"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Run,
+            event: :"thread.run.created"
+          }
         end

         type thread_run_queued =
-          {
-            data: OpenAI::Models::Beta::Threads::Run,
-            event: :"thread.run.queued"
-          }
+          { data: OpenAI::Beta::Threads::Run, event: :"thread.run.queued" }

-        class ThreadRunQueued < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Run
+        class ThreadRunQueued < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Run

           attr_accessor event: :"thread.run.queued"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Run,
-              event: :"thread.run.queued"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStreamEvent::thread_run_queued
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStreamEvent::thread_run_queued
+          def initialize: (
+            data: OpenAI::Beta::Threads::Run,
+            ?event: :"thread.run.queued"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Run,
+            event: :"thread.run.queued"
+          }
         end

         type thread_run_in_progress =
-          {
-            data: OpenAI::Models::Beta::Threads::Run,
-            event: :"thread.run.in_progress"
-          }
+          { data: OpenAI::Beta::Threads::Run, event: :"thread.run.in_progress" }

-        class ThreadRunInProgress < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Run
+        class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Run

           attr_accessor event: :"thread.run.in_progress"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Run,
-              event: :"thread.run.in_progress"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStreamEvent::thread_run_in_progress
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStreamEvent::thread_run_in_progress
+          def initialize: (
+            data: OpenAI::Beta::Threads::Run,
+            ?event: :"thread.run.in_progress"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Run,
+            event: :"thread.run.in_progress"
+          }
         end

         type thread_run_requires_action =
           {
-            data: OpenAI::Models::Beta::Threads::Run,
+            data: OpenAI::Beta::Threads::Run,
             event: :"thread.run.requires_action"
           }

-        class ThreadRunRequiresAction < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Run
+        class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Run

           attr_accessor event: :"thread.run.requires_action"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Run,
-              event: :"thread.run.requires_action"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStreamEvent::thread_run_requires_action
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStreamEvent::thread_run_requires_action
+          def initialize: (
+            data: OpenAI::Beta::Threads::Run,
+            ?event: :"thread.run.requires_action"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Run,
+            event: :"thread.run.requires_action"
+          }
         end

         type thread_run_completed =
-          {
-            data: OpenAI::Models::Beta::Threads::Run,
-            event: :"thread.run.completed"
-          }
+          { data: OpenAI::Beta::Threads::Run, event: :"thread.run.completed" }

-        class ThreadRunCompleted < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Run
+        class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Run

           attr_accessor event: :"thread.run.completed"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Run,
-              event: :"thread.run.completed"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStreamEvent::thread_run_completed
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStreamEvent::thread_run_completed
+          def initialize: (
+            data: OpenAI::Beta::Threads::Run,
+            ?event: :"thread.run.completed"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Run,
+            event: :"thread.run.completed"
+          }
         end

         type thread_run_incomplete =
-          {
-            data: OpenAI::Models::Beta::Threads::Run,
-            event: :"thread.run.incomplete"
-          }
+          { data: OpenAI::Beta::Threads::Run, event: :"thread.run.incomplete" }

-        class ThreadRunIncomplete < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Run
+        class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Run

           attr_accessor event: :"thread.run.incomplete"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Run,
-              event: :"thread.run.incomplete"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStreamEvent::thread_run_incomplete
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStreamEvent::thread_run_incomplete
+          def initialize: (
+            data: OpenAI::Beta::Threads::Run,
+            ?event: :"thread.run.incomplete"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Run,
+            event: :"thread.run.incomplete"
+          }
         end

         type thread_run_failed =
-          {
-            data: OpenAI::Models::Beta::Threads::Run,
-            event: :"thread.run.failed"
-          }
+          { data: OpenAI::Beta::Threads::Run, event: :"thread.run.failed" }

-        class ThreadRunFailed < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Run
+        class ThreadRunFailed < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Run

           attr_accessor event: :"thread.run.failed"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Run,
-              event: :"thread.run.failed"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStreamEvent::thread_run_failed
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStreamEvent::thread_run_failed
+          def initialize: (
+            data: OpenAI::Beta::Threads::Run,
+            ?event: :"thread.run.failed"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Run,
+            event: :"thread.run.failed"
+          }
         end

         type thread_run_cancelling =
-          {
-            data: OpenAI::Models::Beta::Threads::Run,
-            event: :"thread.run.cancelling"
-          }
+          { data: OpenAI::Beta::Threads::Run, event: :"thread.run.cancelling" }

-        class ThreadRunCancelling < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Run
+        class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Run

           attr_accessor event: :"thread.run.cancelling"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Run,
-              event: :"thread.run.cancelling"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStreamEvent::thread_run_cancelling
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStreamEvent::thread_run_cancelling
+          def initialize: (
+            data: OpenAI::Beta::Threads::Run,
+            ?event: :"thread.run.cancelling"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Run,
+            event: :"thread.run.cancelling"
+          }
         end

         type thread_run_cancelled =
-          {
-            data: OpenAI::Models::Beta::Threads::Run,
-            event: :"thread.run.cancelled"
-          }
+          { data: OpenAI::Beta::Threads::Run, event: :"thread.run.cancelled" }

-        class ThreadRunCancelled < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Run
+        class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Run

           attr_accessor event: :"thread.run.cancelled"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Run,
-              event: :"thread.run.cancelled"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStreamEvent::thread_run_cancelled
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStreamEvent::thread_run_cancelled
+          def initialize: (
+            data: OpenAI::Beta::Threads::Run,
+            ?event: :"thread.run.cancelled"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Run,
+            event: :"thread.run.cancelled"
+          }
         end

         type thread_run_expired =
-          {
-            data: OpenAI::Models::Beta::Threads::Run,
-            event: :"thread.run.expired"
-          }
+          { data: OpenAI::Beta::Threads::Run, event: :"thread.run.expired" }

-        class ThreadRunExpired < OpenAI::BaseModel
-          attr_accessor data: OpenAI::Models::Beta::Threads::Run
+        class ThreadRunExpired < OpenAI::Internal::Type::BaseModel
+          attr_accessor data: OpenAI::Beta::Threads::Run

           attr_accessor event: :"thread.run.expired"

-          def initialize:
-            (
-              data: OpenAI::Models::Beta::Threads::Run,
-              event: :"thread.run.expired"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::RunStreamEvent::thread_run_expired
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::RunStreamEvent::thread_run_expired
+          def initialize: (
+            data: OpenAI::Beta::Threads::Run,
+            ?event: :"thread.run.expired"
+          ) -> void
+
+          def to_hash: -> {
+            data: OpenAI::Beta::Threads::Run,
+            event: :"thread.run.expired"
+          }
         end

-        private def self.variants: -> [[:"thread.run.created", OpenAI::Models::Beta::RunStreamEvent::ThreadRunCreated], [:"thread.run.queued", OpenAI::Models::Beta::RunStreamEvent::ThreadRunQueued], [:"thread.run.in_progress", OpenAI::Models::Beta::RunStreamEvent::ThreadRunInProgress], [:"thread.run.requires_action", OpenAI::Models::Beta::RunStreamEvent::ThreadRunRequiresAction], [:"thread.run.completed", OpenAI::Models::Beta::RunStreamEvent::ThreadRunCompleted], [:"thread.run.incomplete", OpenAI::Models::Beta::RunStreamEvent::ThreadRunIncomplete], [:"thread.run.failed", OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed], [:"thread.run.cancelling", OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelling], [:"thread.run.cancelled", OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelled], [:"thread.run.expired", OpenAI::Models::Beta::RunStreamEvent::ThreadRunExpired]]
+        def self?.variants: -> ::Array[OpenAI::Models::Beta::run_stream_event]
       end
     end
   end
diff --git a/sig/openai/models/beta/thread.rbs b/sig/openai/models/beta/thread.rbs
index 14d481b1..98eb5490 100644
--- a/sig/openai/models/beta/thread.rbs
+++ b/sig/openai/models/beta/thread.rbs
@@ -7,10 +7,10 @@ module OpenAI
           created_at: Integer,
           metadata: OpenAI::Models::metadata?,
           object: :thread,
-          tool_resources: OpenAI::Models::Beta::Thread::ToolResources?
+          tool_resources: OpenAI::Beta::Thread::ToolResources?
         }

-      class Thread < OpenAI::BaseModel
+      class Thread < OpenAI::Internal::Type::BaseModel
         attr_accessor id: String

         attr_accessor created_at: Integer

@@ -19,83 +19,75 @@ module OpenAI

         attr_accessor object: :thread

-        attr_accessor tool_resources: OpenAI::Models::Beta::Thread::ToolResources?
+        attr_accessor tool_resources: OpenAI::Beta::Thread::ToolResources?

-        def initialize:
-          (
-            id: String,
-            created_at: Integer,
-            metadata: OpenAI::Models::metadata?,
-            tool_resources: OpenAI::Models::Beta::Thread::ToolResources?,
-            object: :thread
-          ) -> void
-          | (?OpenAI::Models::Beta::thread | OpenAI::BaseModel data) -> void
+        def initialize: (
+          id: String,
+          created_at: Integer,
+          metadata: OpenAI::Models::metadata?,
+          tool_resources: OpenAI::Beta::Thread::ToolResources?,
+          ?object: :thread
+        ) -> void

-        def to_hash: -> OpenAI::Models::Beta::thread
+        def to_hash: -> {
+          id: String,
+          created_at: Integer,
+          metadata: OpenAI::Models::metadata?,
+          object: :thread,
+          tool_resources: OpenAI::Beta::Thread::ToolResources?
+ } type tool_resources = { - code_interpreter: OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::Thread::ToolResources::FileSearch + code_interpreter: OpenAI::Beta::Thread::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::Thread::ToolResources::FileSearch } - class ToolResources < OpenAI::BaseModel - attr_reader code_interpreter: OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter? + class ToolResources < OpenAI::Internal::Type::BaseModel + attr_reader code_interpreter: OpenAI::Beta::Thread::ToolResources::CodeInterpreter? def code_interpreter=: ( - OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter - ) -> OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter + OpenAI::Beta::Thread::ToolResources::CodeInterpreter + ) -> OpenAI::Beta::Thread::ToolResources::CodeInterpreter - attr_reader file_search: OpenAI::Models::Beta::Thread::ToolResources::FileSearch? + attr_reader file_search: OpenAI::Beta::Thread::ToolResources::FileSearch? def file_search=: ( - OpenAI::Models::Beta::Thread::ToolResources::FileSearch - ) -> OpenAI::Models::Beta::Thread::ToolResources::FileSearch + OpenAI::Beta::Thread::ToolResources::FileSearch + ) -> OpenAI::Beta::Thread::ToolResources::FileSearch - def initialize: - ( - code_interpreter: OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::Thread::ToolResources::FileSearch - ) -> void - | ( - ?OpenAI::Models::Beta::Thread::tool_resources - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?code_interpreter: OpenAI::Beta::Thread::ToolResources::CodeInterpreter, + ?file_search: OpenAI::Beta::Thread::ToolResources::FileSearch + ) -> void - def to_hash: -> OpenAI::Models::Beta::Thread::tool_resources + def to_hash: -> { + code_interpreter: OpenAI::Beta::Thread::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::Thread::ToolResources::FileSearch + } type code_interpreter = { file_ids: ::Array[String] } - class CodeInterpreter < OpenAI::BaseModel + class CodeInterpreter < OpenAI::Internal::Type::BaseModel attr_reader file_ids: ::Array[String]? def file_ids=: (::Array[String]) -> ::Array[String] - def initialize: - (file_ids: ::Array[String]) -> void - | ( - ?OpenAI::Models::Beta::Thread::ToolResources::code_interpreter - | OpenAI::BaseModel data - ) -> void + def initialize: (?file_ids: ::Array[String]) -> void - def to_hash: -> OpenAI::Models::Beta::Thread::ToolResources::code_interpreter + def to_hash: -> { file_ids: ::Array[String] } end type file_search = { vector_store_ids: ::Array[String] } - class FileSearch < OpenAI::BaseModel + class FileSearch < OpenAI::Internal::Type::BaseModel attr_reader vector_store_ids: ::Array[String]? 
def vector_store_ids=: (::Array[String]) -> ::Array[String] - def initialize: - (vector_store_ids: ::Array[String]) -> void - | ( - ?OpenAI::Models::Beta::Thread::ToolResources::file_search - | OpenAI::BaseModel data - ) -> void + def initialize: (?vector_store_ids: ::Array[String]) -> void - def to_hash: -> OpenAI::Models::Beta::Thread::ToolResources::file_search + def to_hash: -> { vector_store_ids: ::Array[String] } end end end diff --git a/sig/openai/models/beta/thread_create_and_run_params.rbs b/sig/openai/models/beta/thread_create_and_run_params.rbs index 0f7e5730..4dd4c103 100644 --- a/sig/openai/models/beta/thread_create_and_run_params.rbs +++ b/sig/openai/models/beta/thread_create_and_run_params.rbs @@ -12,18 +12,18 @@ module OpenAI parallel_tool_calls: bool, response_format: OpenAI::Models::Beta::assistant_response_format_option?, temperature: Float?, - thread: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread, + thread: OpenAI::Beta::ThreadCreateAndRunParams::Thread, tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, - tool_resources: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources?, - tools: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::tool]?, + tool_resources: OpenAI::Beta::ThreadCreateAndRunParams::ToolResources?, + tools: ::Array[OpenAI::Models::Beta::assistant_tool]?, top_p: Float?, - truncation_strategy: OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy? + truncation_strategy: OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy? } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class ThreadCreateAndRunParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class ThreadCreateAndRunParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor assistant_id: String @@ -45,136 +45,150 @@ module OpenAI attr_accessor temperature: Float? - attr_reader thread: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread? + attr_reader thread: OpenAI::Beta::ThreadCreateAndRunParams::Thread? def thread=: ( - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread - ) -> OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread + OpenAI::Beta::ThreadCreateAndRunParams::Thread + ) -> OpenAI::Beta::ThreadCreateAndRunParams::Thread attr_accessor tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option? - attr_accessor tool_resources: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources? + attr_accessor tool_resources: OpenAI::Beta::ThreadCreateAndRunParams::ToolResources? - attr_accessor tools: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::tool]? + attr_accessor tools: ::Array[OpenAI::Models::Beta::assistant_tool]? attr_accessor top_p: Float? - attr_accessor truncation_strategy: OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy? + attr_accessor truncation_strategy: OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy? 
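# A leading `?` on a keyword in RBS marks it as optional, so in the rewritten
# `initialize` below only `assistant_id:` remains required. A hypothetical
# construction sketch (assuming the shortened `OpenAI::Beta` alias these
# signatures reference resolves at runtime):
#
#   params = OpenAI::Beta::ThreadCreateAndRunParams.new(assistant_id: "asst_123")
#   params.assistant_id # => "asst_123"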
- def initialize: - ( - assistant_id: String, - instructions: String?, - max_completion_tokens: Integer?, - max_prompt_tokens: Integer?, - metadata: OpenAI::Models::metadata?, - model: OpenAI::Models::Beta::ThreadCreateAndRunParams::model?, - parallel_tool_calls: bool, - response_format: OpenAI::Models::Beta::assistant_response_format_option?, - temperature: Float?, - thread: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread, - tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, - tool_resources: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources?, - tools: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::tool]?, - top_p: Float?, - truncation_strategy: OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy?, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::Beta::thread_create_and_run_params - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::thread_create_and_run_params + def initialize: ( + assistant_id: String, + ?instructions: String?, + ?max_completion_tokens: Integer?, + ?max_prompt_tokens: Integer?, + ?metadata: OpenAI::Models::metadata?, + ?model: OpenAI::Models::Beta::ThreadCreateAndRunParams::model?, + ?parallel_tool_calls: bool, + ?response_format: OpenAI::Models::Beta::assistant_response_format_option?, + ?temperature: Float?, + ?thread: OpenAI::Beta::ThreadCreateAndRunParams::Thread, + ?tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, + ?tool_resources: OpenAI::Beta::ThreadCreateAndRunParams::ToolResources?, + ?tools: ::Array[OpenAI::Models::Beta::assistant_tool]?, + ?top_p: Float?, + ?truncation_strategy: OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy?, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + assistant_id: String, + instructions: String?, + max_completion_tokens: Integer?, + max_prompt_tokens: Integer?, + metadata: OpenAI::Models::metadata?, + model: OpenAI::Models::Beta::ThreadCreateAndRunParams::model?, + parallel_tool_calls: bool, + response_format: OpenAI::Models::Beta::assistant_response_format_option?, + temperature: Float?, + thread: OpenAI::Beta::ThreadCreateAndRunParams::Thread, + tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, + tool_resources: OpenAI::Beta::ThreadCreateAndRunParams::ToolResources?, + tools: ::Array[OpenAI::Models::Beta::assistant_tool]?, + top_p: Float?, + truncation_strategy: OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy?, + request_options: OpenAI::RequestOptions + } type model = String | OpenAI::Models::chat_model - class Model < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, OpenAI::Models::chat_model]] + module Model + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::model] end type thread = { - messages: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message], + messages: ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message], metadata: OpenAI::Models::metadata?, - tool_resources: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources? + tool_resources: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources? } - class Thread < OpenAI::BaseModel - attr_reader messages: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message]? + class Thread < OpenAI::Internal::Type::BaseModel + attr_reader messages: ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message]? 
def messages=: ( - ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message] - ) -> ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message] + ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message] + ) -> ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message] attr_accessor metadata: OpenAI::Models::metadata? - attr_accessor tool_resources: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources? + attr_accessor tool_resources: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources? - def initialize: - ( - messages: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message], - metadata: OpenAI::Models::metadata?, - tool_resources: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources? - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateAndRunParams::thread - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?messages: ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message], + ?metadata: OpenAI::Models::metadata?, + ?tool_resources: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources? + ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::thread + def to_hash: -> { + messages: ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message], + metadata: OpenAI::Models::metadata?, + tool_resources: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources? + } type message = { content: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::content, role: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::role, - attachments: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment]?, + attachments: ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment]?, metadata: OpenAI::Models::metadata? } - class Message < OpenAI::BaseModel + class Message < OpenAI::Internal::Type::BaseModel attr_accessor content: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::content attr_accessor role: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::role - attr_accessor attachments: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment]? + attr_accessor attachments: ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment]? attr_accessor metadata: OpenAI::Models::metadata? - def initialize: - ( - content: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::content, - role: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::role, - attachments: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment]?, - metadata: OpenAI::Models::metadata? - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::message - | OpenAI::BaseModel data - ) -> void + def initialize: ( + content: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::content, + role: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::role, + ?attachments: ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment]?, + ?metadata: OpenAI::Models::metadata? 
+ ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::message + def to_hash: -> { + content: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::content, + role: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::role, + attachments: ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment]?, + metadata: OpenAI::Models::metadata? + } type content = String | ::Array[OpenAI::Models::Beta::Threads::message_content_part_param] - class Content < OpenAI::Union - type message_content_part_param_array = - ::Array[OpenAI::Models::Beta::Threads::message_content_part_param] + module Content + extend OpenAI::Internal::Type::Union - MessageContentPartParamArray: message_content_part_param_array + def self?.variants: -> ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::content] - private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Beta::Threads::message_content_part_param]]] + MessageContentPartParamArray: OpenAI::Internal::Type::Converter end type role = :user | :assistant - class Role < OpenAI::Enum + module Role + extend OpenAI::Internal::Type::Enum + USER: :user ASSISTANT: :assistant - def self.values: -> ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::role] + def self?.values: -> ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::role] end type attachment = @@ -183,7 +197,7 @@ module OpenAI tools: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::tool] } - class Attachment < OpenAI::BaseModel + class Attachment < OpenAI::Internal::Type::BaseModel attr_reader file_id: String? def file_id=: (String) -> String @@ -194,119 +208,105 @@ module OpenAI ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::tool] ) -> ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::tool] - def initialize: - ( - file_id: String, - tools: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::tool] - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::attachment - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?file_id: String, + ?tools: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::tool] + ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::attachment + def to_hash: -> { + file_id: String, + tools: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::tool] + } type tool = - OpenAI::Models::Beta::CodeInterpreterTool - | OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch + OpenAI::Beta::CodeInterpreterTool + | OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch + + module Tool + extend OpenAI::Internal::Type::Union - class Tool < OpenAI::Union type file_search = { type: :file_search } - class FileSearch < OpenAI::BaseModel + class FileSearch < OpenAI::Internal::Type::BaseModel attr_accessor type: :file_search - def initialize: - (type: :file_search) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::file_search - | OpenAI::BaseModel data - ) -> void + def initialize: (?type: :file_search) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::file_search + def to_hash: -> { type: :file_search } end 
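# The hunk below converts this union from a class into a module extending
# OpenAI::Internal::Type::Union, and `def self?.` declares `variants` as
# callable both on the module itself and on anything that includes it.
# A hedged sketch of what that exposes:
#
#   tool = OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool
#   tool.variants # presumably the member types: CodeInterpreterTool and Tool::FileSearch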
- private def self.variants: -> [[:code_interpreter, OpenAI::Models::Beta::CodeInterpreterTool], [:file_search, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch]] + def self?.variants: -> ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::tool] end end end type tool_resources = { - code_interpreter: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch + code_interpreter: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch } - class ToolResources < OpenAI::BaseModel - attr_reader code_interpreter: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter? + class ToolResources < OpenAI::Internal::Type::BaseModel + attr_reader code_interpreter: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter? def code_interpreter=: ( - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter - ) -> OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter + ) -> OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter - attr_reader file_search: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch? + attr_reader file_search: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch? def file_search=: ( - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch - ) -> OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch + ) -> OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch - def initialize: - ( - code_interpreter: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::tool_resources - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?code_interpreter: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter, + ?file_search: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch + ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::tool_resources + def to_hash: -> { + code_interpreter: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch + } type code_interpreter = { file_ids: ::Array[String] } - class CodeInterpreter < OpenAI::BaseModel + class CodeInterpreter < OpenAI::Internal::Type::BaseModel attr_reader file_ids: ::Array[String]? 
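# The nilable reader above pairs with the non-nil writer declared just below:
# the field reads as nil until assigned. A hypothetical session:
#
#   ci = OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter.new
#   ci.file_ids                   # => nil until assigned
#   ci.file_ids = ["file_abc123"]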
def file_ids=: (::Array[String]) -> ::Array[String] - def initialize: - (file_ids: ::Array[String]) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::code_interpreter - | OpenAI::BaseModel data - ) -> void + def initialize: (?file_ids: ::Array[String]) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::code_interpreter + def to_hash: -> { file_ids: ::Array[String] } end type file_search = { vector_store_ids: ::Array[String], - vector_stores: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] + vector_stores: ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] } - class FileSearch < OpenAI::BaseModel + class FileSearch < OpenAI::Internal::Type::BaseModel attr_reader vector_store_ids: ::Array[String]? def vector_store_ids=: (::Array[String]) -> ::Array[String] - attr_reader vector_stores: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore]? + attr_reader vector_stores: ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore]? def vector_stores=: ( - ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] - ) -> ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] + ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] + ) -> ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] - def initialize: - ( - vector_store_ids: ::Array[String], - vector_stores: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::file_search - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?vector_store_ids: ::Array[String], + ?vector_stores: ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] + ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::file_search + def to_hash: -> { + vector_store_ids: ::Array[String], + vector_stores: ::Array[OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore] + } type vector_store = { @@ -315,7 +315,7 @@ module OpenAI metadata: OpenAI::Models::metadata? } - class VectorStore < OpenAI::BaseModel + class VectorStore < OpenAI::Internal::Type::BaseModel attr_reader chunking_strategy: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::chunking_strategy? def chunking_strategy=: ( @@ -328,61 +328,55 @@ module OpenAI attr_accessor metadata: OpenAI::Models::metadata? - def initialize: - ( - chunking_strategy: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::chunking_strategy, - file_ids: ::Array[String], - metadata: OpenAI::Models::metadata? - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::vector_store - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?chunking_strategy: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::chunking_strategy, + ?file_ids: ::Array[String], + ?metadata: OpenAI::Models::metadata? 
+ ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::vector_store + def to_hash: -> { + chunking_strategy: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::chunking_strategy, + file_ids: ::Array[String], + metadata: OpenAI::Models::metadata? + } type chunking_strategy = - OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto - | OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto + | OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + + module ChunkingStrategy + extend OpenAI::Internal::Type::Union - class ChunkingStrategy < OpenAI::Union type auto = { type: :auto } - class Auto < OpenAI::BaseModel + class Auto < OpenAI::Internal::Type::BaseModel attr_accessor type: :auto - def initialize: - (type: :auto) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::auto - | OpenAI::BaseModel data - ) -> void + def initialize: (?type: :auto) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::auto + def to_hash: -> { type: :auto } end type static = { - static: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, + static: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, type: :static } - class Static < OpenAI::BaseModel - attr_accessor static: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static + class Static < OpenAI::Internal::Type::BaseModel + attr_accessor static: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static attr_accessor type: :static - def initialize: - ( - static: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, - type: :static - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::static - | OpenAI::BaseModel data - ) -> void + def initialize: ( + static: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, + ?type: :static + ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::static + def to_hash: -> { + static: OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, + type: :static + } type static = { @@ -390,26 +384,24 @@ module OpenAI max_chunk_size_tokens: Integer } - class Static < OpenAI::BaseModel + class Static < OpenAI::Internal::Type::BaseModel attr_accessor chunk_overlap_tokens: Integer attr_accessor max_chunk_size_tokens: Integer - def initialize: - ( - chunk_overlap_tokens: Integer, - max_chunk_size_tokens: Integer - ) -> void - | ( - 
?OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::static - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::static + def initialize: ( + chunk_overlap_tokens: Integer, + max_chunk_size_tokens: Integer + ) -> void + + def to_hash: -> { + chunk_overlap_tokens: Integer, + max_chunk_size_tokens: Integer + } end end - private def self.variants: -> [[:auto, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto], [:static, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]] + def self?.variants: -> ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::chunking_strategy] end end end @@ -418,109 +410,88 @@ module OpenAI type tool_resources = { - code_interpreter: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch + code_interpreter: OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch } - class ToolResources < OpenAI::BaseModel - attr_reader code_interpreter: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter? + class ToolResources < OpenAI::Internal::Type::BaseModel + attr_reader code_interpreter: OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter? def code_interpreter=: ( - OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter - ) -> OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter + ) -> OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter - attr_reader file_search: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch? + attr_reader file_search: OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch? 
def file_search=: ( - OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch - ) -> OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch + OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch + ) -> OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch - def initialize: - ( - code_interpreter: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateAndRunParams::tool_resources - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?code_interpreter: OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter, + ?file_search: OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch + ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::tool_resources + def to_hash: -> { + code_interpreter: OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch + } type code_interpreter = { file_ids: ::Array[String] } - class CodeInterpreter < OpenAI::BaseModel + class CodeInterpreter < OpenAI::Internal::Type::BaseModel attr_reader file_ids: ::Array[String]? def file_ids=: (::Array[String]) -> ::Array[String] - def initialize: - (file_ids: ::Array[String]) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::code_interpreter - | OpenAI::BaseModel data - ) -> void + def initialize: (?file_ids: ::Array[String]) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::code_interpreter + def to_hash: -> { file_ids: ::Array[String] } end type file_search = { vector_store_ids: ::Array[String] } - class FileSearch < OpenAI::BaseModel + class FileSearch < OpenAI::Internal::Type::BaseModel attr_reader vector_store_ids: ::Array[String]? def vector_store_ids=: (::Array[String]) -> ::Array[String] - def initialize: - (vector_store_ids: ::Array[String]) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::file_search - | OpenAI::BaseModel data - ) -> void + def initialize: (?vector_store_ids: ::Array[String]) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::file_search + def to_hash: -> { vector_store_ids: ::Array[String] } end end - type tool = - OpenAI::Models::Beta::CodeInterpreterTool - | OpenAI::Models::Beta::FileSearchTool - | OpenAI::Models::Beta::FunctionTool - - class Tool < OpenAI::Union - private def self.variants: -> [[nil, OpenAI::Models::Beta::CodeInterpreterTool], [nil, OpenAI::Models::Beta::FileSearchTool], [nil, OpenAI::Models::Beta::FunctionTool]] - end - type truncation_strategy = { type: OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::type_, last_messages: Integer? } - class TruncationStrategy < OpenAI::BaseModel + class TruncationStrategy < OpenAI::Internal::Type::BaseModel attr_accessor type: OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::type_ attr_accessor last_messages: Integer? - def initialize: - ( - type: OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::type_, - last_messages: Integer? - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateAndRunParams::truncation_strategy - | OpenAI::BaseModel data - ) -> void + def initialize: ( + type: OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::type_, + ?last_messages: Integer? 
+ ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateAndRunParams::truncation_strategy + def to_hash: -> { + type: OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::type_, + last_messages: Integer? + } type type_ = :auto | :last_messages - class Type < OpenAI::Enum + module Type + extend OpenAI::Internal::Type::Enum + AUTO: :auto LAST_MESSAGES: :last_messages - def self.values: -> ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::type_] + def self?.values: -> ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::type_] end end end diff --git a/sig/openai/models/beta/thread_create_params.rbs b/sig/openai/models/beta/thread_create_params.rbs index cb189c09..e8d03f9c 100644 --- a/sig/openai/models/beta/thread_create_params.rbs +++ b/sig/openai/models/beta/thread_create_params.rbs @@ -3,90 +3,92 @@ module OpenAI module Beta type thread_create_params = { - messages: ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message], + messages: ::Array[OpenAI::Beta::ThreadCreateParams::Message], metadata: OpenAI::Models::metadata?, - tool_resources: OpenAI::Models::Beta::ThreadCreateParams::ToolResources? + tool_resources: OpenAI::Beta::ThreadCreateParams::ToolResources? } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class ThreadCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class ThreadCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - attr_reader messages: ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message]? + attr_reader messages: ::Array[OpenAI::Beta::ThreadCreateParams::Message]? def messages=: ( - ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message] - ) -> ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message] + ::Array[OpenAI::Beta::ThreadCreateParams::Message] + ) -> ::Array[OpenAI::Beta::ThreadCreateParams::Message] attr_accessor metadata: OpenAI::Models::metadata? - attr_accessor tool_resources: OpenAI::Models::Beta::ThreadCreateParams::ToolResources? + attr_accessor tool_resources: OpenAI::Beta::ThreadCreateParams::ToolResources? - def initialize: - ( - messages: ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message], - metadata: OpenAI::Models::metadata?, - tool_resources: OpenAI::Models::Beta::ThreadCreateParams::ToolResources?, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::Beta::thread_create_params | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?messages: ::Array[OpenAI::Beta::ThreadCreateParams::Message], + ?metadata: OpenAI::Models::metadata?, + ?tool_resources: OpenAI::Beta::ThreadCreateParams::ToolResources?, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::Beta::thread_create_params + def to_hash: -> { + messages: ::Array[OpenAI::Beta::ThreadCreateParams::Message], + metadata: OpenAI::Models::metadata?, + tool_resources: OpenAI::Beta::ThreadCreateParams::ToolResources?, + request_options: OpenAI::RequestOptions + } type message = { content: OpenAI::Models::Beta::ThreadCreateParams::Message::content, role: OpenAI::Models::Beta::ThreadCreateParams::Message::role, - attachments: ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment]?, + attachments: ::Array[OpenAI::Beta::ThreadCreateParams::Message::Attachment]?, metadata: OpenAI::Models::metadata? 
} - class Message < OpenAI::BaseModel + class Message < OpenAI::Internal::Type::BaseModel attr_accessor content: OpenAI::Models::Beta::ThreadCreateParams::Message::content attr_accessor role: OpenAI::Models::Beta::ThreadCreateParams::Message::role - attr_accessor attachments: ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment]? + attr_accessor attachments: ::Array[OpenAI::Beta::ThreadCreateParams::Message::Attachment]? attr_accessor metadata: OpenAI::Models::metadata? - def initialize: - ( - content: OpenAI::Models::Beta::ThreadCreateParams::Message::content, - role: OpenAI::Models::Beta::ThreadCreateParams::Message::role, - attachments: ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment]?, - metadata: OpenAI::Models::metadata? - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateParams::message - | OpenAI::BaseModel data - ) -> void + def initialize: ( + content: OpenAI::Models::Beta::ThreadCreateParams::Message::content, + role: OpenAI::Models::Beta::ThreadCreateParams::Message::role, + ?attachments: ::Array[OpenAI::Beta::ThreadCreateParams::Message::Attachment]?, + ?metadata: OpenAI::Models::metadata? + ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateParams::message + def to_hash: -> { + content: OpenAI::Models::Beta::ThreadCreateParams::Message::content, + role: OpenAI::Models::Beta::ThreadCreateParams::Message::role, + attachments: ::Array[OpenAI::Beta::ThreadCreateParams::Message::Attachment]?, + metadata: OpenAI::Models::metadata? + } type content = String | ::Array[OpenAI::Models::Beta::Threads::message_content_part_param] - class Content < OpenAI::Union - type message_content_part_param_array = - ::Array[OpenAI::Models::Beta::Threads::message_content_part_param] + module Content + extend OpenAI::Internal::Type::Union - MessageContentPartParamArray: message_content_part_param_array + def self?.variants: -> ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::content] - private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Beta::Threads::message_content_part_param]]] + MessageContentPartParamArray: OpenAI::Internal::Type::Converter end type role = :user | :assistant - class Role < OpenAI::Enum + module Role + extend OpenAI::Internal::Type::Enum + USER: :user ASSISTANT: :assistant - def self.values: -> ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::role] + def self?.values: -> ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::role] end type attachment = @@ -95,7 +97,7 @@ module OpenAI tools: ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::tool] } - class Attachment < OpenAI::BaseModel + class Attachment < OpenAI::Internal::Type::BaseModel attr_reader file_id: String? 
def file_id=: (String) -> String @@ -106,119 +108,105 @@ module OpenAI ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::tool] ) -> ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::tool] - def initialize: - ( - file_id: String, - tools: ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::tool] - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateParams::Message::attachment - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?file_id: String, + ?tools: ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::tool] + ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateParams::Message::attachment + def to_hash: -> { + file_id: String, + tools: ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::tool] + } type tool = - OpenAI::Models::Beta::CodeInterpreterTool - | OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch + OpenAI::Beta::CodeInterpreterTool + | OpenAI::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch + + module Tool + extend OpenAI::Internal::Type::Union - class Tool < OpenAI::Union type file_search = { type: :file_search } - class FileSearch < OpenAI::BaseModel + class FileSearch < OpenAI::Internal::Type::BaseModel attr_accessor type: :file_search - def initialize: - (type: :file_search) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::file_search - | OpenAI::BaseModel data - ) -> void + def initialize: (?type: :file_search) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::file_search + def to_hash: -> { type: :file_search } end - private def self.variants: -> [[:code_interpreter, OpenAI::Models::Beta::CodeInterpreterTool], [:file_search, OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch]] + def self?.variants: -> ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::tool] end end end type tool_resources = { - code_interpreter: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch + code_interpreter: OpenAI::Beta::ThreadCreateParams::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch } - class ToolResources < OpenAI::BaseModel - attr_reader code_interpreter: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter? + class ToolResources < OpenAI::Internal::Type::BaseModel + attr_reader code_interpreter: OpenAI::Beta::ThreadCreateParams::ToolResources::CodeInterpreter? def code_interpreter=: ( - OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter - ) -> OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter + OpenAI::Beta::ThreadCreateParams::ToolResources::CodeInterpreter + ) -> OpenAI::Beta::ThreadCreateParams::ToolResources::CodeInterpreter - attr_reader file_search: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch? + attr_reader file_search: OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch? 
def file_search=: ( - OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch - ) -> OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch + ) -> OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch - def initialize: - ( - code_interpreter: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateParams::tool_resources - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?code_interpreter: OpenAI::Beta::ThreadCreateParams::ToolResources::CodeInterpreter, + ?file_search: OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch + ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateParams::tool_resources + def to_hash: -> { + code_interpreter: OpenAI::Beta::ThreadCreateParams::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch + } type code_interpreter = { file_ids: ::Array[String] } - class CodeInterpreter < OpenAI::BaseModel + class CodeInterpreter < OpenAI::Internal::Type::BaseModel attr_reader file_ids: ::Array[String]? def file_ids=: (::Array[String]) -> ::Array[String] - def initialize: - (file_ids: ::Array[String]) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateParams::ToolResources::code_interpreter - | OpenAI::BaseModel data - ) -> void + def initialize: (?file_ids: ::Array[String]) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateParams::ToolResources::code_interpreter + def to_hash: -> { file_ids: ::Array[String] } end type file_search = { vector_store_ids: ::Array[String], - vector_stores: ::Array[OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore] + vector_stores: ::Array[OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore] } - class FileSearch < OpenAI::BaseModel + class FileSearch < OpenAI::Internal::Type::BaseModel attr_reader vector_store_ids: ::Array[String]? def vector_store_ids=: (::Array[String]) -> ::Array[String] - attr_reader vector_stores: ::Array[OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore]? + attr_reader vector_stores: ::Array[OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore]? 
def vector_stores=: ( - ::Array[OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore] - ) -> ::Array[OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore] + ::Array[OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore] + ) -> ::Array[OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore] - def initialize: - ( - vector_store_ids: ::Array[String], - vector_stores: ::Array[OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore] - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateParams::ToolResources::file_search - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?vector_store_ids: ::Array[String], + ?vector_stores: ::Array[OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore] + ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateParams::ToolResources::file_search + def to_hash: -> { + vector_store_ids: ::Array[String], + vector_stores: ::Array[OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore] + } type vector_store = { @@ -227,7 +215,7 @@ module OpenAI metadata: OpenAI::Models::metadata? } - class VectorStore < OpenAI::BaseModel + class VectorStore < OpenAI::Internal::Type::BaseModel attr_reader chunking_strategy: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::chunking_strategy? def chunking_strategy=: ( @@ -240,61 +228,55 @@ module OpenAI attr_accessor metadata: OpenAI::Models::metadata? - def initialize: - ( - chunking_strategy: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::chunking_strategy, - file_ids: ::Array[String], - metadata: OpenAI::Models::metadata? - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::vector_store - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?chunking_strategy: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::chunking_strategy, + ?file_ids: ::Array[String], + ?metadata: OpenAI::Models::metadata? + ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::vector_store + def to_hash: -> { + chunking_strategy: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::chunking_strategy, + file_ids: ::Array[String], + metadata: OpenAI::Models::metadata? 
+ } type chunking_strategy = - OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto - | OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto + | OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static + + module ChunkingStrategy + extend OpenAI::Internal::Type::Union - class ChunkingStrategy < OpenAI::Union type auto = { type: :auto } - class Auto < OpenAI::BaseModel + class Auto < OpenAI::Internal::Type::BaseModel attr_accessor type: :auto - def initialize: - (type: :auto) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::auto - | OpenAI::BaseModel data - ) -> void + def initialize: (?type: :auto) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::auto + def to_hash: -> { type: :auto } end type static = { - static: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, + static: OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, type: :static } - class Static < OpenAI::BaseModel - attr_accessor static: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static + class Static < OpenAI::Internal::Type::BaseModel + attr_accessor static: OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static attr_accessor type: :static - def initialize: - ( - static: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, - type: :static - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::static - | OpenAI::BaseModel data - ) -> void + def initialize: ( + static: OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, + ?type: :static + ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::static + def to_hash: -> { + static: OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static, + type: :static + } type static = { @@ -302,26 +284,24 @@ module OpenAI max_chunk_size_tokens: Integer } - class Static < OpenAI::BaseModel + class Static < OpenAI::Internal::Type::BaseModel attr_accessor chunk_overlap_tokens: Integer attr_accessor max_chunk_size_tokens: Integer - def initialize: - ( - chunk_overlap_tokens: Integer, - max_chunk_size_tokens: Integer - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::static - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::static + def initialize: ( + chunk_overlap_tokens: Integer, + max_chunk_size_tokens: Integer + ) -> void + + def to_hash: -> { + chunk_overlap_tokens: Integer, + max_chunk_size_tokens: Integer + } end end - private def self.variants: -> [[:auto, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto], [:static, 
OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static]] + def self?.variants: -> ::Array[OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::chunking_strategy] end end end diff --git a/sig/openai/models/beta/thread_delete_params.rbs b/sig/openai/models/beta/thread_delete_params.rbs index 42d8665e..c33f5354 100644 --- a/sig/openai/models/beta/thread_delete_params.rbs +++ b/sig/openai/models/beta/thread_delete_params.rbs @@ -1,19 +1,16 @@ module OpenAI module Models module Beta - type thread_delete_params = { } & OpenAI::request_parameters + type thread_delete_params = + { } & OpenAI::Internal::Type::request_parameters - class ThreadDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class ThreadDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::Beta::thread_delete_params | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::Beta::thread_delete_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/beta/thread_deleted.rbs b/sig/openai/models/beta/thread_deleted.rbs index 137eb97d..c43c132b 100644 --- a/sig/openai/models/beta/thread_deleted.rbs +++ b/sig/openai/models/beta/thread_deleted.rbs @@ -4,20 +4,20 @@ module OpenAI type thread_deleted = { id: String, deleted: bool, object: :"thread.deleted" } - class ThreadDeleted < OpenAI::BaseModel + class ThreadDeleted < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor deleted: bool attr_accessor object: :"thread.deleted" - def initialize: - (id: String, deleted: bool, object: :"thread.deleted") -> void - | ( - ?OpenAI::Models::Beta::thread_deleted | OpenAI::BaseModel data - ) -> void + def initialize: ( + id: String, + deleted: bool, + ?object: :"thread.deleted" + ) -> void - def to_hash: -> OpenAI::Models::Beta::thread_deleted + def to_hash: -> { id: String, deleted: bool, object: :"thread.deleted" } end end end diff --git a/sig/openai/models/beta/thread_retrieve_params.rbs b/sig/openai/models/beta/thread_retrieve_params.rbs index fd9a2212..dd6b78cf 100644 --- a/sig/openai/models/beta/thread_retrieve_params.rbs +++ b/sig/openai/models/beta/thread_retrieve_params.rbs @@ -1,20 +1,16 @@ module OpenAI module Models module Beta - type thread_retrieve_params = { } & OpenAI::request_parameters + type thread_retrieve_params = + { } & OpenAI::Internal::Type::request_parameters - class ThreadRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class ThreadRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::Beta::thread_retrieve_params - | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::Beta::thread_retrieve_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/beta/thread_stream_event.rbs b/sig/openai/models/beta/thread_stream_event.rbs 
index 51deb8c1..9c8af3b5 100644 --- a/sig/openai/models/beta/thread_stream_event.rbs +++ b/sig/openai/models/beta/thread_stream_event.rbs @@ -2,14 +2,10 @@ module OpenAI module Models module Beta type thread_stream_event = - { - data: OpenAI::Models::Beta::Thread, - event: :"thread.created", - enabled: bool - } + { data: OpenAI::Beta::Thread, event: :"thread.created", enabled: bool } - class ThreadStreamEvent < OpenAI::BaseModel - attr_accessor data: OpenAI::Models::Beta::Thread + class ThreadStreamEvent < OpenAI::Internal::Type::BaseModel + attr_accessor data: OpenAI::Beta::Thread attr_accessor event: :"thread.created" @@ -17,17 +13,17 @@ module OpenAI def enabled=: (bool) -> bool - def initialize: - ( - data: OpenAI::Models::Beta::Thread, - enabled: bool, - event: :"thread.created" - ) -> void - | ( - ?OpenAI::Models::Beta::thread_stream_event | OpenAI::BaseModel data - ) -> void + def initialize: ( + data: OpenAI::Beta::Thread, + ?enabled: bool, + ?event: :"thread.created" + ) -> void - def to_hash: -> OpenAI::Models::Beta::thread_stream_event + def to_hash: -> { + data: OpenAI::Beta::Thread, + event: :"thread.created", + enabled: bool + } end end end diff --git a/sig/openai/models/beta/thread_update_params.rbs b/sig/openai/models/beta/thread_update_params.rbs index 4462d12c..d5d3375e 100644 --- a/sig/openai/models/beta/thread_update_params.rbs +++ b/sig/openai/models/beta/thread_update_params.rbs @@ -4,93 +4,81 @@ module OpenAI type thread_update_params = { metadata: OpenAI::Models::metadata?, - tool_resources: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources? + tool_resources: OpenAI::Beta::ThreadUpdateParams::ToolResources? } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class ThreadUpdateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class ThreadUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor metadata: OpenAI::Models::metadata? - attr_accessor tool_resources: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources? + attr_accessor tool_resources: OpenAI::Beta::ThreadUpdateParams::ToolResources? - def initialize: - ( - metadata: OpenAI::Models::metadata?, - tool_resources: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources?, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::Beta::thread_update_params | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?metadata: OpenAI::Models::metadata?, + ?tool_resources: OpenAI::Beta::ThreadUpdateParams::ToolResources?, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::Beta::thread_update_params + def to_hash: -> { + metadata: OpenAI::Models::metadata?, + tool_resources: OpenAI::Beta::ThreadUpdateParams::ToolResources?, + request_options: OpenAI::RequestOptions + } type tool_resources = { - code_interpreter: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch + code_interpreter: OpenAI::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::ThreadUpdateParams::ToolResources::FileSearch } - class ToolResources < OpenAI::BaseModel - attr_reader code_interpreter: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter? 
+ class ToolResources < OpenAI::Internal::Type::BaseModel + attr_reader code_interpreter: OpenAI::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter? def code_interpreter=: ( - OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter - ) -> OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter + OpenAI::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter + ) -> OpenAI::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter - attr_reader file_search: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch? + attr_reader file_search: OpenAI::Beta::ThreadUpdateParams::ToolResources::FileSearch? def file_search=: ( - OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch - ) -> OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch + OpenAI::Beta::ThreadUpdateParams::ToolResources::FileSearch + ) -> OpenAI::Beta::ThreadUpdateParams::ToolResources::FileSearch - def initialize: - ( - code_interpreter: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter, - file_search: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch - ) -> void - | ( - ?OpenAI::Models::Beta::ThreadUpdateParams::tool_resources - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?code_interpreter: OpenAI::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter, + ?file_search: OpenAI::Beta::ThreadUpdateParams::ToolResources::FileSearch + ) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadUpdateParams::tool_resources + def to_hash: -> { + code_interpreter: OpenAI::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter, + file_search: OpenAI::Beta::ThreadUpdateParams::ToolResources::FileSearch + } type code_interpreter = { file_ids: ::Array[String] } - class CodeInterpreter < OpenAI::BaseModel + class CodeInterpreter < OpenAI::Internal::Type::BaseModel attr_reader file_ids: ::Array[String]? def file_ids=: (::Array[String]) -> ::Array[String] - def initialize: - (file_ids: ::Array[String]) -> void - | ( - ?OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::code_interpreter - | OpenAI::BaseModel data - ) -> void + def initialize: (?file_ids: ::Array[String]) -> void - def to_hash: -> OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::code_interpreter + def to_hash: -> { file_ids: ::Array[String] } end type file_search = { vector_store_ids: ::Array[String] } - class FileSearch < OpenAI::BaseModel + class FileSearch < OpenAI::Internal::Type::BaseModel attr_reader vector_store_ids: ::Array[String]? 
           def vector_store_ids=: (::Array[String]) -> ::Array[String]

-          def initialize:
-            (vector_store_ids: ::Array[String]) -> void
-            | (
-              ?OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::file_search
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (?vector_store_ids: ::Array[String]) -> void

-          def to_hash: -> OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::file_search
+          def to_hash: -> { vector_store_ids: ::Array[String] }
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/annotation.rbs b/sig/openai/models/beta/threads/annotation.rbs
index 547b90c2..872a6668 100644
--- a/sig/openai/models/beta/threads/annotation.rbs
+++ b/sig/openai/models/beta/threads/annotation.rbs
@@ -3,11 +3,13 @@ module OpenAI
     module Beta
       module Threads
         type annotation =
-          OpenAI::Models::Beta::Threads::FileCitationAnnotation
-          | OpenAI::Models::Beta::Threads::FilePathAnnotation
+          OpenAI::Beta::Threads::FileCitationAnnotation
+          | OpenAI::Beta::Threads::FilePathAnnotation

-        class Annotation < OpenAI::Union
-          private def self.variants: -> [[:file_citation, OpenAI::Models::Beta::Threads::FileCitationAnnotation], [:file_path, OpenAI::Models::Beta::Threads::FilePathAnnotation]]
+        module Annotation
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::annotation]
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/annotation_delta.rbs b/sig/openai/models/beta/threads/annotation_delta.rbs
index 80ab4f91..4ea5f3a3 100644
--- a/sig/openai/models/beta/threads/annotation_delta.rbs
+++ b/sig/openai/models/beta/threads/annotation_delta.rbs
@@ -3,11 +3,13 @@ module OpenAI
     module Beta
       module Threads
         type annotation_delta =
-          OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation
-          | OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation
+          OpenAI::Beta::Threads::FileCitationDeltaAnnotation
+          | OpenAI::Beta::Threads::FilePathDeltaAnnotation

-        class AnnotationDelta < OpenAI::Union
-          private def self.variants: -> [[:file_citation, OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation], [:file_path, OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation]]
+        module AnnotationDelta
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::annotation_delta]
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/file_citation_annotation.rbs b/sig/openai/models/beta/threads/file_citation_annotation.rbs
index 8c9eadaa..9b224788 100644
--- a/sig/openai/models/beta/threads/file_citation_annotation.rbs
+++ b/sig/openai/models/beta/threads/file_citation_annotation.rbs
@@ -5,16 +5,16 @@ module OpenAI
         type file_citation_annotation =
           {
             end_index: Integer,
-            file_citation: OpenAI::Models::Beta::Threads::FileCitationAnnotation::FileCitation,
+            file_citation: OpenAI::Beta::Threads::FileCitationAnnotation::FileCitation,
             start_index: Integer,
             text: String,
             type: :file_citation
           }

-        class FileCitationAnnotation < OpenAI::BaseModel
+        class FileCitationAnnotation < OpenAI::Internal::Type::BaseModel
           attr_accessor end_index: Integer

-          attr_accessor file_citation: OpenAI::Models::Beta::Threads::FileCitationAnnotation::FileCitation
+          attr_accessor file_citation: OpenAI::Beta::Threads::FileCitationAnnotation::FileCitation

           attr_accessor start_index: Integer

@@ -22,34 +22,30 @@ module OpenAI

           attr_accessor type: :file_citation

-          def initialize:
-            (
-              end_index: Integer,
-              file_citation: OpenAI::Models::Beta::Threads::FileCitationAnnotation::FileCitation,
-              start_index: Integer,
-              text: String,
-              type: :file_citation
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::file_citation_annotation
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::Threads::file_citation_annotation
+          def initialize: (
+            end_index: Integer,
+            file_citation: OpenAI::Beta::Threads::FileCitationAnnotation::FileCitation,
+            start_index: Integer,
+            text: String,
+            ?type: :file_citation
+          ) -> void
+
+          def to_hash: -> {
+            end_index: Integer,
+            file_citation: OpenAI::Beta::Threads::FileCitationAnnotation::FileCitation,
+            start_index: Integer,
+            text: String,
+            type: :file_citation
+          }

           type file_citation = { file_id: String }

-          class FileCitation < OpenAI::BaseModel
+          class FileCitation < OpenAI::Internal::Type::BaseModel
             attr_accessor file_id: String

-            def initialize:
-              (file_id: String) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::FileCitationAnnotation::file_citation
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (file_id: String) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::FileCitationAnnotation::file_citation
+            def to_hash: -> { file_id: String }
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/file_citation_delta_annotation.rbs b/sig/openai/models/beta/threads/file_citation_delta_annotation.rbs
index 933da760..8bb06279 100644
--- a/sig/openai/models/beta/threads/file_citation_delta_annotation.rbs
+++ b/sig/openai/models/beta/threads/file_citation_delta_annotation.rbs
@@ -7,12 +7,12 @@ module OpenAI
             index: Integer,
             type: :file_citation,
             end_index: Integer,
-            file_citation: OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation,
+            file_citation: OpenAI::Beta::Threads::FileCitationDeltaAnnotation::FileCitation,
             start_index: Integer,
             text: String
           }

-        class FileCitationDeltaAnnotation < OpenAI::BaseModel
+        class FileCitationDeltaAnnotation < OpenAI::Internal::Type::BaseModel
           attr_accessor index: Integer

           attr_accessor type: :file_citation

@@ -21,11 +21,11 @@ module OpenAI

           def end_index=: (Integer) -> Integer

-          attr_reader file_citation: OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation?
+          attr_reader file_citation: OpenAI::Beta::Threads::FileCitationDeltaAnnotation::FileCitation?

           def file_citation=: (
-            OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation
-          ) -> OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation
+            OpenAI::Beta::Threads::FileCitationDeltaAnnotation::FileCitation
+          ) -> OpenAI::Beta::Threads::FileCitationDeltaAnnotation::FileCitation

           attr_reader start_index: Integer?
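The `FileCitationAnnotation` change above is the pattern this whole diff repeats: the old signatures declared two `initialize` overloads (keyword arguments, or a positional `?hash | OpenAI::BaseModel data` payload), while the new ones declare a single keyword form in which a leading `?` marks an optional argument, and `to_hash` returns a concrete record type instead of a named alias. A minimal sketch of what the new signature permits; the values are purely illustrative, and the hash-to-model coercion of `file_citation:` is an assumption about the SDK's usual behavior:

    # `type:` may be omitted: the new signature declares it `?type: :file_citation`.
    annotation = OpenAI::Models::Beta::Threads::FileCitationAnnotation.new(
      end_index: 57,
      file_citation: {file_id: "file-abc123"}, # assumed to coerce into FileCitation
      start_index: 42,
      text: "[1]"
    )
    annotation.to_hash # typed as the record literal in the signature above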
@@ -35,25 +35,27 @@ module OpenAI

           def text=: (String) -> String

-          def initialize:
-            (
-              index: Integer,
-              end_index: Integer,
-              file_citation: OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation,
-              start_index: Integer,
-              text: String,
-              type: :file_citation
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::file_citation_delta_annotation
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::Threads::file_citation_delta_annotation
+          def initialize: (
+            index: Integer,
+            ?end_index: Integer,
+            ?file_citation: OpenAI::Beta::Threads::FileCitationDeltaAnnotation::FileCitation,
+            ?start_index: Integer,
+            ?text: String,
+            ?type: :file_citation
+          ) -> void
+
+          def to_hash: -> {
+            index: Integer,
+            type: :file_citation,
+            end_index: Integer,
+            file_citation: OpenAI::Beta::Threads::FileCitationDeltaAnnotation::FileCitation,
+            start_index: Integer,
+            text: String
+          }

           type file_citation = { file_id: String, quote: String }

-          class FileCitation < OpenAI::BaseModel
+          class FileCitation < OpenAI::Internal::Type::BaseModel
             attr_reader file_id: String?

             def file_id=: (String) -> String

@@ -62,14 +64,9 @@ module OpenAI

             def quote=: (String) -> String

-            def initialize:
-              (file_id: String, quote: String) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::file_citation
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (?file_id: String, ?quote: String) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::file_citation
+            def to_hash: -> { file_id: String, quote: String }
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/file_path_annotation.rbs b/sig/openai/models/beta/threads/file_path_annotation.rbs
index 7c590adb..95d59fc3 100644
--- a/sig/openai/models/beta/threads/file_path_annotation.rbs
+++ b/sig/openai/models/beta/threads/file_path_annotation.rbs
@@ -5,16 +5,16 @@ module OpenAI
         type file_path_annotation =
           {
             end_index: Integer,
-            file_path: OpenAI::Models::Beta::Threads::FilePathAnnotation::FilePath,
+            file_path: OpenAI::Beta::Threads::FilePathAnnotation::FilePath,
             start_index: Integer,
             text: String,
             type: :file_path
           }

-        class FilePathAnnotation < OpenAI::BaseModel
+        class FilePathAnnotation < OpenAI::Internal::Type::BaseModel
           attr_accessor end_index: Integer

-          attr_accessor file_path: OpenAI::Models::Beta::Threads::FilePathAnnotation::FilePath
+          attr_accessor file_path: OpenAI::Beta::Threads::FilePathAnnotation::FilePath

           attr_accessor start_index: Integer

@@ -22,34 +22,30 @@ module OpenAI

           attr_accessor type: :file_path

-          def initialize:
-            (
-              end_index: Integer,
-              file_path: OpenAI::Models::Beta::Threads::FilePathAnnotation::FilePath,
-              start_index: Integer,
-              text: String,
-              type: :file_path
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::file_path_annotation
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::Threads::file_path_annotation
+          def initialize: (
+            end_index: Integer,
+            file_path: OpenAI::Beta::Threads::FilePathAnnotation::FilePath,
+            start_index: Integer,
+            text: String,
+            ?type: :file_path
+          ) -> void
+
+          def to_hash: -> {
+            end_index: Integer,
+            file_path: OpenAI::Beta::Threads::FilePathAnnotation::FilePath,
+            start_index: Integer,
+            text: String,
+            type: :file_path
+          }

           type file_path = { file_id: String }

-          class FilePath < OpenAI::BaseModel
+          class FilePath < OpenAI::Internal::Type::BaseModel
             attr_accessor file_id: String

-            def initialize:
-              (file_id: String) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::FilePathAnnotation::file_path
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (file_id: String) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::FilePathAnnotation::file_path
+            def to_hash: -> { file_id: String }
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/file_path_delta_annotation.rbs b/sig/openai/models/beta/threads/file_path_delta_annotation.rbs
index 536a311d..8bac30d8 100644
--- a/sig/openai/models/beta/threads/file_path_delta_annotation.rbs
+++ b/sig/openai/models/beta/threads/file_path_delta_annotation.rbs
@@ -7,12 +7,12 @@ module OpenAI
             index: Integer,
             type: :file_path,
             end_index: Integer,
-            file_path: OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath,
+            file_path: OpenAI::Beta::Threads::FilePathDeltaAnnotation::FilePath,
             start_index: Integer,
             text: String
           }

-        class FilePathDeltaAnnotation < OpenAI::BaseModel
+        class FilePathDeltaAnnotation < OpenAI::Internal::Type::BaseModel
           attr_accessor index: Integer

           attr_accessor type: :file_path

@@ -21,11 +21,11 @@ module OpenAI

           def end_index=: (Integer) -> Integer

-          attr_reader file_path: OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath?
+          attr_reader file_path: OpenAI::Beta::Threads::FilePathDeltaAnnotation::FilePath?

           def file_path=: (
-            OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath
-          ) -> OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath
+            OpenAI::Beta::Threads::FilePathDeltaAnnotation::FilePath
+          ) -> OpenAI::Beta::Threads::FilePathDeltaAnnotation::FilePath

           attr_reader start_index: Integer?

@@ -35,37 +35,34 @@ module OpenAI

           def text=: (String) -> String

-          def initialize:
-            (
-              index: Integer,
-              end_index: Integer,
-              file_path: OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath,
-              start_index: Integer,
-              text: String,
-              type: :file_path
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::file_path_delta_annotation
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::Threads::file_path_delta_annotation
+          def initialize: (
+            index: Integer,
+            ?end_index: Integer,
+            ?file_path: OpenAI::Beta::Threads::FilePathDeltaAnnotation::FilePath,
+            ?start_index: Integer,
+            ?text: String,
+            ?type: :file_path
+          ) -> void
+
+          def to_hash: -> {
+            index: Integer,
+            type: :file_path,
+            end_index: Integer,
+            file_path: OpenAI::Beta::Threads::FilePathDeltaAnnotation::FilePath,
+            start_index: Integer,
+            text: String
+          }

           type file_path = { file_id: String }

-          class FilePath < OpenAI::BaseModel
+          class FilePath < OpenAI::Internal::Type::BaseModel
             attr_reader file_id: String?

             def file_id=: (String) -> String

-            def initialize:
-              (file_id: String) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::file_path
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (?file_id: String) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::file_path
+            def to_hash: -> { file_id: String }
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/image_file.rbs b/sig/openai/models/beta/threads/image_file.rbs
index 2d2fb9e4..6b59968c 100644
--- a/sig/openai/models/beta/threads/image_file.rbs
+++ b/sig/openai/models/beta/threads/image_file.rbs
@@ -8,7 +8,7 @@ module OpenAI
             detail: OpenAI::Models::Beta::Threads::ImageFile::detail
           }

-        class ImageFile < OpenAI::BaseModel
+        class ImageFile < OpenAI::Internal::Type::BaseModel
           attr_accessor file_id: String

           attr_reader detail: OpenAI::Models::Beta::Threads::ImageFile::detail?
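A point worth flagging before the next hunk: throughout these signatures, required fields surface as `attr_accessor` with a non-nilable type, while optional fields become an `attr_reader` with a nilable (`?`) return type plus a separately typed writer. A rough sketch against `ImageFile` as declared above, assuming it is constructed without `detail`; the file ID is illustrative:

    image_file = OpenAI::Models::Beta::Threads::ImageFile.new(file_id: "file-abc123")
    image_file.file_id       # => "file-abc123" (required; typed String, never nil)
    image_file.detail        # => nil (the reader is typed `detail?`)
    image_file.detail = :low # the writer accepts only the `detail` enum type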
@@ -17,26 +17,26 @@ module OpenAI
             OpenAI::Models::Beta::Threads::ImageFile::detail
           ) -> OpenAI::Models::Beta::Threads::ImageFile::detail

-          def initialize:
-            (
-              file_id: String,
-              detail: OpenAI::Models::Beta::Threads::ImageFile::detail
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::image_file
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            file_id: String,
+            ?detail: OpenAI::Models::Beta::Threads::ImageFile::detail
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::image_file
+          def to_hash: -> {
+            file_id: String,
+            detail: OpenAI::Models::Beta::Threads::ImageFile::detail
+          }

           type detail = :auto | :low | :high

-          class Detail < OpenAI::Enum
+          module Detail
+            extend OpenAI::Internal::Type::Enum
+
             AUTO: :auto
             LOW: :low
             HIGH: :high

-            def self.values: -> ::Array[OpenAI::Models::Beta::Threads::ImageFile::detail]
+            def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::ImageFile::detail]
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/image_file_content_block.rbs b/sig/openai/models/beta/threads/image_file_content_block.rbs
index 5267e2c7..5b91259d 100644
--- a/sig/openai/models/beta/threads/image_file_content_block.rbs
+++ b/sig/openai/models/beta/threads/image_file_content_block.rbs
@@ -3,27 +3,22 @@ module OpenAI
     module Beta
       module Threads
         type image_file_content_block =
-          {
-            image_file: OpenAI::Models::Beta::Threads::ImageFile,
-            type: :image_file
-          }
+          { image_file: OpenAI::Beta::Threads::ImageFile, type: :image_file }

-        class ImageFileContentBlock < OpenAI::BaseModel
-          attr_accessor image_file: OpenAI::Models::Beta::Threads::ImageFile
+        class ImageFileContentBlock < OpenAI::Internal::Type::BaseModel
+          attr_accessor image_file: OpenAI::Beta::Threads::ImageFile

           attr_accessor type: :image_file

-          def initialize:
-            (
-              image_file: OpenAI::Models::Beta::Threads::ImageFile,
-              type: :image_file
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::image_file_content_block
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            image_file: OpenAI::Beta::Threads::ImageFile,
+            ?type: :image_file
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::image_file_content_block
+          def to_hash: -> {
+            image_file: OpenAI::Beta::Threads::ImageFile,
+            type: :image_file
+          }
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/image_file_delta.rbs b/sig/openai/models/beta/threads/image_file_delta.rbs
index bf715b22..2f0784dd 100644
--- a/sig/openai/models/beta/threads/image_file_delta.rbs
+++ b/sig/openai/models/beta/threads/image_file_delta.rbs
@@ -8,7 +8,7 @@ module OpenAI
             file_id: String
           }

-        class ImageFileDelta < OpenAI::BaseModel
+        class ImageFileDelta < OpenAI::Internal::Type::BaseModel
           attr_reader detail: OpenAI::Models::Beta::Threads::ImageFileDelta::detail?
           def detail=: (
@@ -19,26 +19,26 @@ module OpenAI
             OpenAI::Models::Beta::Threads::ImageFileDelta::detail
           ) -> OpenAI::Models::Beta::Threads::ImageFileDelta::detail

           def file_id=: (String) -> String

-          def initialize:
-            (
-              detail: OpenAI::Models::Beta::Threads::ImageFileDelta::detail,
-              file_id: String
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::image_file_delta
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            ?detail: OpenAI::Models::Beta::Threads::ImageFileDelta::detail,
+            ?file_id: String
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::image_file_delta
+          def to_hash: -> {
+            detail: OpenAI::Models::Beta::Threads::ImageFileDelta::detail,
+            file_id: String
+          }

           type detail = :auto | :low | :high

-          class Detail < OpenAI::Enum
+          module Detail
+            extend OpenAI::Internal::Type::Enum
+
             AUTO: :auto
             LOW: :low
             HIGH: :high

-            def self.values: -> ::Array[OpenAI::Models::Beta::Threads::ImageFileDelta::detail]
+            def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::ImageFileDelta::detail]
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/image_file_delta_block.rbs b/sig/openai/models/beta/threads/image_file_delta_block.rbs
index 1dc26a44..e753fcc9 100644
--- a/sig/openai/models/beta/threads/image_file_delta_block.rbs
+++ b/sig/openai/models/beta/threads/image_file_delta_block.rbs
@@ -6,32 +6,31 @@ module OpenAI
           {
             index: Integer,
             type: :image_file,
-            image_file: OpenAI::Models::Beta::Threads::ImageFileDelta
+            image_file: OpenAI::Beta::Threads::ImageFileDelta
           }

-        class ImageFileDeltaBlock < OpenAI::BaseModel
+        class ImageFileDeltaBlock < OpenAI::Internal::Type::BaseModel
           attr_accessor index: Integer

           attr_accessor type: :image_file

-          attr_reader image_file: OpenAI::Models::Beta::Threads::ImageFileDelta?
+          attr_reader image_file: OpenAI::Beta::Threads::ImageFileDelta?

           def image_file=: (
-            OpenAI::Models::Beta::Threads::ImageFileDelta
-          ) -> OpenAI::Models::Beta::Threads::ImageFileDelta
+            OpenAI::Beta::Threads::ImageFileDelta
+          ) -> OpenAI::Beta::Threads::ImageFileDelta

-          def initialize:
-            (
-              index: Integer,
-              image_file: OpenAI::Models::Beta::Threads::ImageFileDelta,
-              type: :image_file
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::image_file_delta_block
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            index: Integer,
+            ?image_file: OpenAI::Beta::Threads::ImageFileDelta,
+            ?type: :image_file
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::image_file_delta_block
+          def to_hash: -> {
+            index: Integer,
+            type: :image_file,
+            image_file: OpenAI::Beta::Threads::ImageFileDelta
+          }
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/image_url.rbs b/sig/openai/models/beta/threads/image_url.rbs
index f786387e..d0a0d066 100644
--- a/sig/openai/models/beta/threads/image_url.rbs
+++ b/sig/openai/models/beta/threads/image_url.rbs
@@ -8,7 +8,7 @@ module OpenAI
             detail: OpenAI::Models::Beta::Threads::ImageURL::detail
           }

-        class ImageURL < OpenAI::BaseModel
+        class ImageURL < OpenAI::Internal::Type::BaseModel
           attr_accessor url: String

           attr_reader detail: OpenAI::Models::Beta::Threads::ImageURL::detail?
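The `Detail` rewrite above is also systematic: enums change from classes inheriting `OpenAI::Enum` to modules that `extend OpenAI::Internal::Type::Enum`, and `def self?.values` is RBS notation for a method available both on the module itself and, `module_function`-style, on includers. A sketch of the surface this declares:

    detail = OpenAI::Models::Beta::Threads::ImageFileDelta::Detail
    detail::AUTO                 # => :auto -- members are plain symbol constants
    detail.values                # => [:auto, :low, :high], per `self?.values`
    detail.values.include?(:low) # => true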
@@ -17,25 +17,26 @@ module OpenAI
             OpenAI::Models::Beta::Threads::ImageURL::detail
           ) -> OpenAI::Models::Beta::Threads::ImageURL::detail

-          def initialize:
-            (
-              url: String,
-              detail: OpenAI::Models::Beta::Threads::ImageURL::detail
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::image_url | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            url: String,
+            ?detail: OpenAI::Models::Beta::Threads::ImageURL::detail
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::image_url
+          def to_hash: -> {
+            url: String,
+            detail: OpenAI::Models::Beta::Threads::ImageURL::detail
+          }

           type detail = :auto | :low | :high

-          class Detail < OpenAI::Enum
+          module Detail
+            extend OpenAI::Internal::Type::Enum
+
             AUTO: :auto
             LOW: :low
             HIGH: :high

-            def self.values: -> ::Array[OpenAI::Models::Beta::Threads::ImageURL::detail]
+            def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::ImageURL::detail]
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/image_url_content_block.rbs b/sig/openai/models/beta/threads/image_url_content_block.rbs
index 13d90d01..8cc69592 100644
--- a/sig/openai/models/beta/threads/image_url_content_block.rbs
+++ b/sig/openai/models/beta/threads/image_url_content_block.rbs
@@ -3,27 +3,22 @@ module OpenAI
     module Beta
       module Threads
         type image_url_content_block =
-          {
-            image_url: OpenAI::Models::Beta::Threads::ImageURL,
-            type: :image_url
-          }
+          { image_url: OpenAI::Beta::Threads::ImageURL, type: :image_url }

-        class ImageURLContentBlock < OpenAI::BaseModel
-          attr_accessor image_url: OpenAI::Models::Beta::Threads::ImageURL
+        class ImageURLContentBlock < OpenAI::Internal::Type::BaseModel
+          attr_accessor image_url: OpenAI::Beta::Threads::ImageURL

           attr_accessor type: :image_url

-          def initialize:
-            (
-              image_url: OpenAI::Models::Beta::Threads::ImageURL,
-              type: :image_url
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::image_url_content_block
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            image_url: OpenAI::Beta::Threads::ImageURL,
+            ?type: :image_url
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::image_url_content_block
+          def to_hash: -> {
+            image_url: OpenAI::Beta::Threads::ImageURL,
+            type: :image_url
+          }
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/image_url_delta.rbs b/sig/openai/models/beta/threads/image_url_delta.rbs
index 6f5c92ca..2c0721e9 100644
--- a/sig/openai/models/beta/threads/image_url_delta.rbs
+++ b/sig/openai/models/beta/threads/image_url_delta.rbs
@@ -8,7 +8,7 @@ module OpenAI
             url: String
           }

-        class ImageURLDelta < OpenAI::BaseModel
+        class ImageURLDelta < OpenAI::Internal::Type::BaseModel
           attr_reader detail: OpenAI::Models::Beta::Threads::ImageURLDelta::detail?
           def detail=: (
@@ -19,26 +19,26 @@ module OpenAI
             OpenAI::Models::Beta::Threads::ImageURLDelta::detail
           ) -> OpenAI::Models::Beta::Threads::ImageURLDelta::detail

           def url=: (String) -> String

-          def initialize:
-            (
-              detail: OpenAI::Models::Beta::Threads::ImageURLDelta::detail,
-              url: String
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::image_url_delta
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            ?detail: OpenAI::Models::Beta::Threads::ImageURLDelta::detail,
+            ?url: String
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::image_url_delta
+          def to_hash: -> {
+            detail: OpenAI::Models::Beta::Threads::ImageURLDelta::detail,
+            url: String
+          }

           type detail = :auto | :low | :high

-          class Detail < OpenAI::Enum
+          module Detail
+            extend OpenAI::Internal::Type::Enum
+
             AUTO: :auto
             LOW: :low
             HIGH: :high

-            def self.values: -> ::Array[OpenAI::Models::Beta::Threads::ImageURLDelta::detail]
+            def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::ImageURLDelta::detail]
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/image_url_delta_block.rbs b/sig/openai/models/beta/threads/image_url_delta_block.rbs
index 37b1861a..a0fc6366 100644
--- a/sig/openai/models/beta/threads/image_url_delta_block.rbs
+++ b/sig/openai/models/beta/threads/image_url_delta_block.rbs
@@ -6,32 +6,31 @@ module OpenAI
           {
             index: Integer,
             type: :image_url,
-            image_url: OpenAI::Models::Beta::Threads::ImageURLDelta
+            image_url: OpenAI::Beta::Threads::ImageURLDelta
           }

-        class ImageURLDeltaBlock < OpenAI::BaseModel
+        class ImageURLDeltaBlock < OpenAI::Internal::Type::BaseModel
           attr_accessor index: Integer

           attr_accessor type: :image_url

-          attr_reader image_url: OpenAI::Models::Beta::Threads::ImageURLDelta?
+          attr_reader image_url: OpenAI::Beta::Threads::ImageURLDelta?

           def image_url=: (
-            OpenAI::Models::Beta::Threads::ImageURLDelta
-          ) -> OpenAI::Models::Beta::Threads::ImageURLDelta
+            OpenAI::Beta::Threads::ImageURLDelta
+          ) -> OpenAI::Beta::Threads::ImageURLDelta

-          def initialize:
-            (
-              index: Integer,
-              image_url: OpenAI::Models::Beta::Threads::ImageURLDelta,
-              type: :image_url
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::image_url_delta_block
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            index: Integer,
+            ?image_url: OpenAI::Beta::Threads::ImageURLDelta,
+            ?type: :image_url
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::image_url_delta_block
+          def to_hash: -> {
+            index: Integer,
+            type: :image_url,
+            image_url: OpenAI::Beta::Threads::ImageURLDelta
+          }
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/message.rbs b/sig/openai/models/beta/threads/message.rbs
index f501c1d1..0aa6f377 100644
--- a/sig/openai/models/beta/threads/message.rbs
+++ b/sig/openai/models/beta/threads/message.rbs
@@ -6,12 +6,12 @@ module OpenAI
           {
             id: String,
             assistant_id: String?,
-            attachments: ::Array[OpenAI::Models::Beta::Threads::Message::Attachment]?,
+            attachments: ::Array[OpenAI::Beta::Threads::Message::Attachment]?,
             completed_at: Integer?,
             content: ::Array[OpenAI::Models::Beta::Threads::message_content],
             created_at: Integer,
             incomplete_at: Integer?,
-            incomplete_details: OpenAI::Models::Beta::Threads::Message::IncompleteDetails?,
+            incomplete_details: OpenAI::Beta::Threads::Message::IncompleteDetails?,
             metadata: OpenAI::Models::metadata?,
             object: :"thread.message",
             role: OpenAI::Models::Beta::Threads::Message::role,
@@ -20,12 +20,12 @@ module OpenAI
             thread_id: String
           }

-        class Message < OpenAI::BaseModel
+        class Message < OpenAI::Internal::Type::BaseModel
           attr_accessor id: String

           attr_accessor assistant_id: String?
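The delta blocks above (`ImageFileDeltaBlock`, `ImageURLDeltaBlock`) share a shape worth noting: only `index` stays required, while the payload and the `type` discriminator become optional, matching streaming deltas that may carry nothing but a position. Illustratively, under the signatures as declared:

    # Valid under the new signature: only `index` is required.
    delta = OpenAI::Models::Beta::Threads::ImageURLDeltaBlock.new(index: 0)
    delta.image_url # => nil until a later delta fills it in
    delta.to_hash   # shape given by the record type in the signature above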
-          attr_accessor attachments: ::Array[OpenAI::Models::Beta::Threads::Message::Attachment]?
+          attr_accessor attachments: ::Array[OpenAI::Beta::Threads::Message::Attachment]?

           attr_accessor completed_at: Integer?

@@ -35,7 +35,7 @@ module OpenAI

           attr_accessor incomplete_at: Integer?

-          attr_accessor incomplete_details: OpenAI::Models::Beta::Threads::Message::IncompleteDetails?
+          attr_accessor incomplete_details: OpenAI::Beta::Threads::Message::IncompleteDetails?

           attr_accessor metadata: OpenAI::Models::metadata?

@@ -49,28 +49,39 @@ module OpenAI

           attr_accessor thread_id: String

-          def initialize:
-            (
-              id: String,
-              assistant_id: String?,
-              attachments: ::Array[OpenAI::Models::Beta::Threads::Message::Attachment]?,
-              completed_at: Integer?,
-              content: ::Array[OpenAI::Models::Beta::Threads::message_content],
-              created_at: Integer,
-              incomplete_at: Integer?,
-              incomplete_details: OpenAI::Models::Beta::Threads::Message::IncompleteDetails?,
-              metadata: OpenAI::Models::metadata?,
-              role: OpenAI::Models::Beta::Threads::Message::role,
-              run_id: String?,
-              status: OpenAI::Models::Beta::Threads::Message::status,
-              thread_id: String,
-              object: :"thread.message"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::message | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            id: String,
+            assistant_id: String?,
+            attachments: ::Array[OpenAI::Beta::Threads::Message::Attachment]?,
+            completed_at: Integer?,
+            content: ::Array[OpenAI::Models::Beta::Threads::message_content],
+            created_at: Integer,
+            incomplete_at: Integer?,
+            incomplete_details: OpenAI::Beta::Threads::Message::IncompleteDetails?,
+            metadata: OpenAI::Models::metadata?,
+            role: OpenAI::Models::Beta::Threads::Message::role,
+            run_id: String?,
+            status: OpenAI::Models::Beta::Threads::Message::status,
+            thread_id: String,
+            ?object: :"thread.message"
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::message
+          def to_hash: -> {
+            id: String,
+            assistant_id: String?,
+            attachments: ::Array[OpenAI::Beta::Threads::Message::Attachment]?,
+            completed_at: Integer?,
+            content: ::Array[OpenAI::Models::Beta::Threads::message_content],
+            created_at: Integer,
+            incomplete_at: Integer?,
+            incomplete_details: OpenAI::Beta::Threads::Message::IncompleteDetails?,
+            metadata: OpenAI::Models::metadata?,
+            object: :"thread.message",
+            role: OpenAI::Models::Beta::Threads::Message::role,
+            run_id: String?,
+            status: OpenAI::Models::Beta::Threads::Message::status,
+            thread_id: String
+          }

           type attachment =
             {
@@ -78,7 +89,7 @@ module OpenAI
               tools: ::Array[OpenAI::Models::Beta::Threads::Message::Attachment::tool]
             }

-          class Attachment < OpenAI::BaseModel
+          class Attachment < OpenAI::Internal::Type::BaseModel
             attr_reader file_id: String?
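Note the distinction `Message` draws between nullable and optional: `attr_accessor attachments: ::Array[...]?` means the field is always present on the model but may be `nil` (the API can return `null`), whereas the `attr_reader`/writer pairs used elsewhere mark fields that may be absent entirely. At a call site that suggests safe navigation, roughly:

    # `message` is assumed to be a Message returned by the client, e.g.
    # message = client.beta.threads.messages.retrieve("msg_...", thread_id: "thread_...")
    message.attachments        # => nil or an Array of Message::Attachment
    message.attachments&.size  # nil-safe, since nil is a legal value here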
             def file_id=: (String) -> String

@@ -89,40 +100,35 @@ module OpenAI
               ::Array[OpenAI::Models::Beta::Threads::Message::Attachment::tool]
             ) -> ::Array[OpenAI::Models::Beta::Threads::Message::Attachment::tool]

-            def initialize:
-              (
-                file_id: String,
-                tools: ::Array[OpenAI::Models::Beta::Threads::Message::Attachment::tool]
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::Message::attachment
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              ?file_id: String,
+              ?tools: ::Array[OpenAI::Models::Beta::Threads::Message::Attachment::tool]
+            ) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::Message::attachment
+            def to_hash: -> {
+              file_id: String,
+              tools: ::Array[OpenAI::Models::Beta::Threads::Message::Attachment::tool]
+            }

             type tool =
-              OpenAI::Models::Beta::CodeInterpreterTool
-              | OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly
+              OpenAI::Beta::CodeInterpreterTool
+              | OpenAI::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly
+
+            module Tool
+              extend OpenAI::Internal::Type::Union

-            class Tool < OpenAI::Union
               type assistant_tools_file_search_type_only = { type: :file_search }

-              class AssistantToolsFileSearchTypeOnly < OpenAI::BaseModel
+              class AssistantToolsFileSearchTypeOnly < OpenAI::Internal::Type::BaseModel
                 attr_accessor type: :file_search

-                def initialize:
-                  (type: :file_search) -> void
-                  | (
-                    ?OpenAI::Models::Beta::Threads::Message::Attachment::Tool::assistant_tools_file_search_type_only
-                    | OpenAI::BaseModel data
-                  ) -> void
+                def initialize: (?type: :file_search) -> void

-                def to_hash: -> OpenAI::Models::Beta::Threads::Message::Attachment::Tool::assistant_tools_file_search_type_only
+                def to_hash: -> { type: :file_search }
               end

-              private def self.variants: -> [[nil, OpenAI::Models::Beta::CodeInterpreterTool], [nil, OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly]]
+              def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::Message::Attachment::tool]
             end
           end

@@ -131,19 +137,16 @@ module OpenAI
               reason: OpenAI::Models::Beta::Threads::Message::IncompleteDetails::reason
             }

-          class IncompleteDetails < OpenAI::BaseModel
+          class IncompleteDetails < OpenAI::Internal::Type::BaseModel
             attr_accessor reason: OpenAI::Models::Beta::Threads::Message::IncompleteDetails::reason

-            def initialize:
-              (
-                reason: OpenAI::Models::Beta::Threads::Message::IncompleteDetails::reason
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::Message::incomplete_details
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              reason: OpenAI::Models::Beta::Threads::Message::IncompleteDetails::reason
+            ) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::Message::incomplete_details
+            def to_hash: -> {
+              reason: OpenAI::Models::Beta::Threads::Message::IncompleteDetails::reason
+            }

             type reason =
               :content_filter
@@ -152,34 +155,40 @@ module OpenAI
               | :run_expired
               | :run_failed

-            class Reason < OpenAI::Enum
+            module Reason
+              extend OpenAI::Internal::Type::Enum
+
               CONTENT_FILTER: :content_filter
               MAX_TOKENS: :max_tokens
               RUN_CANCELLED: :run_cancelled
               RUN_EXPIRED: :run_expired
               RUN_FAILED: :run_failed

-              def self.values: -> ::Array[OpenAI::Models::Beta::Threads::Message::IncompleteDetails::reason]
+              def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::Message::IncompleteDetails::reason]
             end
           end

           type role = :user | :assistant

-          class Role < OpenAI::Enum
+          module Role
+            extend OpenAI::Internal::Type::Enum
+
             USER: :user
             ASSISTANT: :assistant

-            def self.values: -> ::Array[OpenAI::Models::Beta::Threads::Message::role]
+            def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::Message::role]
           end

           type status = :in_progress | :incomplete | :completed

-          class Status < OpenAI::Enum
+          module Status
+            extend OpenAI::Internal::Type::Enum
+
             IN_PROGRESS: :in_progress
             INCOMPLETE: :incomplete
             COMPLETED: :completed

-            def self.values: -> ::Array[OpenAI::Models::Beta::Threads::Message::status]
+            def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::Message::status]
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/message_content.rbs b/sig/openai/models/beta/threads/message_content.rbs
index 970c2528..35e6ff1f 100644
--- a/sig/openai/models/beta/threads/message_content.rbs
+++ b/sig/openai/models/beta/threads/message_content.rbs
@@ -3,13 +3,15 @@ module OpenAI
     module Beta
       module Threads
         type message_content =
-          OpenAI::Models::Beta::Threads::ImageFileContentBlock
-          | OpenAI::Models::Beta::Threads::ImageURLContentBlock
-          | OpenAI::Models::Beta::Threads::TextContentBlock
-          | OpenAI::Models::Beta::Threads::RefusalContentBlock
+          OpenAI::Beta::Threads::ImageFileContentBlock
+          | OpenAI::Beta::Threads::ImageURLContentBlock
+          | OpenAI::Beta::Threads::TextContentBlock
+          | OpenAI::Beta::Threads::RefusalContentBlock

-        class MessageContent < OpenAI::Union
-          private def self.variants: -> [[:image_file, OpenAI::Models::Beta::Threads::ImageFileContentBlock], [:image_url, OpenAI::Models::Beta::Threads::ImageURLContentBlock], [:text, OpenAI::Models::Beta::Threads::TextContentBlock], [:refusal, OpenAI::Models::Beta::Threads::RefusalContentBlock]]
+        module MessageContent
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::message_content]
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/message_content_delta.rbs b/sig/openai/models/beta/threads/message_content_delta.rbs
index 88d80be8..6c1edb1d 100644
--- a/sig/openai/models/beta/threads/message_content_delta.rbs
+++ b/sig/openai/models/beta/threads/message_content_delta.rbs
@@ -3,13 +3,15 @@ module OpenAI
     module Beta
       module Threads
         type message_content_delta =
-          OpenAI::Models::Beta::Threads::ImageFileDeltaBlock
-          | OpenAI::Models::Beta::Threads::TextDeltaBlock
-          | OpenAI::Models::Beta::Threads::RefusalDeltaBlock
-          | OpenAI::Models::Beta::Threads::ImageURLDeltaBlock
+          OpenAI::Beta::Threads::ImageFileDeltaBlock
+          | OpenAI::Beta::Threads::TextDeltaBlock
+          | OpenAI::Beta::Threads::RefusalDeltaBlock
+          | OpenAI::Beta::Threads::ImageURLDeltaBlock

-        class MessageContentDelta < OpenAI::Union
-          private def self.variants: -> [[:image_file, OpenAI::Models::Beta::Threads::ImageFileDeltaBlock], [:text, OpenAI::Models::Beta::Threads::TextDeltaBlock], [:refusal, OpenAI::Models::Beta::Threads::RefusalDeltaBlock], [:image_url, OpenAI::Models::Beta::Threads::ImageURLDeltaBlock]]
+        module MessageContentDelta
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::message_content_delta]
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/message_content_part_param.rbs b/sig/openai/models/beta/threads/message_content_part_param.rbs
index 0fa6c1af..802d8b17 100644
--- a/sig/openai/models/beta/threads/message_content_part_param.rbs
+++ b/sig/openai/models/beta/threads/message_content_part_param.rbs
@@ -3,12 +3,14 @@ module OpenAI
     module Beta
       module Threads
         type message_content_part_param =
-          OpenAI::Models::Beta::Threads::ImageFileContentBlock
-          | OpenAI::Models::Beta::Threads::ImageURLContentBlock
-          | OpenAI::Models::Beta::Threads::TextContentBlockParam
+          OpenAI::Beta::Threads::ImageFileContentBlock
+          | OpenAI::Beta::Threads::ImageURLContentBlock
+          | OpenAI::Beta::Threads::TextContentBlockParam

-        class MessageContentPartParam < OpenAI::Union
-          private def self.variants: -> [[:image_file, OpenAI::Models::Beta::Threads::ImageFileContentBlock], [:image_url, OpenAI::Models::Beta::Threads::ImageURLContentBlock], [:text, OpenAI::Models::Beta::Threads::TextContentBlockParam]]
+        module MessageContentPartParam
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::message_content_part_param]
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/message_create_params.rbs b/sig/openai/models/beta/threads/message_create_params.rbs
index d9e425f5..c1229739 100644
--- a/sig/openai/models/beta/threads/message_create_params.rbs
+++ b/sig/openai/models/beta/threads/message_create_params.rbs
@@ -6,58 +6,60 @@ module OpenAI
           {
             content: OpenAI::Models::Beta::Threads::MessageCreateParams::content,
             role: OpenAI::Models::Beta::Threads::MessageCreateParams::role,
-            attachments: ::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment]?,
+            attachments: ::Array[OpenAI::Beta::Threads::MessageCreateParams::Attachment]?,
             metadata: OpenAI::Models::metadata?
           }
-          & OpenAI::request_parameters
+          & OpenAI::Internal::Type::request_parameters

-        class MessageCreateParams < OpenAI::BaseModel
-          extend OpenAI::RequestParameters::Converter
-          include OpenAI::RequestParameters
+        class MessageCreateParams < OpenAI::Internal::Type::BaseModel
+          extend OpenAI::Internal::Type::RequestParameters::Converter
+          include OpenAI::Internal::Type::RequestParameters

           attr_accessor content: OpenAI::Models::Beta::Threads::MessageCreateParams::content

           attr_accessor role: OpenAI::Models::Beta::Threads::MessageCreateParams::role

-          attr_accessor attachments: ::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment]?
+          attr_accessor attachments: ::Array[OpenAI::Beta::Threads::MessageCreateParams::Attachment]?

           attr_accessor metadata: OpenAI::Models::metadata?
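Unions change shape the same way across the content files above: the private, tag-paired `def self.variants: -> [[:image_file, ...], ...]` becomes a public `def self?.variants` returning a flat array typed by the union alias, on a module extending `OpenAI::Internal::Type::Union`. A sketch of how the new surface might be inspected; the return contents follow the signature and are not verified here:

    OpenAI::Models::Beta::Threads::MessageContent.variants
    # => the four content-block variants as a flat Array; the :image_file /
    #    :text style discriminator tags are no longer part of the public type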
-          def initialize:
-            (
-              content: OpenAI::Models::Beta::Threads::MessageCreateParams::content,
-              role: OpenAI::Models::Beta::Threads::MessageCreateParams::role,
-              attachments: ::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment]?,
-              metadata: OpenAI::Models::metadata?,
-              request_options: OpenAI::request_opts
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::message_create_params
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            content: OpenAI::Models::Beta::Threads::MessageCreateParams::content,
+            role: OpenAI::Models::Beta::Threads::MessageCreateParams::role,
+            ?attachments: ::Array[OpenAI::Beta::Threads::MessageCreateParams::Attachment]?,
+            ?metadata: OpenAI::Models::metadata?,
+            ?request_options: OpenAI::request_opts
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::message_create_params
+          def to_hash: -> {
+            content: OpenAI::Models::Beta::Threads::MessageCreateParams::content,
+            role: OpenAI::Models::Beta::Threads::MessageCreateParams::role,
+            attachments: ::Array[OpenAI::Beta::Threads::MessageCreateParams::Attachment]?,
+            metadata: OpenAI::Models::metadata?,
+            request_options: OpenAI::RequestOptions
+          }

           type content =
             String
             | ::Array[OpenAI::Models::Beta::Threads::message_content_part_param]

-          class Content < OpenAI::Union
-            type message_content_part_param_array =
-              ::Array[OpenAI::Models::Beta::Threads::message_content_part_param]
+          module Content
+            extend OpenAI::Internal::Type::Union

-            MessageContentPartParamArray: message_content_part_param_array
+            def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::content]

-            private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Beta::Threads::message_content_part_param]]]
+            MessageContentPartParamArray: OpenAI::Internal::Type::Converter
           end

           type role = :user | :assistant

-          class Role < OpenAI::Enum
+          module Role
+            extend OpenAI::Internal::Type::Enum
+
             USER: :user
             ASSISTANT: :assistant

-            def self.values: -> ::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::role]
+            def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::role]
           end

           type attachment =
             {
@@ -66,7 +68,7 @@ module OpenAI
               tools: ::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::tool]
             }

-          class Attachment < OpenAI::BaseModel
+          class Attachment < OpenAI::Internal::Type::BaseModel
             attr_reader file_id: String?
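`MessageCreateParams` shows the request-side version of the rewrite: `RequestParameters` and its `Converter` move under `OpenAI::Internal::Type`, and everything except `content` and `role` becomes optional, including `request_options`. The `content` union still accepts a bare string or an array of content parts, so a call sketch under these signatures (values illustrative):

    params = OpenAI::Models::Beta::Threads::MessageCreateParams.new(
      content: "Summarize the attached file.", # or an array of content parts
      role: :user
    )
    params.to_hash # record now includes request_options: OpenAI::RequestOptions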
             def file_id=: (String) -> String

@@ -77,39 +79,34 @@ module OpenAI
               ::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::tool]
             ) -> ::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::tool]

-            def initialize:
-              (
-                file_id: String,
-                tools: ::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::tool]
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::MessageCreateParams::attachment
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              ?file_id: String,
+              ?tools: ::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::tool]
+            ) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::MessageCreateParams::attachment
+            def to_hash: -> {
+              file_id: String,
+              tools: ::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::tool]
+            }

             type tool =
-              OpenAI::Models::Beta::CodeInterpreterTool
-              | OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch
+              OpenAI::Beta::CodeInterpreterTool
+              | OpenAI::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch
+
+            module Tool
+              extend OpenAI::Internal::Type::Union

-            class Tool < OpenAI::Union
               type file_search = { type: :file_search }

-              class FileSearch < OpenAI::BaseModel
+              class FileSearch < OpenAI::Internal::Type::BaseModel
                 attr_accessor type: :file_search

-                def initialize:
-                  (type: :file_search) -> void
-                  | (
-                    ?OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::file_search
-                    | OpenAI::BaseModel data
-                  ) -> void
+                def initialize: (?type: :file_search) -> void

-                def to_hash: -> OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::file_search
+                def to_hash: -> { type: :file_search }
               end

-              private def self.variants: -> [[:code_interpreter, OpenAI::Models::Beta::CodeInterpreterTool], [:file_search, OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch]]
+              def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::tool]
             end
           end
         end
diff --git a/sig/openai/models/beta/threads/message_delete_params.rbs b/sig/openai/models/beta/threads/message_delete_params.rbs
index 0b6c4e94..50109473 100644
--- a/sig/openai/models/beta/threads/message_delete_params.rbs
+++ b/sig/openai/models/beta/threads/message_delete_params.rbs
@@ -3,22 +3,23 @@ module OpenAI
     module Beta
       module Threads
         type message_delete_params =
-          { thread_id: String } & OpenAI::request_parameters
+          { thread_id: String } & OpenAI::Internal::Type::request_parameters

-        class MessageDeleteParams < OpenAI::BaseModel
-          extend OpenAI::RequestParameters::Converter
-          include OpenAI::RequestParameters
+        class MessageDeleteParams < OpenAI::Internal::Type::BaseModel
+          extend OpenAI::Internal::Type::RequestParameters::Converter
+          include OpenAI::Internal::Type::RequestParameters

           attr_accessor thread_id: String

-          def initialize:
-            (thread_id: String, request_options: OpenAI::request_opts) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::message_delete_params
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            thread_id: String,
+            ?request_options: OpenAI::request_opts
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::message_delete_params
+          def to_hash: -> {
+            thread_id: String,
+            request_options: OpenAI::RequestOptions
+          }
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/message_deleted.rbs b/sig/openai/models/beta/threads/message_deleted.rbs
index 58941655..5e95bfc4 100644
--- a/sig/openai/models/beta/threads/message_deleted.rbs
+++ b/sig/openai/models/beta/threads/message_deleted.rbs
@@ -5,25 +5,24 @@ module OpenAI
         type message_deleted =
           { id: String, deleted: bool, object: :"thread.message.deleted" }

-        class MessageDeleted < OpenAI::BaseModel
+        class MessageDeleted < OpenAI::Internal::Type::BaseModel
           attr_accessor id: String

           attr_accessor deleted: bool

           attr_accessor object: :"thread.message.deleted"

-          def initialize:
-            (
-              id: String,
-              deleted: bool,
-              object: :"thread.message.deleted"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::message_deleted
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            id: String,
+            deleted: bool,
+            ?object: :"thread.message.deleted"
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::message_deleted
+          def to_hash: -> {
+            id: String,
+            deleted: bool,
+            object: :"thread.message.deleted"
+          }
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/message_delta.rbs b/sig/openai/models/beta/threads/message_delta.rbs
index 2c4dd70b..a7f93140 100644
--- a/sig/openai/models/beta/threads/message_delta.rbs
+++ b/sig/openai/models/beta/threads/message_delta.rbs
@@ -8,7 +8,7 @@ module OpenAI
             role: OpenAI::Models::Beta::Threads::MessageDelta::role
           }

-        class MessageDelta < OpenAI::BaseModel
+        class MessageDelta < OpenAI::Internal::Type::BaseModel
           attr_reader content: ::Array[OpenAI::Models::Beta::Threads::message_content_delta]?

           def content=: (
@@ -21,25 +21,25 @@ module OpenAI
             OpenAI::Models::Beta::Threads::MessageDelta::role
           ) -> OpenAI::Models::Beta::Threads::MessageDelta::role

-          def initialize:
-            (
-              content: ::Array[OpenAI::Models::Beta::Threads::message_content_delta],
-              role: OpenAI::Models::Beta::Threads::MessageDelta::role
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::message_delta
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            ?content: ::Array[OpenAI::Models::Beta::Threads::message_content_delta],
+            ?role: OpenAI::Models::Beta::Threads::MessageDelta::role
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::message_delta
+          def to_hash: -> {
+            content: ::Array[OpenAI::Models::Beta::Threads::message_content_delta],
+            role: OpenAI::Models::Beta::Threads::MessageDelta::role
+          }

           type role = :user | :assistant

-          class Role < OpenAI::Enum
+          module Role
+            extend OpenAI::Internal::Type::Enum
+
             USER: :user
             ASSISTANT: :assistant

-            def self.values: -> ::Array[OpenAI::Models::Beta::Threads::MessageDelta::role]
+            def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::MessageDelta::role]
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/message_delta_event.rbs b/sig/openai/models/beta/threads/message_delta_event.rbs
index f682114b..8068a35a 100644
--- a/sig/openai/models/beta/threads/message_delta_event.rbs
+++ b/sig/openai/models/beta/threads/message_delta_event.rbs
@@ -5,29 +5,28 @@ module OpenAI
         type message_delta_event =
           {
             id: String,
-            delta: OpenAI::Models::Beta::Threads::MessageDelta,
+            delta: OpenAI::Beta::Threads::MessageDelta,
             object: :"thread.message.delta"
           }

-        class MessageDeltaEvent < OpenAI::BaseModel
+        class MessageDeltaEvent < OpenAI::Internal::Type::BaseModel
           attr_accessor id: String

-          attr_accessor delta: OpenAI::Models::Beta::Threads::MessageDelta
+          attr_accessor delta: OpenAI::Beta::Threads::MessageDelta

           attr_accessor object: :"thread.message.delta"

-          def initialize:
-            (
-              id: String,
-              delta: OpenAI::Models::Beta::Threads::MessageDelta,
-              object: :"thread.message.delta"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::message_delta_event
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            id: String,
+            delta: OpenAI::Beta::Threads::MessageDelta,
+            ?object: :"thread.message.delta"
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::message_delta_event
+          def to_hash: -> {
+            id: String,
+            delta: OpenAI::Beta::Threads::MessageDelta,
+            object: :"thread.message.delta"
+          }
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/message_list_params.rbs b/sig/openai/models/beta/threads/message_list_params.rbs
index ec8567b0..32254799 100644
--- a/sig/openai/models/beta/threads/message_list_params.rbs
+++ b/sig/openai/models/beta/threads/message_list_params.rbs
@@ -10,11 +10,11 @@ module OpenAI
             order: OpenAI::Models::Beta::Threads::MessageListParams::order,
             run_id: String
           }
-          & OpenAI::request_parameters
+          & OpenAI::Internal::Type::request_parameters

-        class MessageListParams < OpenAI::BaseModel
-          extend OpenAI::RequestParameters::Converter
-          include OpenAI::RequestParameters
+        class MessageListParams < OpenAI::Internal::Type::BaseModel
+          extend OpenAI::Internal::Type::RequestParameters::Converter
+          include OpenAI::Internal::Type::RequestParameters

           attr_reader after: String?

@@ -38,29 +38,33 @@ module OpenAI

           def run_id=: (String) -> String

-          def initialize:
-            (
-              after: String,
-              before: String,
-              limit: Integer,
-              order: OpenAI::Models::Beta::Threads::MessageListParams::order,
-              run_id: String,
-              request_options: OpenAI::request_opts
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::message_list_params
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::Threads::message_list_params
+          def initialize: (
+            ?after: String,
+            ?before: String,
+            ?limit: Integer,
+            ?order: OpenAI::Models::Beta::Threads::MessageListParams::order,
+            ?run_id: String,
+            ?request_options: OpenAI::request_opts
+          ) -> void
+
+          def to_hash: -> {
+            after: String,
+            before: String,
+            limit: Integer,
+            order: OpenAI::Models::Beta::Threads::MessageListParams::order,
+            run_id: String,
+            request_options: OpenAI::RequestOptions
+          }

           type order = :asc | :desc

-          class Order < OpenAI::Enum
+          module Order
+            extend OpenAI::Internal::Type::Enum
+
             ASC: :asc
             DESC: :desc

-            def self.values: -> ::Array[OpenAI::Models::Beta::Threads::MessageListParams::order]
+            def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::MessageListParams::order]
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/message_retrieve_params.rbs b/sig/openai/models/beta/threads/message_retrieve_params.rbs
index f5475f00..b0dc3313 100644
--- a/sig/openai/models/beta/threads/message_retrieve_params.rbs
+++ b/sig/openai/models/beta/threads/message_retrieve_params.rbs
@@ -3,22 +3,23 @@ module OpenAI
     module Beta
      module Threads
        type message_retrieve_params =
-          { thread_id: String } & OpenAI::request_parameters
+          { thread_id: String } & OpenAI::Internal::Type::request_parameters

-        class MessageRetrieveParams < OpenAI::BaseModel
-          extend OpenAI::RequestParameters::Converter
-          include OpenAI::RequestParameters
+        class MessageRetrieveParams < OpenAI::Internal::Type::BaseModel
+          extend OpenAI::Internal::Type::RequestParameters::Converter
+          include OpenAI::Internal::Type::RequestParameters

           attr_accessor thread_id: String

-          def initialize:
-            (thread_id: String, request_options: OpenAI::request_opts) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::message_retrieve_params
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            thread_id: String,
+            ?request_options: OpenAI::request_opts
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::message_retrieve_params
+          def to_hash: -> {
+            thread_id: String,
+            request_options: OpenAI::RequestOptions
+          }
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/message_update_params.rbs b/sig/openai/models/beta/threads/message_update_params.rbs
index 5fb9f563..a4a5cb18 100644
--- a/sig/openai/models/beta/threads/message_update_params.rbs
+++ b/sig/openai/models/beta/threads/message_update_params.rbs
@@ -4,28 +4,27 @@ module OpenAI
     module Threads
        type message_update_params =
          { thread_id: String, metadata: OpenAI::Models::metadata? }
-          & OpenAI::request_parameters
+          & OpenAI::Internal::Type::request_parameters

-        class MessageUpdateParams < OpenAI::BaseModel
-          extend OpenAI::RequestParameters::Converter
-          include OpenAI::RequestParameters
+        class MessageUpdateParams < OpenAI::Internal::Type::BaseModel
+          extend OpenAI::Internal::Type::RequestParameters::Converter
+          include OpenAI::Internal::Type::RequestParameters

           attr_accessor thread_id: String

           attr_accessor metadata: OpenAI::Models::metadata?

-          def initialize:
-            (
-              thread_id: String,
-              metadata: OpenAI::Models::metadata?,
-              request_options: OpenAI::request_opts
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::message_update_params
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            thread_id: String,
+            ?metadata: OpenAI::Models::metadata?,
+            ?request_options: OpenAI::request_opts
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::message_update_params
+          def to_hash: -> {
+            thread_id: String,
+            metadata: OpenAI::Models::metadata?,
+            request_options: OpenAI::RequestOptions
+          }
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/refusal_content_block.rbs b/sig/openai/models/beta/threads/refusal_content_block.rbs
index b44bf064..815e53d2 100644
--- a/sig/openai/models/beta/threads/refusal_content_block.rbs
+++ b/sig/openai/models/beta/threads/refusal_content_block.rbs
@@ -4,19 +4,14 @@ module OpenAI
     module Threads
        type refusal_content_block = { refusal: String, type: :refusal }

-        class RefusalContentBlock < OpenAI::BaseModel
+        class RefusalContentBlock < OpenAI::Internal::Type::BaseModel
           attr_accessor refusal: String

           attr_accessor type: :refusal

-          def initialize:
-            (refusal: String, type: :refusal) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::refusal_content_block
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (refusal: String, ?type: :refusal) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::refusal_content_block
+          def to_hash: -> { refusal: String, type: :refusal }
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/refusal_delta_block.rbs b/sig/openai/models/beta/threads/refusal_delta_block.rbs
index eae553e9..e38a21fa 100644
--- a/sig/openai/models/beta/threads/refusal_delta_block.rbs
+++ b/sig/openai/models/beta/threads/refusal_delta_block.rbs
@@ -5,7 +5,7 @@ module OpenAI
         type refusal_delta_block =
           { index: Integer, type: :refusal, refusal: String }

-        class RefusalDeltaBlock < OpenAI::BaseModel
+        class RefusalDeltaBlock < OpenAI::Internal::Type::BaseModel
           attr_accessor index: Integer

           attr_accessor type: :refusal

@@ -14,14 +14,13 @@ module OpenAI

           def refusal=: (String) -> String

-          def initialize:
-            (index: Integer, refusal: String, type: :refusal) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::refusal_delta_block
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            index: Integer,
+            ?refusal: String,
+            ?type: :refusal
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::refusal_delta_block
+          def to_hash: -> { index: Integer, type: :refusal, refusal: String }
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/required_action_function_tool_call.rbs b/sig/openai/models/beta/threads/required_action_function_tool_call.rbs
index 3b4334bf..8f6d8be6 100644
--- a/sig/openai/models/beta/threads/required_action_function_tool_call.rbs
+++ b/sig/openai/models/beta/threads/required_action_function_tool_call.rbs
@@ -5,45 +5,39 @@ module OpenAI
         type required_action_function_tool_call =
           {
             id: String,
-            function: OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function,
+            function: OpenAI::Beta::Threads::RequiredActionFunctionToolCall::Function,
             type: :function
           }

-        class RequiredActionFunctionToolCall < OpenAI::BaseModel
+        class RequiredActionFunctionToolCall < OpenAI::Internal::Type::BaseModel
           attr_accessor id: String

-          attr_accessor function: OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function
+          attr_accessor function: OpenAI::Beta::Threads::RequiredActionFunctionToolCall::Function

           attr_accessor type: :function

-          def initialize:
-            (
-              id: String,
-              function: OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function,
-              type: :function
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::required_action_function_tool_call
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            id: String,
+            function: OpenAI::Beta::Threads::RequiredActionFunctionToolCall::Function,
+            ?type: :function
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::required_action_function_tool_call
+          def to_hash: -> {
+            id: String,
+            function: OpenAI::Beta::Threads::RequiredActionFunctionToolCall::Function,
+            type: :function
+          }

           type function = { arguments: String, name: String }

-          class Function < OpenAI::BaseModel
+          class Function < OpenAI::Internal::Type::BaseModel
             attr_accessor arguments: String

             attr_accessor name: String

-            def initialize:
-              (arguments: String, name: String) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::function
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (arguments: String, name: String) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::function
+            def to_hash: -> { arguments: String, name: String }
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/run.rbs b/sig/openai/models/beta/threads/run.rbs
index fc4b4d57..92f30e03 100644
--- a/sig/openai/models/beta/threads/run.rbs
+++ b/sig/openai/models/beta/threads/run.rbs
@@ -11,29 +11,29 @@ module OpenAI
             created_at: Integer,
             expires_at: Integer?,
             failed_at: Integer?,
-            incomplete_details: OpenAI::Models::Beta::Threads::Run::IncompleteDetails?,
+            incomplete_details: OpenAI::Beta::Threads::Run::IncompleteDetails?,
             instructions: String,
-            last_error: OpenAI::Models::Beta::Threads::Run::LastError?,
+            last_error: OpenAI::Beta::Threads::Run::LastError?,
             max_completion_tokens: Integer?,
             max_prompt_tokens: Integer?,
             metadata: OpenAI::Models::metadata?,
             model: String,
             object: :"thread.run",
             parallel_tool_calls: bool,
-            required_action: OpenAI::Models::Beta::Threads::Run::RequiredAction?,
+            required_action: OpenAI::Beta::Threads::Run::RequiredAction?,
             response_format: OpenAI::Models::Beta::assistant_response_format_option?,
             started_at: Integer?,
             status: OpenAI::Models::Beta::Threads::run_status,
             thread_id: String,
             tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?,
             tools: ::Array[OpenAI::Models::Beta::assistant_tool],
-            truncation_strategy: OpenAI::Models::Beta::Threads::Run::TruncationStrategy?,
-            usage: OpenAI::Models::Beta::Threads::Run::Usage?,
+            truncation_strategy: OpenAI::Beta::Threads::Run::TruncationStrategy?,
+            usage: OpenAI::Beta::Threads::Run::Usage?,
             temperature: Float?,
             top_p: Float?
           }

-        class Run < OpenAI::BaseModel
+        class Run < OpenAI::Internal::Type::BaseModel
           attr_accessor id: String

           attr_accessor assistant_id: String

@@ -48,11 +48,11 @@ module OpenAI

           attr_accessor failed_at: Integer?

-          attr_accessor incomplete_details: OpenAI::Models::Beta::Threads::Run::IncompleteDetails?
+          attr_accessor incomplete_details: OpenAI::Beta::Threads::Run::IncompleteDetails?

           attr_accessor instructions: String

-          attr_accessor last_error: OpenAI::Models::Beta::Threads::Run::LastError?
+          attr_accessor last_error: OpenAI::Beta::Threads::Run::LastError?

           attr_accessor max_completion_tokens: Integer?

@@ -66,7 +66,7 @@ module OpenAI

           attr_accessor parallel_tool_calls: bool

-          attr_accessor required_action: OpenAI::Models::Beta::Threads::Run::RequiredAction?
+          attr_accessor required_action: OpenAI::Beta::Threads::Run::RequiredAction?

           attr_accessor response_format: OpenAI::Models::Beta::assistant_response_format_option?

@@ -80,80 +80,103 @@ module OpenAI

           attr_accessor tools: ::Array[OpenAI::Models::Beta::assistant_tool]

-          attr_accessor truncation_strategy: OpenAI::Models::Beta::Threads::Run::TruncationStrategy?
+          attr_accessor truncation_strategy: OpenAI::Beta::Threads::Run::TruncationStrategy?

-          attr_accessor usage: OpenAI::Models::Beta::Threads::Run::Usage?
+          attr_accessor usage: OpenAI::Beta::Threads::Run::Usage?

           attr_accessor temperature: Float?

           attr_accessor top_p: Float?

-          def initialize:
-            (
-              id: String,
-              assistant_id: String,
-              cancelled_at: Integer?,
-              completed_at: Integer?,
-              created_at: Integer,
-              expires_at: Integer?,
-              failed_at: Integer?,
-              incomplete_details: OpenAI::Models::Beta::Threads::Run::IncompleteDetails?,
-              instructions: String,
-              last_error: OpenAI::Models::Beta::Threads::Run::LastError?,
-              max_completion_tokens: Integer?,
-              max_prompt_tokens: Integer?,
-              metadata: OpenAI::Models::metadata?,
-              model: String,
-              parallel_tool_calls: bool,
-              required_action: OpenAI::Models::Beta::Threads::Run::RequiredAction?,
-              response_format: OpenAI::Models::Beta::assistant_response_format_option?,
-              started_at: Integer?,
-              status: OpenAI::Models::Beta::Threads::run_status,
-              thread_id: String,
-              tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?,
-              tools: ::Array[OpenAI::Models::Beta::assistant_tool],
-              truncation_strategy: OpenAI::Models::Beta::Threads::Run::TruncationStrategy?,
-              usage: OpenAI::Models::Beta::Threads::Run::Usage?,
-              temperature: Float?,
-              top_p: Float?,
-              object: :"thread.run"
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::run | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Beta::Threads::run
+          def initialize: (
+            id: String,
+            assistant_id: String,
+            cancelled_at: Integer?,
+            completed_at: Integer?,
+            created_at: Integer,
+            expires_at: Integer?,
+            failed_at: Integer?,
+            incomplete_details: OpenAI::Beta::Threads::Run::IncompleteDetails?,
+            instructions: String,
+            last_error: OpenAI::Beta::Threads::Run::LastError?,
+            max_completion_tokens: Integer?,
+            max_prompt_tokens: Integer?,
+            metadata: OpenAI::Models::metadata?,
+            model: String,
+            parallel_tool_calls: bool,
+            required_action: OpenAI::Beta::Threads::Run::RequiredAction?,
+            response_format: OpenAI::Models::Beta::assistant_response_format_option?,
+            started_at: Integer?,
+            status: OpenAI::Models::Beta::Threads::run_status,
+            thread_id: String,
+            tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?,
+            tools: ::Array[OpenAI::Models::Beta::assistant_tool],
+            truncation_strategy: OpenAI::Beta::Threads::Run::TruncationStrategy?,
+            usage: OpenAI::Beta::Threads::Run::Usage?,
+            ?temperature: Float?,
+            ?top_p: Float?,
+            ?object: :"thread.run"
+          ) -> void
+
+          def to_hash: -> {
+            id: String,
+            assistant_id: String,
+            cancelled_at: Integer?,
+            completed_at: Integer?,
+            created_at: Integer,
+            expires_at: Integer?,
+            failed_at: Integer?,
+            incomplete_details: OpenAI::Beta::Threads::Run::IncompleteDetails?,
+            instructions: String,
+            last_error: OpenAI::Beta::Threads::Run::LastError?,
+            max_completion_tokens: Integer?,
+            max_prompt_tokens: Integer?,
+            metadata: OpenAI::Models::metadata?,
+            model: String,
+            object: :"thread.run",
+            parallel_tool_calls: bool,
+            required_action: OpenAI::Beta::Threads::Run::RequiredAction?,
+            response_format: OpenAI::Models::Beta::assistant_response_format_option?,
+            started_at: Integer?,
+            status: OpenAI::Models::Beta::Threads::run_status,
+            thread_id: String,
+            tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?,
+            tools: ::Array[OpenAI::Models::Beta::assistant_tool],
+            truncation_strategy: OpenAI::Beta::Threads::Run::TruncationStrategy?,
+            usage: OpenAI::Beta::Threads::Run::Usage?,
+            temperature: Float?,
+            top_p: Float?
+          }

           type incomplete_details =
             {
               reason: OpenAI::Models::Beta::Threads::Run::IncompleteDetails::reason
             }

-          class IncompleteDetails < OpenAI::BaseModel
+          class IncompleteDetails < OpenAI::Internal::Type::BaseModel
             attr_reader reason: OpenAI::Models::Beta::Threads::Run::IncompleteDetails::reason?

             def reason=: (
               OpenAI::Models::Beta::Threads::Run::IncompleteDetails::reason
             ) -> OpenAI::Models::Beta::Threads::Run::IncompleteDetails::reason

-            def initialize:
-              (
-                reason: OpenAI::Models::Beta::Threads::Run::IncompleteDetails::reason
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::Run::incomplete_details
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              ?reason: OpenAI::Models::Beta::Threads::Run::IncompleteDetails::reason
+            ) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::Run::incomplete_details
+            def to_hash: -> {
+              reason: OpenAI::Models::Beta::Threads::Run::IncompleteDetails::reason
+            }

             type reason = :max_completion_tokens | :max_prompt_tokens

-            class Reason < OpenAI::Enum
+            module Reason
+              extend OpenAI::Internal::Type::Enum
+
               MAX_COMPLETION_TOKENS: :max_completion_tokens
               MAX_PROMPT_TOKENS: :max_prompt_tokens

-              def self.values: -> ::Array[OpenAI::Models::Beta::Threads::Run::IncompleteDetails::reason]
+              def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::Run::IncompleteDetails::reason]
             end
           end

@@ -163,75 +186,70 @@ module OpenAI
               message: String
             }

-          class LastError < OpenAI::BaseModel
+          class LastError < OpenAI::Internal::Type::BaseModel
             attr_accessor code: OpenAI::Models::Beta::Threads::Run::LastError::code

             attr_accessor message: String

-            def initialize:
-              (
-                code: OpenAI::Models::Beta::Threads::Run::LastError::code,
-                message: String
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::Run::last_error
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              code: OpenAI::Models::Beta::Threads::Run::LastError::code,
+              message: String
+            ) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::Run::last_error
+            def to_hash: -> {
+              code: OpenAI::Models::Beta::Threads::Run::LastError::code,
+              message: String
+            }

             type code = :server_error | :rate_limit_exceeded | :invalid_prompt

-            class Code < OpenAI::Enum
+            module Code
+              extend OpenAI::Internal::Type::Enum
+
               SERVER_ERROR: :server_error
               RATE_LIMIT_EXCEEDED: :rate_limit_exceeded
               INVALID_PROMPT: :invalid_prompt

-              def self.values: -> ::Array[OpenAI::Models::Beta::Threads::Run::LastError::code]
+              def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::Run::LastError::code]
             end
           end

           type required_action =
             {
-              submit_tool_outputs: OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs,
+              submit_tool_outputs: OpenAI::Beta::Threads::Run::RequiredAction::SubmitToolOutputs,
               type: :submit_tool_outputs
             }

-          class RequiredAction < OpenAI::BaseModel
-            attr_accessor submit_tool_outputs: OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs
+          class RequiredAction < OpenAI::Internal::Type::BaseModel
+            attr_accessor submit_tool_outputs: OpenAI::Beta::Threads::Run::RequiredAction::SubmitToolOutputs

             attr_accessor type: :submit_tool_outputs

-            def initialize:
-              (
-                submit_tool_outputs: OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs,
-                type: :submit_tool_outputs
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::Run::required_action
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              submit_tool_outputs: OpenAI::Beta::Threads::Run::RequiredAction::SubmitToolOutputs,
+              ?type: :submit_tool_outputs
+            ) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::Run::required_action
+            def to_hash: -> {
+              submit_tool_outputs: OpenAI::Beta::Threads::Run::RequiredAction::SubmitToolOutputs,
+              type: :submit_tool_outputs
+            }

             type submit_tool_outputs =
               {
-                tool_calls: ::Array[OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall]
+                tool_calls: ::Array[OpenAI::Beta::Threads::RequiredActionFunctionToolCall]
               }

-            class SubmitToolOutputs < OpenAI::BaseModel
-              attr_accessor tool_calls: ::Array[OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall]
+            class SubmitToolOutputs < OpenAI::Internal::Type::BaseModel
+              attr_accessor tool_calls: ::Array[OpenAI::Beta::Threads::RequiredActionFunctionToolCall]

-              def initialize:
-                (
-                  tool_calls: ::Array[OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall]
-                ) -> void
-                | (
-                  ?OpenAI::Models::Beta::Threads::Run::RequiredAction::submit_tool_outputs
-                  | OpenAI::BaseModel data
-                ) -> void
+              def initialize: (
+                tool_calls: ::Array[OpenAI::Beta::Threads::RequiredActionFunctionToolCall]
+              ) -> void

-              def to_hash: -> OpenAI::Models::Beta::Threads::Run::RequiredAction::submit_tool_outputs
+              def to_hash: -> {
+                tool_calls: ::Array[OpenAI::Beta::Threads::RequiredActionFunctionToolCall]
+              }
             end
           end

@@ -241,30 +259,30 @@ module OpenAI
               last_messages: Integer?
             }

-          class TruncationStrategy < OpenAI::BaseModel
+          class TruncationStrategy < OpenAI::Internal::Type::BaseModel
             attr_accessor type: OpenAI::Models::Beta::Threads::Run::TruncationStrategy::type_

             attr_accessor last_messages: Integer?

-            def initialize:
-              (
-                type: OpenAI::Models::Beta::Threads::Run::TruncationStrategy::type_,
-                last_messages: Integer?
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::Run::truncation_strategy
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              type: OpenAI::Models::Beta::Threads::Run::TruncationStrategy::type_,
+              ?last_messages: Integer?
+            ) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::Run::truncation_strategy
+            def to_hash: -> {
+              type: OpenAI::Models::Beta::Threads::Run::TruncationStrategy::type_,
+              last_messages: Integer?
+ } type type_ = :auto | :last_messages - class Type < OpenAI::Enum + module Type + extend OpenAI::Internal::Type::Enum + AUTO: :auto LAST_MESSAGES: :last_messages - def self.values: -> ::Array[OpenAI::Models::Beta::Threads::Run::TruncationStrategy::type_] + def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::Run::TruncationStrategy::type_] end end @@ -275,25 +293,24 @@ module OpenAI total_tokens: Integer } - class Usage < OpenAI::BaseModel + class Usage < OpenAI::Internal::Type::BaseModel attr_accessor completion_tokens: Integer attr_accessor prompt_tokens: Integer attr_accessor total_tokens: Integer - def initialize: - ( - completion_tokens: Integer, - prompt_tokens: Integer, - total_tokens: Integer - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::Run::usage - | OpenAI::BaseModel data - ) -> void + def initialize: ( + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Run::usage + def to_hash: -> { + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + } end end end diff --git a/sig/openai/models/beta/threads/run_cancel_params.rbs b/sig/openai/models/beta/threads/run_cancel_params.rbs index 34ea5c7f..d96641a6 100644 --- a/sig/openai/models/beta/threads/run_cancel_params.rbs +++ b/sig/openai/models/beta/threads/run_cancel_params.rbs @@ -3,22 +3,23 @@ module OpenAI module Beta module Threads type run_cancel_params = - { thread_id: String } & OpenAI::request_parameters + { thread_id: String } & OpenAI::Internal::Type::request_parameters - class RunCancelParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class RunCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor thread_id: String - def initialize: - (thread_id: String, request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::Beta::Threads::run_cancel_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + thread_id: String, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::run_cancel_params + def to_hash: -> { + thread_id: String, + request_options: OpenAI::RequestOptions + } end end end diff --git a/sig/openai/models/beta/threads/run_create_params.rbs b/sig/openai/models/beta/threads/run_create_params.rbs index eac12be2..a9c8e394 100644 --- a/sig/openai/models/beta/threads/run_create_params.rbs +++ b/sig/openai/models/beta/threads/run_create_params.rbs @@ -7,7 +7,7 @@ module OpenAI assistant_id: String, include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include], additional_instructions: String?, - additional_messages: ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage]?, + additional_messages: ::Array[OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage]?, instructions: String?, max_completion_tokens: Integer?, max_prompt_tokens: Integer?, @@ -20,13 +20,13 @@ module OpenAI tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, tools: ::Array[OpenAI::Models::Beta::assistant_tool]?, top_p: Float?, - truncation_strategy: OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy? + truncation_strategy: OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy? 
} - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class RunCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class RunCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor assistant_id: String @@ -38,7 +38,7 @@ module OpenAI attr_accessor additional_instructions: String? - attr_accessor additional_messages: ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage]? + attr_accessor additional_messages: ::Array[OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage]? attr_accessor instructions: String? @@ -66,87 +66,102 @@ module OpenAI attr_accessor top_p: Float? - attr_accessor truncation_strategy: OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy? - - def initialize: - ( - assistant_id: String, - include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include], - additional_instructions: String?, - additional_messages: ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage]?, - instructions: String?, - max_completion_tokens: Integer?, - max_prompt_tokens: Integer?, - metadata: OpenAI::Models::metadata?, - model: OpenAI::Models::Beta::Threads::RunCreateParams::model?, - parallel_tool_calls: bool, - reasoning_effort: OpenAI::Models::reasoning_effort?, - response_format: OpenAI::Models::Beta::assistant_response_format_option?, - temperature: Float?, - tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, - tools: ::Array[OpenAI::Models::Beta::assistant_tool]?, - top_p: Float?, - truncation_strategy: OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy?, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::run_create_params - | OpenAI::BaseModel data - ) -> void + attr_accessor truncation_strategy: OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy? 
- def to_hash: -> OpenAI::Models::Beta::Threads::run_create_params + def initialize: ( + assistant_id: String, + ?include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include], + ?additional_instructions: String?, + ?additional_messages: ::Array[OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage]?, + ?instructions: String?, + ?max_completion_tokens: Integer?, + ?max_prompt_tokens: Integer?, + ?metadata: OpenAI::Models::metadata?, + ?model: OpenAI::Models::Beta::Threads::RunCreateParams::model?, + ?parallel_tool_calls: bool, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, + ?response_format: OpenAI::Models::Beta::assistant_response_format_option?, + ?temperature: Float?, + ?tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, + ?tools: ::Array[OpenAI::Models::Beta::assistant_tool]?, + ?top_p: Float?, + ?truncation_strategy: OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy?, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + assistant_id: String, + include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include], + additional_instructions: String?, + additional_messages: ::Array[OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage]?, + instructions: String?, + max_completion_tokens: Integer?, + max_prompt_tokens: Integer?, + metadata: OpenAI::Models::metadata?, + model: OpenAI::Models::Beta::Threads::RunCreateParams::model?, + parallel_tool_calls: bool, + reasoning_effort: OpenAI::Models::reasoning_effort?, + response_format: OpenAI::Models::Beta::assistant_response_format_option?, + temperature: Float?, + tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, + tools: ::Array[OpenAI::Models::Beta::assistant_tool]?, + top_p: Float?, + truncation_strategy: OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy?, + request_options: OpenAI::RequestOptions + } type additional_message = { content: OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::content, role: OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::role, - attachments: ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment]?, + attachments: ::Array[OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment]?, metadata: OpenAI::Models::metadata? } - class AdditionalMessage < OpenAI::BaseModel + class AdditionalMessage < OpenAI::Internal::Type::BaseModel attr_accessor content: OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::content attr_accessor role: OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::role - attr_accessor attachments: ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment]? + attr_accessor attachments: ::Array[OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment]? attr_accessor metadata: OpenAI::Models::metadata? - def initialize: - ( - content: OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::content, - role: OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::role, - attachments: ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment]?, - metadata: OpenAI::Models::metadata? 
- ) -> void - | ( - ?OpenAI::Models::Beta::Threads::RunCreateParams::additional_message - | OpenAI::BaseModel data - ) -> void + def initialize: ( + content: OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::content, + role: OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::role, + ?attachments: ::Array[OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment]?, + ?metadata: OpenAI::Models::metadata? + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::RunCreateParams::additional_message + def to_hash: -> { + content: OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::content, + role: OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::role, + attachments: ::Array[OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment]?, + metadata: OpenAI::Models::metadata? + } type content = String | ::Array[OpenAI::Models::Beta::Threads::message_content_part_param] - class Content < OpenAI::Union - type message_content_part_param_array = - ::Array[OpenAI::Models::Beta::Threads::message_content_part_param] + module Content + extend OpenAI::Internal::Type::Union - MessageContentPartParamArray: message_content_part_param_array + def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::content] - private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Beta::Threads::message_content_part_param]]] + MessageContentPartParamArray: OpenAI::Internal::Type::Converter end type role = :user | :assistant - class Role < OpenAI::Enum + module Role + extend OpenAI::Internal::Type::Enum + USER: :user ASSISTANT: :assistant - def self.values: -> ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::role] + def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::role] end type attachment = @@ -155,7 +170,7 @@ module OpenAI tools: ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::tool] } - class Attachment < OpenAI::BaseModel + class Attachment < OpenAI::Internal::Type::BaseModel attr_reader file_id: String? 
def file_id=: (String) -> String @@ -166,47 +181,44 @@ module OpenAI ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::tool] ) -> ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::tool] - def initialize: - ( - file_id: String, - tools: ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::tool] - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::attachment - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?file_id: String, + ?tools: ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::tool] + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::attachment + def to_hash: -> { + file_id: String, + tools: ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::tool] + } type tool = - OpenAI::Models::Beta::CodeInterpreterTool - | OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch + OpenAI::Beta::CodeInterpreterTool + | OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch + + module Tool + extend OpenAI::Internal::Type::Union - class Tool < OpenAI::Union type file_search = { type: :file_search } - class FileSearch < OpenAI::BaseModel + class FileSearch < OpenAI::Internal::Type::BaseModel attr_accessor type: :file_search - def initialize: - (type: :file_search) -> void - | ( - ?OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::file_search - | OpenAI::BaseModel data - ) -> void + def initialize: (?type: :file_search) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::file_search + def to_hash: -> { type: :file_search } end - private def self.variants: -> [[:code_interpreter, OpenAI::Models::Beta::CodeInterpreterTool], [:file_search, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch]] + def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::tool] end end end type model = String | OpenAI::Models::chat_model - class Model < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, OpenAI::Models::chat_model]] + module Model + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::model] end type truncation_strategy = @@ -215,30 +227,30 @@ module OpenAI last_messages: Integer? } - class TruncationStrategy < OpenAI::BaseModel + class TruncationStrategy < OpenAI::Internal::Type::BaseModel attr_accessor type: OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::type_ attr_accessor last_messages: Integer? - def initialize: - ( - type: OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::type_, - last_messages: Integer? - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::RunCreateParams::truncation_strategy - | OpenAI::BaseModel data - ) -> void + def initialize: ( + type: OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::type_, + ?last_messages: Integer? + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::RunCreateParams::truncation_strategy + def to_hash: -> { + type: OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::type_, + last_messages: Integer? 
+ } type type_ = :auto | :last_messages - class Type < OpenAI::Enum + module Type + extend OpenAI::Internal::Type::Enum + AUTO: :auto LAST_MESSAGES: :last_messages - def self.values: -> ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::type_] + def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::type_] end end end diff --git a/sig/openai/models/beta/threads/run_list_params.rbs b/sig/openai/models/beta/threads/run_list_params.rbs index ba37749f..fa76718f 100644 --- a/sig/openai/models/beta/threads/run_list_params.rbs +++ b/sig/openai/models/beta/threads/run_list_params.rbs @@ -9,11 +9,11 @@ module OpenAI limit: Integer, order: OpenAI::Models::Beta::Threads::RunListParams::order } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class RunListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class RunListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_reader after: String? @@ -33,28 +33,31 @@ module OpenAI OpenAI::Models::Beta::Threads::RunListParams::order ) -> OpenAI::Models::Beta::Threads::RunListParams::order - def initialize: - ( - after: String, - before: String, - limit: Integer, - order: OpenAI::Models::Beta::Threads::RunListParams::order, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::run_list_params - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::Threads::run_list_params + def initialize: ( + ?after: String, + ?before: String, + ?limit: Integer, + ?order: OpenAI::Models::Beta::Threads::RunListParams::order, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + after: String, + before: String, + limit: Integer, + order: OpenAI::Models::Beta::Threads::RunListParams::order, + request_options: OpenAI::RequestOptions + } type order = :asc | :desc - class Order < OpenAI::Enum + module Order + extend OpenAI::Internal::Type::Enum + ASC: :asc DESC: :desc - def self.values: -> ::Array[OpenAI::Models::Beta::Threads::RunListParams::order] + def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::RunListParams::order] end end end diff --git a/sig/openai/models/beta/threads/run_retrieve_params.rbs b/sig/openai/models/beta/threads/run_retrieve_params.rbs index 44423e27..d100bece 100644 --- a/sig/openai/models/beta/threads/run_retrieve_params.rbs +++ b/sig/openai/models/beta/threads/run_retrieve_params.rbs @@ -3,22 +3,23 @@ module OpenAI module Beta module Threads type run_retrieve_params = - { thread_id: String } & OpenAI::request_parameters + { thread_id: String } & OpenAI::Internal::Type::request_parameters - class RunRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class RunRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor thread_id: String - def initialize: - (thread_id: String, request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::Beta::Threads::run_retrieve_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + thread_id: String, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::run_retrieve_params + def to_hash: -> { + thread_id: String, + 
request_options: OpenAI::RequestOptions + } end end end diff --git a/sig/openai/models/beta/threads/run_status.rbs b/sig/openai/models/beta/threads/run_status.rbs index cfc75c7b..4a106ac0 100644 --- a/sig/openai/models/beta/threads/run_status.rbs +++ b/sig/openai/models/beta/threads/run_status.rbs @@ -13,7 +13,9 @@ module OpenAI | :incomplete | :expired - class RunStatus < OpenAI::Enum + module RunStatus + extend OpenAI::Internal::Type::Enum + QUEUED: :queued IN_PROGRESS: :in_progress REQUIRES_ACTION: :requires_action @@ -24,7 +26,7 @@ module OpenAI INCOMPLETE: :incomplete EXPIRED: :expired - def self.values: -> ::Array[OpenAI::Models::Beta::Threads::run_status] + def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::run_status] end end end diff --git a/sig/openai/models/beta/threads/run_submit_tool_outputs_params.rbs b/sig/openai/models/beta/threads/run_submit_tool_outputs_params.rbs index d044c675..8d499043 100644 --- a/sig/openai/models/beta/threads/run_submit_tool_outputs_params.rbs +++ b/sig/openai/models/beta/threads/run_submit_tool_outputs_params.rbs @@ -5,34 +5,33 @@ module OpenAI type run_submit_tool_outputs_params = { thread_id: String, - tool_outputs: ::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput] + tool_outputs: ::Array[OpenAI::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput] } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class RunSubmitToolOutputsParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class RunSubmitToolOutputsParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor thread_id: String - attr_accessor tool_outputs: ::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput] + attr_accessor tool_outputs: ::Array[OpenAI::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput] - def initialize: - ( - thread_id: String, - tool_outputs: ::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput], - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::run_submit_tool_outputs_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + thread_id: String, + tool_outputs: ::Array[OpenAI::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput], + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::run_submit_tool_outputs_params + def to_hash: -> { + thread_id: String, + tool_outputs: ::Array[OpenAI::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput], + request_options: OpenAI::RequestOptions + } type tool_output = { output: String, tool_call_id: String } - class ToolOutput < OpenAI::BaseModel + class ToolOutput < OpenAI::Internal::Type::BaseModel attr_reader output: String? 
def output=: (String) -> String @@ -41,14 +40,9 @@ module OpenAI def tool_call_id=: (String) -> String - def initialize: - (output: String, tool_call_id: String) -> void - | ( - ?OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::tool_output - | OpenAI::BaseModel data - ) -> void + def initialize: (?output: String, ?tool_call_id: String) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::tool_output + def to_hash: -> { output: String, tool_call_id: String } end end end diff --git a/sig/openai/models/beta/threads/run_update_params.rbs b/sig/openai/models/beta/threads/run_update_params.rbs index ad8a6e84..664db83c 100644 --- a/sig/openai/models/beta/threads/run_update_params.rbs +++ b/sig/openai/models/beta/threads/run_update_params.rbs @@ -4,28 +4,27 @@ module OpenAI module Threads type run_update_params = { thread_id: String, metadata: OpenAI::Models::metadata? } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class RunUpdateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class RunUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor thread_id: String attr_accessor metadata: OpenAI::Models::metadata? - def initialize: - ( - thread_id: String, - metadata: OpenAI::Models::metadata?, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::run_update_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + thread_id: String, + ?metadata: OpenAI::Models::metadata?, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::run_update_params + def to_hash: -> { + thread_id: String, + metadata: OpenAI::Models::metadata?, + request_options: OpenAI::RequestOptions + } end end end diff --git a/sig/openai/models/beta/threads/runs/code_interpreter_logs.rbs b/sig/openai/models/beta/threads/runs/code_interpreter_logs.rbs index d62fbb78..31b1e593 100644 --- a/sig/openai/models/beta/threads/runs/code_interpreter_logs.rbs +++ b/sig/openai/models/beta/threads/runs/code_interpreter_logs.rbs @@ -6,7 +6,7 @@ module OpenAI type code_interpreter_logs = { index: Integer, type: :logs, logs: String } - class CodeInterpreterLogs < OpenAI::BaseModel + class CodeInterpreterLogs < OpenAI::Internal::Type::BaseModel attr_accessor index: Integer attr_accessor type: :logs @@ -15,14 +15,13 @@ module OpenAI def logs=: (String) -> String - def initialize: - (index: Integer, logs: String, type: :logs) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::code_interpreter_logs - | OpenAI::BaseModel data - ) -> void + def initialize: ( + index: Integer, + ?logs: String, + ?type: :logs + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::code_interpreter_logs + def to_hash: -> { index: Integer, type: :logs, logs: String } end end end diff --git a/sig/openai/models/beta/threads/runs/code_interpreter_output_image.rbs b/sig/openai/models/beta/threads/runs/code_interpreter_output_image.rbs index 2560a0c4..012b6c3f 100644 --- a/sig/openai/models/beta/threads/runs/code_interpreter_output_image.rbs +++ b/sig/openai/models/beta/threads/runs/code_interpreter_output_image.rbs @@ -7,48 +7,42 @@ module OpenAI { index: Integer, type: :image, - image: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image + image: 
OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage::Image } - class CodeInterpreterOutputImage < OpenAI::BaseModel + class CodeInterpreterOutputImage < OpenAI::Internal::Type::BaseModel attr_accessor index: Integer attr_accessor type: :image - attr_reader image: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image? + attr_reader image: OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage::Image? def image=: ( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image - ) -> OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image - - def initialize: - ( - index: Integer, - image: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image, - type: :image - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::code_interpreter_output_image - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::code_interpreter_output_image + OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage::Image + ) -> OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage::Image + + def initialize: ( + index: Integer, + ?image: OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage::Image, + ?type: :image + ) -> void + + def to_hash: -> { + index: Integer, + type: :image, + image: OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage::Image + } type image = { file_id: String } - class Image < OpenAI::BaseModel + class Image < OpenAI::Internal::Type::BaseModel attr_reader file_id: String? def file_id=: (String) -> String - def initialize: - (file_id: String) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::image - | OpenAI::BaseModel data - ) -> void + def initialize: (?file_id: String) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::image + def to_hash: -> { file_id: String } end end end diff --git a/sig/openai/models/beta/threads/runs/code_interpreter_tool_call.rbs b/sig/openai/models/beta/threads/runs/code_interpreter_tool_call.rbs index 3c997331..68ed586a 100644 --- a/sig/openai/models/beta/threads/runs/code_interpreter_tool_call.rbs +++ b/sig/openai/models/beta/threads/runs/code_interpreter_tool_call.rbs @@ -6,29 +6,28 @@ module OpenAI type code_interpreter_tool_call = { id: String, - code_interpreter: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter, + code_interpreter: OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter, type: :code_interpreter } - class CodeInterpreterToolCall < OpenAI::BaseModel + class CodeInterpreterToolCall < OpenAI::Internal::Type::BaseModel attr_accessor id: String - attr_accessor code_interpreter: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter + attr_accessor code_interpreter: OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter attr_accessor type: :code_interpreter - def initialize: - ( - id: String, - code_interpreter: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter, - type: :code_interpreter - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::code_interpreter_tool_call - | OpenAI::BaseModel data - ) -> void + def initialize: ( + id: String, + code_interpreter: OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter, + ?type: :code_interpreter + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::code_interpreter_tool_call + def to_hash: -> { + id: String, + code_interpreter: 
OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter, + type: :code_interpreter + } type code_interpreter = { @@ -36,85 +35,73 @@ module OpenAI outputs: ::Array[OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::output] } - class CodeInterpreter < OpenAI::BaseModel + class CodeInterpreter < OpenAI::Internal::Type::BaseModel attr_accessor input: String attr_accessor outputs: ::Array[OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::output] - def initialize: - ( - input: String, - outputs: ::Array[OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::output] - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::code_interpreter - | OpenAI::BaseModel data - ) -> void + def initialize: ( + input: String, + outputs: ::Array[OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::output] + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::code_interpreter + def to_hash: -> { + input: String, + outputs: ::Array[OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::output] + } type output = - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs - | OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs + | OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image + + module Output + extend OpenAI::Internal::Type::Union - class Output < OpenAI::Union type logs = { logs: String, type: :logs } - class Logs < OpenAI::BaseModel + class Logs < OpenAI::Internal::Type::BaseModel attr_accessor logs: String attr_accessor type: :logs - def initialize: - (logs: String, type: :logs) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::logs - | OpenAI::BaseModel data - ) -> void + def initialize: (logs: String, ?type: :logs) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::logs + def to_hash: -> { logs: String, type: :logs } end type image = { - image: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image, + image: OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image, type: :image } - class Image < OpenAI::BaseModel - attr_accessor image: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image + class Image < OpenAI::Internal::Type::BaseModel + attr_accessor image: OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image attr_accessor type: :image - def initialize: - ( - image: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image, - type: :image - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::image - | OpenAI::BaseModel data - ) -> void + def initialize: ( + image: OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image, + ?type: :image + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::image + def to_hash: -> { + image: OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image, + type: :image + } 
type image = { file_id: String } - class Image < OpenAI::BaseModel + class Image < OpenAI::Internal::Type::BaseModel attr_accessor file_id: String - def initialize: - (file_id: String) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::image - | OpenAI::BaseModel data - ) -> void + def initialize: (file_id: String) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::image + def to_hash: -> { file_id: String } end end - private def self.variants: -> [[:logs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs], [:image, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image]] + def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::output] end end end diff --git a/sig/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbs b/sig/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbs index aeb01140..650b7203 100644 --- a/sig/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbs +++ b/sig/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbs @@ -8,10 +8,10 @@ module OpenAI index: Integer, type: :code_interpreter, id: String, - code_interpreter: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter + code_interpreter: OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter } - class CodeInterpreterToolCallDelta < OpenAI::BaseModel + class CodeInterpreterToolCallDelta < OpenAI::Internal::Type::BaseModel attr_accessor index: Integer attr_accessor type: :code_interpreter @@ -20,25 +20,25 @@ module OpenAI def id=: (String) -> String - attr_reader code_interpreter: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter? + attr_reader code_interpreter: OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter? def code_interpreter=: ( - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter - ) -> OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter - - def initialize: - ( - index: Integer, - id: String, - code_interpreter: OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter, - type: :code_interpreter - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::code_interpreter_tool_call_delta - | OpenAI::BaseModel data - ) -> void + OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter + ) -> OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::code_interpreter_tool_call_delta + def initialize: ( + index: Integer, + ?id: String, + ?code_interpreter: OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter, + ?type: :code_interpreter + ) -> void + + def to_hash: -> { + index: Integer, + type: :code_interpreter, + id: String, + code_interpreter: OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter + } type code_interpreter = { @@ -46,7 +46,7 @@ module OpenAI outputs: ::Array[OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter::output] } - class CodeInterpreter < OpenAI::BaseModel + class CodeInterpreter < OpenAI::Internal::Type::BaseModel attr_reader input: String? 
def input=: (String) -> String @@ -57,24 +57,24 @@ module OpenAI ::Array[OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter::output] ) -> ::Array[OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter::output] - def initialize: - ( - input: String, - outputs: ::Array[OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter::output] - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::code_interpreter - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?input: String, + ?outputs: ::Array[OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter::output] + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::code_interpreter + def to_hash: -> { + input: String, + outputs: ::Array[OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter::output] + } type output = - OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs - | OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage + OpenAI::Beta::Threads::Runs::CodeInterpreterLogs + | OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage + + module Output + extend OpenAI::Internal::Type::Union - class Output < OpenAI::Union - private def self.variants: -> [[:logs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs], [:image, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage]] + def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter::output] end end end diff --git a/sig/openai/models/beta/threads/runs/file_search_tool_call.rbs b/sig/openai/models/beta/threads/runs/file_search_tool_call.rbs index e8476225..ca0f5a8e 100644 --- a/sig/openai/models/beta/threads/runs/file_search_tool_call.rbs +++ b/sig/openai/models/beta/threads/runs/file_search_tool_call.rbs @@ -6,60 +6,57 @@ module OpenAI type file_search_tool_call = { id: String, - file_search: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch, + file_search: OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch, type: :file_search } - class FileSearchToolCall < OpenAI::BaseModel + class FileSearchToolCall < OpenAI::Internal::Type::BaseModel attr_accessor id: String - attr_accessor file_search: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch + attr_accessor file_search: OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch attr_accessor type: :file_search - def initialize: - ( - id: String, - file_search: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch, - type: :file_search - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::file_search_tool_call - | OpenAI::BaseModel data - ) -> void + def initialize: ( + id: String, + file_search: OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch, + ?type: :file_search + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::file_search_tool_call + def to_hash: -> { + id: String, + file_search: OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch, + type: :file_search + } type file_search = { - ranking_options: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions, - results: ::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result] + ranking_options: OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions, + results: 
::Array[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result] } - class FileSearch < OpenAI::BaseModel - attr_reader ranking_options: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions? + class FileSearch < OpenAI::Internal::Type::BaseModel + attr_reader ranking_options: OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions? def ranking_options=: ( - OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions - ) -> OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions + OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions + ) -> OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions - attr_reader results: ::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result]? + attr_reader results: ::Array[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result]? def results=: ( - ::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result] - ) -> ::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result] + ::Array[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result] + ) -> ::Array[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result] - def initialize: - ( - ranking_options: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions, - results: ::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result] - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::file_search - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?ranking_options: OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions, + ?results: ::Array[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result] + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::file_search + def to_hash: -> { + ranking_options: OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions, + results: ::Array[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result] + } type ranking_options = { @@ -67,30 +64,30 @@ module OpenAI score_threshold: Float } - class RankingOptions < OpenAI::BaseModel + class RankingOptions < OpenAI::Internal::Type::BaseModel attr_accessor ranker: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::ranker attr_accessor score_threshold: Float - def initialize: - ( - ranker: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::ranker, - score_threshold: Float - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::ranking_options - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ranker: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::ranker, + score_threshold: Float + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::ranking_options + def to_hash: -> { + ranker: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::ranker, + score_threshold: Float + } type ranker = :auto | :default_2024_08_21 - class Ranker < OpenAI::Enum + module Ranker + extend OpenAI::Internal::Type::Enum + AUTO: :auto DEFAULT_2024_08_21: :default_2024_08_21 - def self.values: -> ::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::ranker] + def 
self?.values: -> ::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::ranker] end end @@ -99,35 +96,35 @@ module OpenAI file_id: String, file_name: String, score: Float, - content: ::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content] + content: ::Array[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content] } - class Result < OpenAI::BaseModel + class Result < OpenAI::Internal::Type::BaseModel attr_accessor file_id: String attr_accessor file_name: String attr_accessor score: Float - attr_reader content: ::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content]? + attr_reader content: ::Array[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content]? def content=: ( - ::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content] - ) -> ::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content] - - def initialize: - ( - file_id: String, - file_name: String, - score: Float, - content: ::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content] - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::result - | OpenAI::BaseModel data - ) -> void + ::Array[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content] + ) -> ::Array[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content] - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::result + def initialize: ( + file_id: String, + file_name: String, + score: Float, + ?content: ::Array[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content] + ) -> void + + def to_hash: -> { + file_id: String, + file_name: String, + score: Float, + content: ::Array[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content] + } type content = { @@ -135,7 +132,7 @@ module OpenAI type: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::type_ } - class Content < OpenAI::BaseModel + class Content < OpenAI::Internal::Type::BaseModel attr_reader text: String? 
def text=: (String) -> String @@ -146,24 +143,24 @@ module OpenAI OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::type_ ) -> OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::type_ - def initialize: - ( - text: String, - type: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::type_ - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::content - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?text: String, + ?type: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::type_ + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::content + def to_hash: -> { + text: String, + type: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::type_ + } type type_ = :text - class Type < OpenAI::Enum + module Type + extend OpenAI::Internal::Type::Enum + TEXT: :text - def self.values: -> ::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::type_] + def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::type_] end end end diff --git a/sig/openai/models/beta/threads/runs/file_search_tool_call_delta.rbs b/sig/openai/models/beta/threads/runs/file_search_tool_call_delta.rbs index 91172bba..ebac80d2 100644 --- a/sig/openai/models/beta/threads/runs/file_search_tool_call_delta.rbs +++ b/sig/openai/models/beta/threads/runs/file_search_tool_call_delta.rbs @@ -6,7 +6,7 @@ module OpenAI type file_search_tool_call_delta = { file_search: top, index: Integer, type: :file_search, id: String } - class FileSearchToolCallDelta < OpenAI::BaseModel + class FileSearchToolCallDelta < OpenAI::Internal::Type::BaseModel attr_accessor file_search: top attr_accessor index: Integer @@ -17,19 +17,19 @@ module OpenAI def id=: (String) -> String - def initialize: - ( - file_search: top, - index: Integer, - id: String, - type: :file_search - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::file_search_tool_call_delta - | OpenAI::BaseModel data - ) -> void + def initialize: ( + file_search: top, + index: Integer, + ?id: String, + ?type: :file_search + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::file_search_tool_call_delta + def to_hash: -> { + file_search: top, + index: Integer, + type: :file_search, + id: String + } end end end diff --git a/sig/openai/models/beta/threads/runs/function_tool_call.rbs b/sig/openai/models/beta/threads/runs/function_tool_call.rbs index d3c415c2..aa424fe9 100644 --- a/sig/openai/models/beta/threads/runs/function_tool_call.rbs +++ b/sig/openai/models/beta/threads/runs/function_tool_call.rbs @@ -6,47 +6,49 @@ module OpenAI type function_tool_call = { id: String, - function: OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function, + function: OpenAI::Beta::Threads::Runs::FunctionToolCall::Function, type: :function } - class FunctionToolCall < OpenAI::BaseModel + class FunctionToolCall < OpenAI::Internal::Type::BaseModel attr_accessor id: String - attr_accessor function: OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function + attr_accessor function: OpenAI::Beta::Threads::Runs::FunctionToolCall::Function attr_accessor type: :function - def initialize: - ( - id: String, - function: OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function, - type: :function - ) -> void - | ( - 
?OpenAI::Models::Beta::Threads::Runs::function_tool_call - | OpenAI::BaseModel data - ) -> void + def initialize: ( + id: String, + function: OpenAI::Beta::Threads::Runs::FunctionToolCall::Function, + ?type: :function + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::function_tool_call + def to_hash: -> { + id: String, + function: OpenAI::Beta::Threads::Runs::FunctionToolCall::Function, + type: :function + } type function = { arguments: String, name: String, output: String? } - class Function < OpenAI::BaseModel + class Function < OpenAI::Internal::Type::BaseModel attr_accessor arguments: String attr_accessor name: String attr_accessor output: String? - def initialize: - (arguments: String, name: String, output: String?) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::function - | OpenAI::BaseModel data - ) -> void + def initialize: ( + arguments: String, + name: String, + output: String? + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::function + def to_hash: -> { + arguments: String, + name: String, + output: String? + } end end end diff --git a/sig/openai/models/beta/threads/runs/function_tool_call_delta.rbs b/sig/openai/models/beta/threads/runs/function_tool_call_delta.rbs index 8a6261a4..2955d8c3 100644 --- a/sig/openai/models/beta/threads/runs/function_tool_call_delta.rbs +++ b/sig/openai/models/beta/threads/runs/function_tool_call_delta.rbs @@ -8,10 +8,10 @@ module OpenAI index: Integer, type: :function, id: String, - function: OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function + function: OpenAI::Beta::Threads::Runs::FunctionToolCallDelta::Function } - class FunctionToolCallDelta < OpenAI::BaseModel + class FunctionToolCallDelta < OpenAI::Internal::Type::BaseModel attr_accessor index: Integer attr_accessor type: :function @@ -20,29 +20,29 @@ module OpenAI def id=: (String) -> String - attr_reader function: OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function? + attr_reader function: OpenAI::Beta::Threads::Runs::FunctionToolCallDelta::Function? def function=: ( - OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function - ) -> OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function - - def initialize: - ( - index: Integer, - id: String, - function: OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function, - type: :function - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::function_tool_call_delta - | OpenAI::BaseModel data - ) -> void + OpenAI::Beta::Threads::Runs::FunctionToolCallDelta::Function + ) -> OpenAI::Beta::Threads::Runs::FunctionToolCallDelta::Function - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::function_tool_call_delta + def initialize: ( + index: Integer, + ?id: String, + ?function: OpenAI::Beta::Threads::Runs::FunctionToolCallDelta::Function, + ?type: :function + ) -> void + + def to_hash: -> { + index: Integer, + type: :function, + id: String, + function: OpenAI::Beta::Threads::Runs::FunctionToolCallDelta::Function + } type function = { arguments: String, name: String, output: String? } - class Function < OpenAI::BaseModel + class Function < OpenAI::Internal::Type::BaseModel attr_reader arguments: String? def arguments=: (String) -> String @@ -53,14 +53,17 @@ module OpenAI attr_accessor output: String? - def initialize: - (arguments: String, name: String, output: String?) 
-> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::function - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?arguments: String, + ?name: String, + ?output: String? + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::function + def to_hash: -> { + arguments: String, + name: String, + output: String? + } end end end diff --git a/sig/openai/models/beta/threads/runs/message_creation_step_details.rbs b/sig/openai/models/beta/threads/runs/message_creation_step_details.rbs index 5eb94efe..6cfec2be 100644 --- a/sig/openai/models/beta/threads/runs/message_creation_step_details.rbs +++ b/sig/openai/models/beta/threads/runs/message_creation_step_details.rbs @@ -5,40 +5,33 @@ module OpenAI module Runs type message_creation_step_details = { - message_creation: OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation, + message_creation: OpenAI::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation, type: :message_creation } - class MessageCreationStepDetails < OpenAI::BaseModel - attr_accessor message_creation: OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation + class MessageCreationStepDetails < OpenAI::Internal::Type::BaseModel + attr_accessor message_creation: OpenAI::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation attr_accessor type: :message_creation - def initialize: - ( - message_creation: OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation, - type: :message_creation - ) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::message_creation_step_details - | OpenAI::BaseModel data - ) -> void + def initialize: ( + message_creation: OpenAI::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation, + ?type: :message_creation + ) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::message_creation_step_details + def to_hash: -> { + message_creation: OpenAI::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation, + type: :message_creation + } type message_creation = { message_id: String } - class MessageCreation < OpenAI::BaseModel + class MessageCreation < OpenAI::Internal::Type::BaseModel attr_accessor message_id: String - def initialize: - (message_id: String) -> void - | ( - ?OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::message_creation - | OpenAI::BaseModel data - ) -> void + def initialize: (message_id: String) -> void - def to_hash: -> OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::message_creation + def to_hash: -> { message_id: String } end end end diff --git a/sig/openai/models/beta/threads/runs/run_step.rbs b/sig/openai/models/beta/threads/runs/run_step.rbs index 69a157aa..f58bd943 100644 --- a/sig/openai/models/beta/threads/runs/run_step.rbs +++ b/sig/openai/models/beta/threads/runs/run_step.rbs @@ -2,7 +2,6 @@ module OpenAI module Models module Beta module Threads - class RunStep = Runs::RunStep module Runs @@ -15,7 +14,7 @@ module OpenAI created_at: Integer, expired_at: Integer?, failed_at: Integer?, - last_error: OpenAI::Models::Beta::Threads::Runs::RunStep::LastError?, + last_error: OpenAI::Beta::Threads::Runs::RunStep::LastError?, metadata: OpenAI::Models::metadata?, object: :"thread.run.step", run_id: String, @@ -23,10 +22,10 @@ module OpenAI step_details: OpenAI::Models::Beta::Threads::Runs::RunStep::step_details, thread_id: String, type: OpenAI::Models::Beta::Threads::Runs::RunStep::type_, - usage: 
diff --git a/sig/openai/models/beta/threads/runs/run_step.rbs b/sig/openai/models/beta/threads/runs/run_step.rbs
index 69a157aa..f58bd943 100644
--- a/sig/openai/models/beta/threads/runs/run_step.rbs
+++ b/sig/openai/models/beta/threads/runs/run_step.rbs
@@ -2,7 +2,6 @@ module OpenAI
   module Models
     module Beta
       module Threads
-        class RunStep = Runs::RunStep

         module Runs
@@ -15,7 +14,7 @@ module OpenAI
             created_at: Integer,
             expired_at: Integer?,
             failed_at: Integer?,
-            last_error: OpenAI::Models::Beta::Threads::Runs::RunStep::LastError?,
+            last_error: OpenAI::Beta::Threads::Runs::RunStep::LastError?,
             metadata: OpenAI::Models::metadata?,
             object: :"thread.run.step",
             run_id: String,
@@ -23,10 +22,10 @@ module OpenAI
             step_details: OpenAI::Models::Beta::Threads::Runs::RunStep::step_details,
             thread_id: String,
             type: OpenAI::Models::Beta::Threads::Runs::RunStep::type_,
-            usage: OpenAI::Models::Beta::Threads::Runs::RunStep::Usage?
+            usage: OpenAI::Beta::Threads::Runs::RunStep::Usage?
           }

-          class RunStep < OpenAI::BaseModel
+          class RunStep < OpenAI::Internal::Type::BaseModel
             attr_accessor id: String

             attr_accessor assistant_id: String
@@ -41,7 +40,7 @@ module OpenAI

             attr_accessor failed_at: Integer?

-            attr_accessor last_error: OpenAI::Models::Beta::Threads::Runs::RunStep::LastError?
+            attr_accessor last_error: OpenAI::Beta::Threads::Runs::RunStep::LastError?

             attr_accessor metadata: OpenAI::Models::metadata?

@@ -57,33 +56,45 @@ module OpenAI

             attr_accessor type: OpenAI::Models::Beta::Threads::Runs::RunStep::type_

-            attr_accessor usage: OpenAI::Models::Beta::Threads::Runs::RunStep::Usage?
-
-            def initialize:
-              (
-                id: String,
-                assistant_id: String,
-                cancelled_at: Integer?,
-                completed_at: Integer?,
-                created_at: Integer,
-                expired_at: Integer?,
-                failed_at: Integer?,
-                last_error: OpenAI::Models::Beta::Threads::Runs::RunStep::LastError?,
-                metadata: OpenAI::Models::metadata?,
-                run_id: String,
-                status: OpenAI::Models::Beta::Threads::Runs::RunStep::status,
-                step_details: OpenAI::Models::Beta::Threads::Runs::RunStep::step_details,
-                thread_id: String,
-                type: OpenAI::Models::Beta::Threads::Runs::RunStep::type_,
-                usage: OpenAI::Models::Beta::Threads::Runs::RunStep::Usage?,
-                object: :"thread.run.step"
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::Runs::run_step
-                | OpenAI::BaseModel data
-              ) -> void
+            attr_accessor usage: OpenAI::Beta::Threads::Runs::RunStep::Usage?

-            def to_hash: -> OpenAI::Models::Beta::Threads::Runs::run_step
+            def initialize: (
+              id: String,
+              assistant_id: String,
+              cancelled_at: Integer?,
+              completed_at: Integer?,
+              created_at: Integer,
+              expired_at: Integer?,
+              failed_at: Integer?,
+              last_error: OpenAI::Beta::Threads::Runs::RunStep::LastError?,
+              metadata: OpenAI::Models::metadata?,
+              run_id: String,
+              status: OpenAI::Models::Beta::Threads::Runs::RunStep::status,
+              step_details: OpenAI::Models::Beta::Threads::Runs::RunStep::step_details,
+              thread_id: String,
+              type: OpenAI::Models::Beta::Threads::Runs::RunStep::type_,
+              usage: OpenAI::Beta::Threads::Runs::RunStep::Usage?,
+              ?object: :"thread.run.step"
+            ) -> void
+
+            def to_hash: -> {
+              id: String,
+              assistant_id: String,
+              cancelled_at: Integer?,
+              completed_at: Integer?,
+              created_at: Integer,
+              expired_at: Integer?,
+              failed_at: Integer?,
+              last_error: OpenAI::Beta::Threads::Runs::RunStep::LastError?,
+              metadata: OpenAI::Models::metadata?,
+              object: :"thread.run.step",
+              run_id: String,
+              status: OpenAI::Models::Beta::Threads::Runs::RunStep::status,
+              step_details: OpenAI::Models::Beta::Threads::Runs::RunStep::step_details,
+              thread_id: String,
+              type: OpenAI::Models::Beta::Threads::Runs::RunStep::type_,
+              usage: OpenAI::Beta::Threads::Runs::RunStep::Usage?
+            }

             type last_error =
               {
@@ -91,61 +102,67 @@ module OpenAI
                 message: String
               }

-            class LastError < OpenAI::BaseModel
+            class LastError < OpenAI::Internal::Type::BaseModel
              attr_accessor code: OpenAI::Models::Beta::Threads::Runs::RunStep::LastError::code

               attr_accessor message: String

-              def initialize:
-                (
-                  code: OpenAI::Models::Beta::Threads::Runs::RunStep::LastError::code,
-                  message: String
-                ) -> void
-                | (
-                  ?OpenAI::Models::Beta::Threads::Runs::RunStep::last_error
-                  | OpenAI::BaseModel data
-                ) -> void
+              def initialize: (
+                code: OpenAI::Models::Beta::Threads::Runs::RunStep::LastError::code,
+                message: String
+              ) -> void

-              def to_hash: -> OpenAI::Models::Beta::Threads::Runs::RunStep::last_error
+              def to_hash: -> {
+                code: OpenAI::Models::Beta::Threads::Runs::RunStep::LastError::code,
+                message: String
+              }

               type code = :server_error | :rate_limit_exceeded

-              class Code < OpenAI::Enum
+              module Code
+                extend OpenAI::Internal::Type::Enum
+
                 SERVER_ERROR: :server_error
                 RATE_LIMIT_EXCEEDED: :rate_limit_exceeded

-                def self.values: -> ::Array[OpenAI::Models::Beta::Threads::Runs::RunStep::LastError::code]
+                def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::Runs::RunStep::LastError::code]
               end
             end

             type status =
               :in_progress | :cancelled | :failed | :completed | :expired

-            class Status < OpenAI::Enum
+            module Status
+              extend OpenAI::Internal::Type::Enum
+
               IN_PROGRESS: :in_progress
               CANCELLED: :cancelled
               FAILED: :failed
               COMPLETED: :completed
               EXPIRED: :expired

-              def self.values: -> ::Array[OpenAI::Models::Beta::Threads::Runs::RunStep::status]
+              def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::Runs::RunStep::status]
             end

             type step_details =
-              OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails
-              | OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails
+              OpenAI::Beta::Threads::Runs::MessageCreationStepDetails
+              | OpenAI::Beta::Threads::Runs::ToolCallsStepDetails
+
+            module StepDetails
+              extend OpenAI::Internal::Type::Union

-            class StepDetails < OpenAI::Union
-              private def self.variants: -> [[:message_creation, OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails], [:tool_calls, OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails]]
+              def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::Runs::RunStep::step_details]
             end

             type type_ = :message_creation | :tool_calls

-            class Type < OpenAI::Enum
+            module Type
+              extend OpenAI::Internal::Type::Enum
+
               MESSAGE_CREATION: :message_creation
               TOOL_CALLS: :tool_calls

-              def self.values: -> ::Array[OpenAI::Models::Beta::Threads::Runs::RunStep::type_]
+              def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::Runs::RunStep::type_]
             end

             type usage =
@@ -155,25 +172,24 @@ module OpenAI
               {
                 completion_tokens: Integer,
                 prompt_tokens: Integer,
                 total_tokens: Integer
               }

-            class Usage < OpenAI::BaseModel
+            class Usage < OpenAI::Internal::Type::BaseModel
               attr_accessor completion_tokens: Integer

               attr_accessor prompt_tokens: Integer

               attr_accessor total_tokens: Integer

-              def initialize:
-                (
-                  completion_tokens: Integer,
-                  prompt_tokens: Integer,
-                  total_tokens: Integer
-                ) -> void
-                | (
-                  ?OpenAI::Models::Beta::Threads::Runs::RunStep::usage
-                  | OpenAI::BaseModel data
-                ) -> void
-
-              def to_hash: -> OpenAI::Models::Beta::Threads::Runs::RunStep::usage
+              def initialize: (
+                completion_tokens: Integer,
+                prompt_tokens: Integer,
+                total_tokens: Integer
+              ) -> void
+
+              def to_hash: -> {
+                completion_tokens: Integer,
+                prompt_tokens: Integer,
+                total_tokens: Integer
+              }
             end
           end
         end
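Two mechanical conversions recur in the hunks above. Enum and union helper classes (`class Code < OpenAI::Enum`, `class StepDetails < OpenAI::Union`) become modules extending `OpenAI::Internal::Type::Enum` / `OpenAI::Internal::Type::Union`, and `def self.values` becomes `def self?.values`, RBS notation for a `module_function`-style method callable on the module itself and, when included, on an instance. Unions also drop the old private `[[tag, type], ...]` variant pairs for a public flat array. A sketch of what that means for callers, assuming the runtime matches these signatures:

    # Enum members are plain symbol constants on the module:
    OpenAI::Beta::Threads::Runs::RunStep::Status::IN_PROGRESS # => :in_progress

    # `self?.values` lets callers enumerate the legal symbols directly:
    OpenAI::Beta::Threads::Runs::RunStep::Status.values
    # => [:in_progress, :cancelled, :failed, :completed, :expired]

    # Union helpers now expose their member types publicly:
    OpenAI::Beta::Threads::Runs::RunStep::StepDetails.variants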
diff --git a/sig/openai/models/beta/threads/runs/run_step_delta.rbs b/sig/openai/models/beta/threads/runs/run_step_delta.rbs
index 2d78e758..010d9373 100644
--- a/sig/openai/models/beta/threads/runs/run_step_delta.rbs
+++ b/sig/openai/models/beta/threads/runs/run_step_delta.rbs
@@ -2,7 +2,6 @@ module OpenAI
   module Models
     module Beta
       module Threads
-        class RunStepDelta = Runs::RunStepDelta

         module Runs
@@ -11,30 +10,29 @@ module OpenAI
             step_details: OpenAI::Models::Beta::Threads::Runs::RunStepDelta::step_details
           }

-          class RunStepDelta < OpenAI::BaseModel
+          class RunStepDelta < OpenAI::Internal::Type::BaseModel
             attr_reader step_details: OpenAI::Models::Beta::Threads::Runs::RunStepDelta::step_details?

             def step_details=: (
               OpenAI::Models::Beta::Threads::Runs::RunStepDelta::step_details
             ) -> OpenAI::Models::Beta::Threads::Runs::RunStepDelta::step_details

-            def initialize:
-              (
-                step_details: OpenAI::Models::Beta::Threads::Runs::RunStepDelta::step_details
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::Runs::run_step_delta
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              ?step_details: OpenAI::Models::Beta::Threads::Runs::RunStepDelta::step_details
+            ) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::Runs::run_step_delta
+            def to_hash: -> {
+              step_details: OpenAI::Models::Beta::Threads::Runs::RunStepDelta::step_details
+            }

             type step_details =
-              OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta
-              | OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject
+              OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta
+              | OpenAI::Beta::Threads::Runs::ToolCallDeltaObject
+
+            module StepDetails
+              extend OpenAI::Internal::Type::Union

-            class StepDetails < OpenAI::Union
-              private def self.variants: -> [[:message_creation, OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta], [:tool_calls, OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject]]
+              def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::Runs::RunStepDelta::step_details]
             end
           end
         end
diff --git a/sig/openai/models/beta/threads/runs/run_step_delta_event.rbs b/sig/openai/models/beta/threads/runs/run_step_delta_event.rbs
index 717849fb..70ca4c44 100644
--- a/sig/openai/models/beta/threads/runs/run_step_delta_event.rbs
+++ b/sig/openai/models/beta/threads/runs/run_step_delta_event.rbs
@@ -2,36 +2,34 @@ module OpenAI
   module Models
     module Beta
       module Threads
-        class RunStepDeltaEvent = Runs::RunStepDeltaEvent

         module Runs
           type run_step_delta_event =
             {
               id: String,
-              delta: OpenAI::Models::Beta::Threads::Runs::RunStepDelta,
+              delta: OpenAI::Beta::Threads::Runs::RunStepDelta,
               object: :"thread.run.step.delta"
             }

-          class RunStepDeltaEvent < OpenAI::BaseModel
+          class RunStepDeltaEvent < OpenAI::Internal::Type::BaseModel
             attr_accessor id: String

-            attr_accessor delta: OpenAI::Models::Beta::Threads::Runs::RunStepDelta
+            attr_accessor delta: OpenAI::Beta::Threads::Runs::RunStepDelta

             attr_accessor object: :"thread.run.step.delta"

-            def initialize:
-              (
-                id: String,
-                delta: OpenAI::Models::Beta::Threads::Runs::RunStepDelta,
-                object: :"thread.run.step.delta"
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::Runs::run_step_delta_event
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              id: String,
+              delta: OpenAI::Beta::Threads::Runs::RunStepDelta,
+              ?object: :"thread.run.step.delta"
+            ) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::Runs::run_step_delta_event
+            def to_hash: -> {
+              id: String,
+              delta: OpenAI::Beta::Threads::Runs::RunStepDelta,
+              object: :"thread.run.step.delta"
+            }
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/runs/run_step_delta_message_delta.rbs b/sig/openai/models/beta/threads/runs/run_step_delta_message_delta.rbs
index cd740a0f..0bb8a0bb 100644
--- a/sig/openai/models/beta/threads/runs/run_step_delta_message_delta.rbs
+++ b/sig/openai/models/beta/threads/runs/run_step_delta_message_delta.rbs
@@ -2,52 +2,44 @@ module OpenAI
   module Models
     module Beta
      module Threads
-        class RunStepDeltaMessageDelta = Runs::RunStepDeltaMessageDelta

         module Runs
           type run_step_delta_message_delta =
             {
               type: :message_creation,
-              message_creation: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation
+              message_creation: OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation
             }

-          class RunStepDeltaMessageDelta < OpenAI::BaseModel
+          class RunStepDeltaMessageDelta < OpenAI::Internal::Type::BaseModel
             attr_accessor type: :message_creation

-            attr_reader message_creation: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation?
+            attr_reader message_creation: OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation?

             def message_creation=: (
-              OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation
-            ) -> OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation
+              OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation
+            ) -> OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation

-            def initialize:
-              (
-                message_creation: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation,
-                type: :message_creation
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::Runs::run_step_delta_message_delta
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              ?message_creation: OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation,
+              ?type: :message_creation
+            ) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::Runs::run_step_delta_message_delta
+            def to_hash: -> {
+              type: :message_creation,
+              message_creation: OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation
+            }

             type message_creation = { message_id: String }

-            class MessageCreation < OpenAI::BaseModel
+            class MessageCreation < OpenAI::Internal::Type::BaseModel
               attr_reader message_id: String?

               def message_id=: (String) -> String

-              def initialize:
-                (message_id: String) -> void
-                | (
-                  ?OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::message_creation
-                  | OpenAI::BaseModel data
-                ) -> void
+              def initialize: (?message_id: String) -> void

-              def to_hash: -> OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::message_creation
+              def to_hash: -> { message_id: String }
             end
           end
         end
diff --git a/sig/openai/models/beta/threads/runs/run_step_include.rbs b/sig/openai/models/beta/threads/runs/run_step_include.rbs
index be00b41f..30d9eb94 100644
--- a/sig/openai/models/beta/threads/runs/run_step_include.rbs
+++ b/sig/openai/models/beta/threads/runs/run_step_include.rbs
@@ -2,17 +2,18 @@ module OpenAI
   module Models
     module Beta
       module Threads
-
-        class RunStepInclude = Runs::RunStepInclude
+        module RunStepInclude = Runs::RunStepInclude

         module Runs
           type run_step_include =
             :"step_details.tool_calls[*].file_search.results[*].content"

-          class RunStepInclude < OpenAI::Enum
+          module RunStepInclude
+            extend OpenAI::Internal::Type::Enum
+
             STEP_DETAILS_TOOL_CALLS_FILE_SEARCH_RESULTS_CONTENT: :"step_details.tool_calls[*].file_search.results[*].content"

-            def self.values: -> ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include]
+            def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include]
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/runs/step_list_params.rbs b/sig/openai/models/beta/threads/runs/step_list_params.rbs
index 1ff2c6fc..2eb9aae2 100644
--- a/sig/openai/models/beta/threads/runs/step_list_params.rbs
+++ b/sig/openai/models/beta/threads/runs/step_list_params.rbs
@@ -12,11 +12,11 @@ module OpenAI
               limit: Integer,
               order: OpenAI::Models::Beta::Threads::Runs::StepListParams::order
             }
-            & OpenAI::request_parameters
+            & OpenAI::Internal::Type::request_parameters

-          class StepListParams < OpenAI::BaseModel
-            extend OpenAI::RequestParameters::Converter
-            include OpenAI::RequestParameters
+          class StepListParams < OpenAI::Internal::Type::BaseModel
+            extend OpenAI::Internal::Type::RequestParameters::Converter
+            include OpenAI::Internal::Type::RequestParameters

             attr_accessor thread_id: String

@@ -44,30 +44,35 @@ module OpenAI
               OpenAI::Models::Beta::Threads::Runs::StepListParams::order
             ) -> OpenAI::Models::Beta::Threads::Runs::StepListParams::order

-            def initialize:
-              (
-                thread_id: String,
-                after: String,
-                before: String,
-                include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include],
-                limit: Integer,
-                order: OpenAI::Models::Beta::Threads::Runs::StepListParams::order,
-                request_options: OpenAI::request_opts
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::Runs::step_list_params
-                | OpenAI::BaseModel data
-              ) -> void
-
-            def to_hash: -> OpenAI::Models::Beta::Threads::Runs::step_list_params
+            def initialize: (
+              thread_id: String,
+              ?after: String,
+              ?before: String,
+              ?include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include],
+              ?limit: Integer,
+              ?order: OpenAI::Models::Beta::Threads::Runs::StepListParams::order,
+              ?request_options: OpenAI::request_opts
+            ) -> void
+
+            def to_hash: -> {
+              thread_id: String,
+              after: String,
+              before: String,
+              include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include],
+              limit: Integer,
+              order: OpenAI::Models::Beta::Threads::Runs::StepListParams::order,
+              request_options: OpenAI::RequestOptions
+            }

             type order = :asc | :desc

-            class Order < OpenAI::Enum
+            module Order
+              extend OpenAI::Internal::Type::Enum
+
               ASC: :asc
               DESC: :desc

-              def self.values: -> ::Array[OpenAI::Models::Beta::Threads::Runs::StepListParams::order]
+              def self?.values: -> ::Array[OpenAI::Models::Beta::Threads::Runs::StepListParams::order]
             end
           end
         end
diff --git a/sig/openai/models/beta/threads/runs/step_retrieve_params.rbs b/sig/openai/models/beta/threads/runs/step_retrieve_params.rbs
index cfeb8e95..94750c9f 100644
--- a/sig/openai/models/beta/threads/runs/step_retrieve_params.rbs
+++ b/sig/openai/models/beta/threads/runs/step_retrieve_params.rbs
@@ -9,11 +9,11 @@ module OpenAI
               run_id: String,
               include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include]
             }
-            & OpenAI::request_parameters
+            & OpenAI::Internal::Type::request_parameters

-          class StepRetrieveParams < OpenAI::BaseModel
-            extend OpenAI::RequestParameters::Converter
-            include OpenAI::RequestParameters
+          class StepRetrieveParams < OpenAI::Internal::Type::BaseModel
+            extend OpenAI::Internal::Type::RequestParameters::Converter
+            include OpenAI::Internal::Type::RequestParameters

             attr_accessor thread_id: String

@@ -25,19 +25,19 @@ module OpenAI
               ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include]
             ) -> ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include]

-            def initialize:
-              (
-                thread_id: String,
-                run_id: String,
-                include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include],
-                request_options: OpenAI::request_opts
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::Runs::step_retrieve_params
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              thread_id: String,
+              run_id: String,
+              ?include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include],
+              ?request_options: OpenAI::request_opts
+            ) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::Runs::step_retrieve_params
+            def to_hash: -> {
+              thread_id: String,
+              run_id: String,
+              include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include],
+              request_options: OpenAI::RequestOptions
+            }
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/runs/tool_call.rbs b/sig/openai/models/beta/threads/runs/tool_call.rbs
index e4de6ce0..4337520d 100644
--- a/sig/openai/models/beta/threads/runs/tool_call.rbs
+++ b/sig/openai/models/beta/threads/runs/tool_call.rbs
@@ -4,12 +4,14 @@ module OpenAI
       module Threads
         module Runs
           type tool_call =
-            OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall
-            | OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall
-            | OpenAI::Models::Beta::Threads::Runs::FunctionToolCall
+            OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall
+            | OpenAI::Beta::Threads::Runs::FileSearchToolCall
+            | OpenAI::Beta::Threads::Runs::FunctionToolCall

-          class ToolCall < OpenAI::Union
-            private def self.variants: -> [[:code_interpreter, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall], [:file_search, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall], [:function, OpenAI::Models::Beta::Threads::Runs::FunctionToolCall]]
+          module ToolCall
+            extend OpenAI::Internal::Type::Union
+
+            def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::Runs::tool_call]
           end
         end
       end
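StepListParams and StepRetrieveParams above now compose `OpenAI::Internal::Type::request_parameters` and mark everything beyond the required IDs optional. A hedged sketch of the corresponding resource call (the resource method itself is outside this diff, and the IDs are illustrative):

    # Listing run steps: thread_id is a required keyword, the rest optional.
    steps = client.beta.threads.runs.steps.list(
      "run_abc123",
      thread_id: "thread_abc123",
      limit: 20,
      order: :desc # :asc | :desc per StepListParams::Order
    )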
diff --git a/sig/openai/models/beta/threads/runs/tool_call_delta.rbs b/sig/openai/models/beta/threads/runs/tool_call_delta.rbs
index 49679360..9ab47252 100644
--- a/sig/openai/models/beta/threads/runs/tool_call_delta.rbs
+++ b/sig/openai/models/beta/threads/runs/tool_call_delta.rbs
@@ -4,12 +4,14 @@ module OpenAI
       module Threads
         module Runs
           type tool_call_delta =
-            OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta
-            | OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta
-            | OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta
+            OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta
+            | OpenAI::Beta::Threads::Runs::FileSearchToolCallDelta
+            | OpenAI::Beta::Threads::Runs::FunctionToolCallDelta

-          class ToolCallDelta < OpenAI::Union
-            private def self.variants: -> [[:code_interpreter, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta], [:file_search, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta], [:function, OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta]]
+          module ToolCallDelta
+            extend OpenAI::Internal::Type::Union
+
+            def self?.variants: -> ::Array[OpenAI::Models::Beta::Threads::Runs::tool_call_delta]
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/runs/tool_call_delta_object.rbs b/sig/openai/models/beta/threads/runs/tool_call_delta_object.rbs
index 68e23f3a..758dd0c0 100644
--- a/sig/openai/models/beta/threads/runs/tool_call_delta_object.rbs
+++ b/sig/openai/models/beta/threads/runs/tool_call_delta_object.rbs
@@ -9,7 +9,7 @@ module OpenAI
             tool_calls: ::Array[OpenAI::Models::Beta::Threads::Runs::tool_call_delta]
           }

-          class ToolCallDeltaObject < OpenAI::BaseModel
+          class ToolCallDeltaObject < OpenAI::Internal::Type::BaseModel
             attr_accessor type: :tool_calls

             attr_reader tool_calls: ::Array[OpenAI::Models::Beta::Threads::Runs::tool_call_delta]?

@@ -18,17 +18,15 @@ module OpenAI
               ::Array[OpenAI::Models::Beta::Threads::Runs::tool_call_delta]
             ) -> ::Array[OpenAI::Models::Beta::Threads::Runs::tool_call_delta]

-            def initialize:
-              (
-                tool_calls: ::Array[OpenAI::Models::Beta::Threads::Runs::tool_call_delta],
-                type: :tool_calls
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::Runs::tool_call_delta_object
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              ?tool_calls: ::Array[OpenAI::Models::Beta::Threads::Runs::tool_call_delta],
+              ?type: :tool_calls
+            ) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::Runs::tool_call_delta_object
+            def to_hash: -> {
+              type: :tool_calls,
+              tool_calls: ::Array[OpenAI::Models::Beta::Threads::Runs::tool_call_delta]
+            }
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/runs/tool_calls_step_details.rbs b/sig/openai/models/beta/threads/runs/tool_calls_step_details.rbs
index 156c02c6..91baa731 100644
--- a/sig/openai/models/beta/threads/runs/tool_calls_step_details.rbs
+++ b/sig/openai/models/beta/threads/runs/tool_calls_step_details.rbs
@@ -9,22 +9,20 @@ module OpenAI
             type: :tool_calls
           }

-          class ToolCallsStepDetails < OpenAI::BaseModel
+          class ToolCallsStepDetails < OpenAI::Internal::Type::BaseModel
             attr_accessor tool_calls: ::Array[OpenAI::Models::Beta::Threads::Runs::tool_call]

             attr_accessor type: :tool_calls

-            def initialize:
-              (
-                tool_calls: ::Array[OpenAI::Models::Beta::Threads::Runs::tool_call],
-                type: :tool_calls
-              ) -> void
-              | (
-                ?OpenAI::Models::Beta::Threads::Runs::tool_calls_step_details
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              tool_calls: ::Array[OpenAI::Models::Beta::Threads::Runs::tool_call],
+              ?type: :tool_calls
+            ) -> void

-            def to_hash: -> OpenAI::Models::Beta::Threads::Runs::tool_calls_step_details
+            def to_hash: -> {
+              tool_calls: ::Array[OpenAI::Models::Beta::Threads::Runs::tool_call],
+              type: :tool_calls
+            }
           end
         end
       end
diff --git a/sig/openai/models/beta/threads/text.rbs b/sig/openai/models/beta/threads/text.rbs
index 4311ade7..faff33c1 100644
--- a/sig/openai/models/beta/threads/text.rbs
+++ b/sig/openai/models/beta/threads/text.rbs
@@ -8,21 +8,20 @@ module OpenAI
           value: String
         }

-        class Text < OpenAI::BaseModel
+        class Text < OpenAI::Internal::Type::BaseModel
           attr_accessor annotations: ::Array[OpenAI::Models::Beta::Threads::annotation]

           attr_accessor value: String

-          def initialize:
-            (
-              annotations: ::Array[OpenAI::Models::Beta::Threads::annotation],
-              value: String
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::text | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            annotations: ::Array[OpenAI::Models::Beta::Threads::annotation],
+            value: String
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::text
+          def to_hash: -> {
+            annotations: ::Array[OpenAI::Models::Beta::Threads::annotation],
+            value: String
+          }
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/text_content_block.rbs b/sig/openai/models/beta/threads/text_content_block.rbs
index 57b71af4..16343075 100644
--- a/sig/openai/models/beta/threads/text_content_block.rbs
+++ b/sig/openai/models/beta/threads/text_content_block.rbs
@@ -3,21 +3,19 @@ module OpenAI
     module Beta
       module Threads
         type text_content_block =
-          { text: OpenAI::Models::Beta::Threads::Text, type: :text }
+          { text: OpenAI::Beta::Threads::Text, type: :text }

-        class TextContentBlock < OpenAI::BaseModel
-          attr_accessor text: OpenAI::Models::Beta::Threads::Text
+        class TextContentBlock < OpenAI::Internal::Type::BaseModel
+          attr_accessor text: OpenAI::Beta::Threads::Text

           attr_accessor type: :text

-          def initialize:
-            (text: OpenAI::Models::Beta::Threads::Text, type: :text) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::text_content_block
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            text: OpenAI::Beta::Threads::Text,
+            ?type: :text
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::text_content_block
+          def to_hash: -> { text: OpenAI::Beta::Threads::Text, type: :text }
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/text_content_block_param.rbs b/sig/openai/models/beta/threads/text_content_block_param.rbs
index 669f9eed..2805c546 100644
--- a/sig/openai/models/beta/threads/text_content_block_param.rbs
+++ b/sig/openai/models/beta/threads/text_content_block_param.rbs
@@ -4,19 +4,14 @@ module OpenAI
       module Threads
         type text_content_block_param = { text: String, type: :text }

-        class TextContentBlockParam < OpenAI::BaseModel
+        class TextContentBlockParam < OpenAI::Internal::Type::BaseModel
           attr_accessor text: String

           attr_accessor type: :text

-          def initialize:
-            (text: String, type: :text) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::text_content_block_param
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (text: String, ?type: :text) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::text_content_block_param
+          def to_hash: -> { text: String, type: :text }
         end
       end
     end
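Content-block models such as TextContentBlockParam pin their `type` discriminator to a symbol literal and, under the new signatures, let it default at construction. A small sketch, assuming the runtime mirrors the RBS:

    # :text is a fixed default, so only the payload is required.
    block = OpenAI::Beta::Threads::TextContentBlockParam.new(text: "Hello!")
    block.to_hash # => {text: "Hello!", type: :text}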
diff --git a/sig/openai/models/beta/threads/text_delta.rbs b/sig/openai/models/beta/threads/text_delta.rbs
index 9156cf24..fe100222 100644
--- a/sig/openai/models/beta/threads/text_delta.rbs
+++ b/sig/openai/models/beta/threads/text_delta.rbs
@@ -8,7 +8,7 @@ module OpenAI
           value: String
         }

-        class TextDelta < OpenAI::BaseModel
+        class TextDelta < OpenAI::Internal::Type::BaseModel
          attr_reader annotations: ::Array[OpenAI::Models::Beta::Threads::annotation_delta]?

           def annotations=: (
@@ -19,17 +19,15 @@ module OpenAI

           def value=: (String) -> String

-          def initialize:
-            (
-              annotations: ::Array[OpenAI::Models::Beta::Threads::annotation_delta],
-              value: String
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::text_delta
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            ?annotations: ::Array[OpenAI::Models::Beta::Threads::annotation_delta],
+            ?value: String
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::text_delta
+          def to_hash: -> {
+            annotations: ::Array[OpenAI::Models::Beta::Threads::annotation_delta],
+            value: String
+          }
         end
       end
     end
diff --git a/sig/openai/models/beta/threads/text_delta_block.rbs b/sig/openai/models/beta/threads/text_delta_block.rbs
index 263bd619..5db737c3 100644
--- a/sig/openai/models/beta/threads/text_delta_block.rbs
+++ b/sig/openai/models/beta/threads/text_delta_block.rbs
@@ -6,32 +6,31 @@ module OpenAI
           {
             index: Integer,
             type: :text,
-            text: OpenAI::Models::Beta::Threads::TextDelta
+            text: OpenAI::Beta::Threads::TextDelta
           }

-        class TextDeltaBlock < OpenAI::BaseModel
+        class TextDeltaBlock < OpenAI::Internal::Type::BaseModel
           attr_accessor index: Integer

           attr_accessor type: :text

-          attr_reader text: OpenAI::Models::Beta::Threads::TextDelta?
+          attr_reader text: OpenAI::Beta::Threads::TextDelta?

           def text=: (
-            OpenAI::Models::Beta::Threads::TextDelta
-          ) -> OpenAI::Models::Beta::Threads::TextDelta
+            OpenAI::Beta::Threads::TextDelta
+          ) -> OpenAI::Beta::Threads::TextDelta

-          def initialize:
-            (
-              index: Integer,
-              text: OpenAI::Models::Beta::Threads::TextDelta,
-              type: :text
-            ) -> void
-            | (
-              ?OpenAI::Models::Beta::Threads::text_delta_block
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            index: Integer,
+            ?text: OpenAI::Beta::Threads::TextDelta,
+            ?type: :text
+          ) -> void

-          def to_hash: -> OpenAI::Models::Beta::Threads::text_delta_block
+          def to_hash: -> {
+            index: Integer,
+            type: :text,
+            text: OpenAI::Beta::Threads::TextDelta
+          }
         end
       end
     end
diff --git a/sig/openai/models/chat/chat_completion.rbs b/sig/openai/models/chat/chat_completion.rbs
index b0f0b6b3..ee4cae65 100644
--- a/sig/openai/models/chat/chat_completion.rbs
+++ b/sig/openai/models/chat/chat_completion.rbs
@@ -1,25 +1,24 @@
 module OpenAI
   module Models
-    class ChatCompletion = Chat::ChatCompletion

     module Chat
       type chat_completion =
         {
           id: String,
-          choices: ::Array[OpenAI::Models::Chat::ChatCompletion::Choice],
+          choices: ::Array[OpenAI::Chat::ChatCompletion::Choice],
           created: Integer,
           model: String,
           object: :"chat.completion",
           service_tier: OpenAI::Models::Chat::ChatCompletion::service_tier?,
           system_fingerprint: String,
-          usage: OpenAI::Models::CompletionUsage
+          usage: OpenAI::CompletionUsage
         }

-      class ChatCompletion < OpenAI::BaseModel
+      class ChatCompletion < OpenAI::Internal::Type::BaseModel
         attr_accessor id: String

-        attr_accessor choices: ::Array[OpenAI::Models::Chat::ChatCompletion::Choice]
+        attr_accessor choices: ::Array[OpenAI::Chat::ChatCompletion::Choice]

         attr_accessor created: Integer

@@ -33,105 +32,113 @@ module OpenAI

         def system_fingerprint=: (String) -> String

-        attr_reader usage: OpenAI::Models::CompletionUsage?
-
-        def usage=: (
-          OpenAI::Models::CompletionUsage
-        ) -> OpenAI::Models::CompletionUsage
-
-        def initialize:
-          (
-            id: String,
-            choices: ::Array[OpenAI::Models::Chat::ChatCompletion::Choice],
-            created: Integer,
-            model: String,
-            service_tier: OpenAI::Models::Chat::ChatCompletion::service_tier?,
-            system_fingerprint: String,
-            usage: OpenAI::Models::CompletionUsage,
-            object: :"chat.completion"
-          ) -> void
-          | (
-            ?OpenAI::Models::Chat::chat_completion | OpenAI::BaseModel data
-          ) -> void
+        attr_reader usage: OpenAI::CompletionUsage?
+
+        def usage=: (OpenAI::CompletionUsage) -> OpenAI::CompletionUsage
+
+        def initialize: (
+          id: String,
+          choices: ::Array[OpenAI::Chat::ChatCompletion::Choice],
+          created: Integer,
+          model: String,
+          ?service_tier: OpenAI::Models::Chat::ChatCompletion::service_tier?,
+          ?system_fingerprint: String,
+          ?usage: OpenAI::CompletionUsage,
+          ?object: :"chat.completion"
+        ) -> void

-        def to_hash: -> OpenAI::Models::Chat::chat_completion
+        def to_hash: -> {
+          id: String,
+          choices: ::Array[OpenAI::Chat::ChatCompletion::Choice],
+          created: Integer,
+          model: String,
+          object: :"chat.completion",
+          service_tier: OpenAI::Models::Chat::ChatCompletion::service_tier?,
+          system_fingerprint: String,
+          usage: OpenAI::CompletionUsage
+        }

         type choice =
           {
             finish_reason: OpenAI::Models::Chat::ChatCompletion::Choice::finish_reason,
             index: Integer,
-            logprobs: OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs?,
-            message: OpenAI::Models::Chat::ChatCompletionMessage
+            logprobs: OpenAI::Chat::ChatCompletion::Choice::Logprobs?,
+            message: OpenAI::Chat::ChatCompletionMessage
           }

-        class Choice < OpenAI::BaseModel
+        class Choice < OpenAI::Internal::Type::BaseModel
           attr_accessor finish_reason: OpenAI::Models::Chat::ChatCompletion::Choice::finish_reason

           attr_accessor index: Integer

-          attr_accessor logprobs: OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs?
+          attr_accessor logprobs: OpenAI::Chat::ChatCompletion::Choice::Logprobs?

-          attr_accessor message: OpenAI::Models::Chat::ChatCompletionMessage
+          attr_accessor message: OpenAI::Chat::ChatCompletionMessage

-          def initialize:
-            (
-              finish_reason: OpenAI::Models::Chat::ChatCompletion::Choice::finish_reason,
-              index: Integer,
-              logprobs: OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs?,
-              message: OpenAI::Models::Chat::ChatCompletionMessage
-            ) -> void
-            | (
-              ?OpenAI::Models::Chat::ChatCompletion::choice
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            finish_reason: OpenAI::Models::Chat::ChatCompletion::Choice::finish_reason,
+            index: Integer,
+            logprobs: OpenAI::Chat::ChatCompletion::Choice::Logprobs?,
+            message: OpenAI::Chat::ChatCompletionMessage
+          ) -> void

-          def to_hash: -> OpenAI::Models::Chat::ChatCompletion::choice
+          def to_hash: -> {
+            finish_reason: OpenAI::Models::Chat::ChatCompletion::Choice::finish_reason,
+            index: Integer,
+            logprobs: OpenAI::Chat::ChatCompletion::Choice::Logprobs?,
+            message: OpenAI::Chat::ChatCompletionMessage
+          }

           type finish_reason =
             :stop | :length | :tool_calls | :content_filter | :function_call

-          class FinishReason < OpenAI::Enum
+          module FinishReason
+            extend OpenAI::Internal::Type::Enum
+
             STOP: :stop
             LENGTH: :length
             TOOL_CALLS: :tool_calls
             CONTENT_FILTER: :content_filter
             FUNCTION_CALL: :function_call

-            def self.values: -> ::Array[OpenAI::Models::Chat::ChatCompletion::Choice::finish_reason]
+            def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletion::Choice::finish_reason]
           end

           type logprobs =
             {
-              content: ::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]?,
-              refusal: ::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]?
+              content: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?,
+              refusal: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?
             }

-          class Logprobs < OpenAI::BaseModel
-            attr_accessor content: ::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]?
+          class Logprobs < OpenAI::Internal::Type::BaseModel
+            attr_accessor content: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?

-            attr_accessor refusal: ::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]?
+            attr_accessor refusal: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?

-            def initialize:
-              (
-                content: ::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]?,
-                refusal: ::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]?
-              ) -> void
-              | (
-                ?OpenAI::Models::Chat::ChatCompletion::Choice::logprobs
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              content: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?,
+              refusal: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?
+            ) -> void

-            def to_hash: -> OpenAI::Models::Chat::ChatCompletion::Choice::logprobs
+            def to_hash: -> {
+              content: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?,
+              refusal: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?
+            }
           end
         end

-        type service_tier = :scale | :default
+        type service_tier = :auto | :default | :flex | :scale | :priority

-        class ServiceTier < OpenAI::Enum
-          SCALE: :scale
+        module ServiceTier
+          extend OpenAI::Internal::Type::Enum
+
+          AUTO: :auto
           DEFAULT: :default
+          FLEX: :flex
+          SCALE: :scale
+          PRIORITY: :priority

-          def self.values: -> ::Array[OpenAI::Models::Chat::ChatCompletion::service_tier]
+          def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletion::service_tier]
         end
       end
     end
diff --git a/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs b/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs
new file mode 100644
index 00000000..6de6e1c5
--- /dev/null
+++ b/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs
@@ -0,0 +1,29 @@
+module OpenAI
+  module Models
+    class ChatCompletionAllowedToolChoice = Chat::ChatCompletionAllowedToolChoice
+
+    module Chat
+      type chat_completion_allowed_tool_choice =
+        {
+          allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools,
+          type: :allowed_tools
+        }
+
+      class ChatCompletionAllowedToolChoice < OpenAI::Internal::Type::BaseModel
+        attr_accessor allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools
+
+        attr_accessor type: :allowed_tools
+
+        def initialize: (
+          allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools,
+          ?type: :allowed_tools
+        ) -> void
+
+        def to_hash: -> {
+          allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools,
+          type: :allowed_tools
+        }
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/chat/chat_completion_allowed_tools.rbs b/sig/openai/models/chat/chat_completion_allowed_tools.rbs
new file mode 100644
index 00000000..0744b34f
--- /dev/null
+++ b/sig/openai/models/chat/chat_completion_allowed_tools.rbs
@@ -0,0 +1,38 @@
+module OpenAI
+  module Models
+    module Chat
+      type chat_completion_allowed_tools =
+        {
+          mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode,
+          tools: ::Array[::Hash[Symbol, top]]
+        }
+
+      class ChatCompletionAllowedTools < OpenAI::Internal::Type::BaseModel
+        attr_accessor mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode
+
+        attr_accessor tools: ::Array[::Hash[Symbol, top]]
+
+        def initialize: (
+          mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode,
+          tools: ::Array[::Hash[Symbol, top]]
+        ) -> void
+
+        def to_hash: -> {
+          mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode,
+          tools: ::Array[::Hash[Symbol, top]]
+        }
+
+        type mode = :auto | :required
+
+        module Mode
+          extend OpenAI::Internal::Type::Enum
+
+          AUTO: :auto
+          REQUIRED: :required
+
+          def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionAllowedTools::mode]
+        end
+      end
+    end
+  end
+end
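The two new files above add a constrained tool-choice surface: ChatCompletionAllowedTools carries a list of tool definitions plus a mode (:auto lets the model pick any of them or none; :required forces it to call one of them), and ChatCompletionAllowedToolChoice wraps that as a tool_choice value. A construction sketch based only on these signatures (field values are illustrative, and `tools` entries are plain hashes per the `::Array[::Hash[Symbol, top]]` type):

    allowed = OpenAI::Chat::ChatCompletionAllowedTools.new(
      mode: :auto, # or :required to force a tool call
      tools: [{type: "function", function: {name: "get_weather"}}]
    )

    choice = OpenAI::Chat::ChatCompletionAllowedToolChoice.new(allowed_tools: allowed)
    choice.to_hash # => {allowed_tools: {...}, type: :allowed_tools}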
diff --git a/sig/openai/models/chat/chat_completion_assistant_message_param.rbs b/sig/openai/models/chat/chat_completion_assistant_message_param.rbs
index db1fee4b..0b86be4a 100644
--- a/sig/openai/models/chat/chat_completion_assistant_message_param.rbs
+++ b/sig/openai/models/chat/chat_completion_assistant_message_param.rbs
@@ -1,28 +1,27 @@
 module OpenAI
   module Models
-    class ChatCompletionAssistantMessageParam = Chat::ChatCompletionAssistantMessageParam

     module Chat
       type chat_completion_assistant_message_param =
         {
           role: :assistant,
-          audio: OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio?,
+          audio: OpenAI::Chat::ChatCompletionAssistantMessageParam::Audio?,
           content: OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::content?,
-          function_call: OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall?,
+          function_call: OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall?,
           name: String,
           refusal: String?,
-          tool_calls: ::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall]
+          tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call]
         }

-      class ChatCompletionAssistantMessageParam < OpenAI::BaseModel
+      class ChatCompletionAssistantMessageParam < OpenAI::Internal::Type::BaseModel
         attr_accessor role: :assistant

-        attr_accessor audio: OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio?
+        attr_accessor audio: OpenAI::Chat::ChatCompletionAssistantMessageParam::Audio?

         attr_accessor content: OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::content?

-        attr_accessor function_call: OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall?
+        attr_accessor function_call: OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall?

         attr_reader name: String?

@@ -30,80 +29,74 @@ module OpenAI

         attr_accessor refusal: String?

-        attr_reader tool_calls: ::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall]?
+        attr_reader tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call]?

         def tool_calls=: (
-          ::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall]
-        ) -> ::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall]
-
-        def initialize:
-          (
-            audio: OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio?,
-            content: OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::content?,
-            function_call: OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall?,
-            name: String,
-            refusal: String?,
-            tool_calls: ::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall],
-            role: :assistant
-          ) -> void
-          | (
-            ?OpenAI::Models::Chat::chat_completion_assistant_message_param
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Chat::chat_completion_assistant_message_param
+          ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call]
+        ) -> ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call]
+
+        def initialize: (
+          ?audio: OpenAI::Chat::ChatCompletionAssistantMessageParam::Audio?,
+          ?content: OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::content?,
+          ?function_call: OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall?,
+          ?name: String,
+          ?refusal: String?,
+          ?tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call],
+          ?role: :assistant
+        ) -> void
+
+        def to_hash: -> {
+          role: :assistant,
+          audio: OpenAI::Chat::ChatCompletionAssistantMessageParam::Audio?,
+          content: OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::content?,
+          function_call: OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall?,
+          name: String,
+          refusal: String?,
+          tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call]
+        }

         type audio = { id: String }

-        class Audio < OpenAI::BaseModel
+        class Audio < OpenAI::Internal::Type::BaseModel
           attr_accessor id: String

-          def initialize:
-            (id: String) -> void
-            | (
-              ?OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::audio
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (id: String) -> void

-          def to_hash: -> OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::audio
+          def to_hash: -> { id: String }
         end

         type content =
           String
           | ::Array[OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Content::array_of_content_part]

-        class Content < OpenAI::Union
-          type array_of_content_part_array =
-            ::Array[OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Content::array_of_content_part]
-
-          ArrayOfContentPartArray: array_of_content_part_array
+        module Content
+          extend OpenAI::Internal::Type::Union

           type array_of_content_part =
-            OpenAI::Models::Chat::ChatCompletionContentPartText
-            | OpenAI::Models::Chat::ChatCompletionContentPartRefusal
+            OpenAI::Chat::ChatCompletionContentPartText
+            | OpenAI::Chat::ChatCompletionContentPartRefusal

-          class ArrayOfContentPart < OpenAI::Union
-            private def self.variants: -> [[:text, OpenAI::Models::Chat::ChatCompletionContentPartText], [:refusal, OpenAI::Models::Chat::ChatCompletionContentPartRefusal]]
+          module ArrayOfContentPart
+            extend OpenAI::Internal::Type::Union
+
+            def self?.variants: -> ::Array[OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Content::array_of_content_part]
           end

-          private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Content::array_of_content_part]]]
+          def self?.variants: -> ::Array[OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::content]
+
+          ArrayOfContentPartArray: OpenAI::Internal::Type::Converter
         end

         type function_call = { arguments: String, name: String }

-        class FunctionCall < OpenAI::BaseModel
+        class FunctionCall < OpenAI::Internal::Type::BaseModel
           attr_accessor arguments: String

           attr_accessor name: String

-          def initialize:
-            (arguments: String, name: String) -> void
-            | (
-              ?OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::function_call
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (arguments: String, name: String) -> void

-          def to_hash: -> OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::function_call
+          def to_hash: -> { arguments: String, name: String }
         end
       end
     end
diff --git a/sig/openai/models/chat/chat_completion_audio.rbs b/sig/openai/models/chat/chat_completion_audio.rbs
index f7d13734..e1cbcccc 100644
--- a/sig/openai/models/chat/chat_completion_audio.rbs
+++ b/sig/openai/models/chat/chat_completion_audio.rbs
@@ -1,13 +1,12 @@
 module OpenAI
   module Models
-    class ChatCompletionAudio = Chat::ChatCompletionAudio

     module Chat
       type chat_completion_audio =
         { id: String, data: String, expires_at: Integer, transcript: String }

-      class ChatCompletionAudio < OpenAI::BaseModel
+      class ChatCompletionAudio < OpenAI::Internal::Type::BaseModel
         attr_accessor id: String

         attr_accessor data: String

@@ -16,19 +15,19 @@ module OpenAI

         attr_accessor transcript: String

-        def initialize:
-          (
-            id: String,
-            data: String,
-            expires_at: Integer,
-            transcript: String
-          ) -> void
-          | (
-            ?OpenAI::Models::Chat::chat_completion_audio
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          id: String,
+          data: String,
+          expires_at: Integer,
+          transcript: String
+        ) -> void

-        def to_hash: -> OpenAI::Models::Chat::chat_completion_audio
+        def to_hash: -> {
+          id: String,
+          data: String,
+          expires_at: Integer,
+          transcript: String
+        }
       end
     end
   end
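The `content` union above accepts either a plain String or an array of text/refusal content parts, and every field of the assistant message param is now optional at construction. A sketch, with illustrative values and runtime behavior assumed from the RBS:

    # Both forms satisfy ChatCompletionAssistantMessageParam::content:
    OpenAI::Chat::ChatCompletionAssistantMessageParam.new(content: "plain text")

    OpenAI::Chat::ChatCompletionAssistantMessageParam.new(
      content: [OpenAI::Chat::ChatCompletionContentPartText.new(text: "part one")]
    )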
diff --git a/sig/openai/models/chat/chat_completion_audio_param.rbs b/sig/openai/models/chat/chat_completion_audio_param.rbs
index 6a35e189..3f77d8bd 100644
--- a/sig/openai/models/chat/chat_completion_audio_param.rbs
+++ b/sig/openai/models/chat/chat_completion_audio_param.rbs
@@ -1,6 +1,5 @@
 module OpenAI
   module Models
-    class ChatCompletionAudioParam = Chat::ChatCompletionAudioParam

     module Chat
@@ -10,39 +9,52 @@ module OpenAI
           voice: OpenAI::Models::Chat::ChatCompletionAudioParam::voice
         }

-      class ChatCompletionAudioParam < OpenAI::BaseModel
+      class ChatCompletionAudioParam < OpenAI::Internal::Type::BaseModel
         attr_accessor format_: OpenAI::Models::Chat::ChatCompletionAudioParam::format_

         attr_accessor voice: OpenAI::Models::Chat::ChatCompletionAudioParam::voice

-        def initialize:
-          (
-            format_: OpenAI::Models::Chat::ChatCompletionAudioParam::format_,
-            voice: OpenAI::Models::Chat::ChatCompletionAudioParam::voice
-          ) -> void
-          | (
-            ?OpenAI::Models::Chat::chat_completion_audio_param
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          format_: OpenAI::Models::Chat::ChatCompletionAudioParam::format_,
+          voice: OpenAI::Models::Chat::ChatCompletionAudioParam::voice
+        ) -> void

-        def to_hash: -> OpenAI::Models::Chat::chat_completion_audio_param
+        def to_hash: -> {
+          format_: OpenAI::Models::Chat::ChatCompletionAudioParam::format_,
+          voice: OpenAI::Models::Chat::ChatCompletionAudioParam::voice
+        }

-        type format_ = :wav | :mp3 | :flac | :opus | :pcm16
+        type format_ = :wav | :aac | :mp3 | :flac | :opus | :pcm16
+
+        module Format
+          extend OpenAI::Internal::Type::Enum

-        class Format < OpenAI::Enum
           WAV: :wav
+          AAC: :aac
           MP3: :mp3
           FLAC: :flac
           OPUS: :opus
           PCM16: :pcm16

-          def self.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionAudioParam::format_]
+          def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionAudioParam::format_]
         end

         type voice =
-          :alloy | :ash | :ballad | :coral | :echo | :sage | :shimmer | :verse
+          String
+          | :alloy
+          | :ash
+          | :ballad
+          | :coral
+          | :echo
+          | :sage
+          | :shimmer
+          | :verse
+
+        module Voice
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Chat::ChatCompletionAudioParam::voice]

-        class Voice < OpenAI::Enum
           ALLOY: :alloy
           ASH: :ash
           BALLAD: :ballad
@@ -51,8 +63,6 @@ module OpenAI
           SAGE: :sage
           SHIMMER: :shimmer
           VERSE: :verse
-
-          def self.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionAudioParam::voice]
         end
       end
     end
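Note that `voice` is now an open union: any String is accepted alongside the enumerated symbols, so newly launched voices do not require a regenerated enum, and `format_` picks up :aac. A sketch, with runtime behavior assumed from the RBS (the second voice name is hypothetical):

    OpenAI::Chat::ChatCompletionAudioParam.new(format_: :aac, voice: :verse)
    OpenAI::Chat::ChatCompletionAudioParam.new(format_: :mp3, voice: "brand-new-voice")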
diff --git a/sig/openai/models/chat/chat_completion_chunk.rbs b/sig/openai/models/chat/chat_completion_chunk.rbs
index 980b2889..8c263bcd 100644
--- a/sig/openai/models/chat/chat_completion_chunk.rbs
+++ b/sig/openai/models/chat/chat_completion_chunk.rbs
@@ -1,25 +1,24 @@
 module OpenAI
   module Models
-    class ChatCompletionChunk = Chat::ChatCompletionChunk

     module Chat
       type chat_completion_chunk =
         {
           id: String,
-          choices: ::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice],
+          choices: ::Array[OpenAI::Chat::ChatCompletionChunk::Choice],
           created: Integer,
           model: String,
           object: :"chat.completion.chunk",
           service_tier: OpenAI::Models::Chat::ChatCompletionChunk::service_tier?,
           system_fingerprint: String,
-          usage: OpenAI::Models::CompletionUsage?
+          usage: OpenAI::CompletionUsage?
         }

-      class ChatCompletionChunk < OpenAI::BaseModel
+      class ChatCompletionChunk < OpenAI::Internal::Type::BaseModel
         attr_accessor id: String

-        attr_accessor choices: ::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice]
+        attr_accessor choices: ::Array[OpenAI::Chat::ChatCompletionChunk::Choice]

         attr_accessor created: Integer

@@ -33,74 +32,78 @@ module OpenAI

         def system_fingerprint=: (String) -> String

-        attr_accessor usage: OpenAI::Models::CompletionUsage?
-
-        def initialize:
-          (
-            id: String,
-            choices: ::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice],
-            created: Integer,
-            model: String,
-            service_tier: OpenAI::Models::Chat::ChatCompletionChunk::service_tier?,
-            system_fingerprint: String,
-            usage: OpenAI::Models::CompletionUsage?,
-            object: :"chat.completion.chunk"
-          ) -> void
-          | (
-            ?OpenAI::Models::Chat::chat_completion_chunk
-            | OpenAI::BaseModel data
-          ) -> void
+        attr_accessor usage: OpenAI::CompletionUsage?
+
+        def initialize: (
+          id: String,
+          choices: ::Array[OpenAI::Chat::ChatCompletionChunk::Choice],
+          created: Integer,
+          model: String,
+          ?service_tier: OpenAI::Models::Chat::ChatCompletionChunk::service_tier?,
+          ?system_fingerprint: String,
+          ?usage: OpenAI::CompletionUsage?,
+          ?object: :"chat.completion.chunk"
+        ) -> void

-        def to_hash: -> OpenAI::Models::Chat::chat_completion_chunk
+        def to_hash: -> {
+          id: String,
+          choices: ::Array[OpenAI::Chat::ChatCompletionChunk::Choice],
+          created: Integer,
+          model: String,
+          object: :"chat.completion.chunk",
+          service_tier: OpenAI::Models::Chat::ChatCompletionChunk::service_tier?,
+          system_fingerprint: String,
+          usage: OpenAI::CompletionUsage?
+        }

         type choice =
           {
-            delta: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta,
+            delta: OpenAI::Chat::ChatCompletionChunk::Choice::Delta,
             finish_reason: OpenAI::Models::Chat::ChatCompletionChunk::Choice::finish_reason?,
             index: Integer,
-            logprobs: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs?
+            logprobs: OpenAI::Chat::ChatCompletionChunk::Choice::Logprobs?
           }

-        class Choice < OpenAI::BaseModel
-          attr_accessor delta: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta
+        class Choice < OpenAI::Internal::Type::BaseModel
+          attr_accessor delta: OpenAI::Chat::ChatCompletionChunk::Choice::Delta

           attr_accessor finish_reason: OpenAI::Models::Chat::ChatCompletionChunk::Choice::finish_reason?

           attr_accessor index: Integer

-          attr_accessor logprobs: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs?
+          attr_accessor logprobs: OpenAI::Chat::ChatCompletionChunk::Choice::Logprobs?

-          def initialize:
-            (
-              delta: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta,
-              finish_reason: OpenAI::Models::Chat::ChatCompletionChunk::Choice::finish_reason?,
-              index: Integer,
-              logprobs: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs?
-            ) -> void
-            | (
-              ?OpenAI::Models::Chat::ChatCompletionChunk::choice
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            delta: OpenAI::Chat::ChatCompletionChunk::Choice::Delta,
+            finish_reason: OpenAI::Models::Chat::ChatCompletionChunk::Choice::finish_reason?,
+            index: Integer,
+            ?logprobs: OpenAI::Chat::ChatCompletionChunk::Choice::Logprobs?
+          ) -> void

-          def to_hash: -> OpenAI::Models::Chat::ChatCompletionChunk::choice
+          def to_hash: -> {
+            delta: OpenAI::Chat::ChatCompletionChunk::Choice::Delta,
+            finish_reason: OpenAI::Models::Chat::ChatCompletionChunk::Choice::finish_reason?,
+            index: Integer,
+            logprobs: OpenAI::Chat::ChatCompletionChunk::Choice::Logprobs?
+          }

           type delta =
             {
               content: String?,
-              function_call: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall,
+              function_call: OpenAI::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall,
               refusal: String?,
               role: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::role,
-              tool_calls: ::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall]
+              tool_calls: ::Array[OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall]
             }

-          class Delta < OpenAI::BaseModel
+          class Delta < OpenAI::Internal::Type::BaseModel
             attr_accessor content: String?

-            attr_reader function_call: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall?
+            attr_reader function_call: OpenAI::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall?

             def function_call=: (
-              OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall
-            ) -> OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall
+              OpenAI::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall
+            ) -> OpenAI::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall

             attr_accessor refusal: String?

@@ -110,30 +113,31 @@ module OpenAI
               OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::role
             ) -> OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::role

-            attr_reader tool_calls: ::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall]?
+            attr_reader tool_calls: ::Array[OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall]?

             def tool_calls=: (
-              ::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall]
-            ) -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall]
-
-            def initialize:
-              (
-                content: String?,
-                function_call: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall,
-                refusal: String?,
-                role: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::role,
-                tool_calls: ::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall]
-              ) -> void
-              | (
-                ?OpenAI::Models::Chat::ChatCompletionChunk::Choice::delta
-                | OpenAI::BaseModel data
-              ) -> void
+              ::Array[OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall]
+            ) -> ::Array[OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall]
+
+            def initialize: (
+              ?content: String?,
+              ?function_call: OpenAI::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall,
+              ?refusal: String?,
+              ?role: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::role,
+              ?tool_calls: ::Array[OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall]
+            ) -> void

-            def to_hash: -> OpenAI::Models::Chat::ChatCompletionChunk::Choice::delta
+            def to_hash: -> {
+              content: String?,
+              function_call: OpenAI::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall,
+              refusal: String?,
+              role: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::role,
+              tool_calls: ::Array[OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall]
+            }

             type function_call = { arguments: String, name: String }

-            class FunctionCall < OpenAI::BaseModel
+            class FunctionCall < OpenAI::Internal::Type::BaseModel
               attr_reader arguments: String?

               def arguments=: (String) -> String

@@ -142,48 +146,45 @@ module OpenAI

              def name=: (String) -> String

-              def initialize:
-                (arguments: String, name: String) -> void
-                | (
-                  ?OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::function_call
-                  | OpenAI::BaseModel data
-                ) -> void
+              def initialize: (?arguments: String, ?name: String) -> void

-              def to_hash: -> OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::function_call
+              def to_hash: -> { arguments: String, name: String }
             end

             type role = :developer | :system | :user | :assistant | :tool

-            class Role < OpenAI::Enum
+            module Role
+              extend OpenAI::Internal::Type::Enum
+
               DEVELOPER: :developer
               SYSTEM: :system
               USER: :user
               ASSISTANT: :assistant
               TOOL: :tool

-              def self.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::role]
+              def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::role]
             end

             type tool_call =
               {
                 index: Integer,
                 id: String,
-                function: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function,
+                function: OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function,
                 type: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::type_
               }

-            class ToolCall < OpenAI::BaseModel
+            class ToolCall < OpenAI::Internal::Type::BaseModel
               attr_accessor index: Integer

               attr_reader id: String?

               def id=: (String) -> String

-              attr_reader function: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function?
+              attr_reader function: OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function?

               def function=: (
-                OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function
-              ) -> OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function
+                OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function
+              ) -> OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function

               attr_reader type: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::type_?

@@ -191,23 +192,23 @@ module OpenAI
                 OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::type_
               ) -> OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::type_

-              def initialize:
-                (
-                  index: Integer,
-                  id: String,
-                  function: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function,
-                  type: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::type_
-                ) -> void
-                | (
-                  ?OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::tool_call
-                  | OpenAI::BaseModel data
-                ) -> void
+              def initialize: (
+                index: Integer,
+                ?id: String,
+                ?function: OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function,
+                ?type: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::type_
+              ) -> void

-              def to_hash: -> OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::tool_call
+              def to_hash: -> {
+                index: Integer,
+                id: String,
+                function: OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function,
+                type: OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::type_
+              }

               type function = { arguments: String, name: String }

-              class Function < OpenAI::BaseModel
+              class Function < OpenAI::Internal::Type::BaseModel
                 attr_reader arguments: String?

                 def arguments=: (String) -> String

@@ -216,22 +217,19 @@ module OpenAI

                 def name=: (String) -> String

-                def initialize:
-                  (arguments: String, name: String) -> void
-                  | (
-                    ?OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::function
-                    | OpenAI::BaseModel data
-                  ) -> void
+                def initialize: (?arguments: String, ?name: String) -> void

-                def to_hash: -> OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::function
+                def to_hash: -> { arguments: String, name: String }
               end

               type type_ = :function

-              class Type < OpenAI::Enum
+              module Type
+                extend OpenAI::Internal::Type::Enum
+
                 FUNCTION: :function

-                def self.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::type_]
+                def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::type_]
               end
             end
           end

@@ -239,48 +237,53 @@ module OpenAI
           type finish_reason =
             :stop | :length | :tool_calls | :content_filter | :function_call

-          class FinishReason < OpenAI::Enum
+          module FinishReason
+            extend OpenAI::Internal::Type::Enum
+
             STOP: :stop
             LENGTH: :length
             TOOL_CALLS: :tool_calls
             CONTENT_FILTER: :content_filter
             FUNCTION_CALL: :function_call

-            def self.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::finish_reason]
+            def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice::finish_reason]
           end

           type logprobs =
             {
-              content: ::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]?,
-              refusal: ::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]?
+              content: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?,
+              refusal: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?
             }

-          class Logprobs < OpenAI::BaseModel
-            attr_accessor content: ::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]?
+          class Logprobs < OpenAI::Internal::Type::BaseModel
+            attr_accessor content: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?

-            attr_accessor refusal: ::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]?
+            attr_accessor refusal: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?

-            def initialize:
-              (
-                content: ::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]?,
-                refusal: ::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]?
-              ) -> void
-              | (
-                ?OpenAI::Models::Chat::ChatCompletionChunk::Choice::logprobs
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (
+              content: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?,
+              refusal: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?
+            ) -> void

-            def to_hash: -> OpenAI::Models::Chat::ChatCompletionChunk::Choice::logprobs
+            def to_hash: -> {
+              content: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?,
+              refusal: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob]?
+            }
           end
         end

-        type service_tier = :scale | :default
+        type service_tier = :auto | :default | :flex | :scale | :priority

-        class ServiceTier < OpenAI::Enum
-          SCALE: :scale
+        module ServiceTier
+          extend OpenAI::Internal::Type::Enum
+
+          AUTO: :auto
           DEFAULT: :default
+          FLEX: :flex
+          SCALE: :scale
+          PRIORITY: :priority

-          def self.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::service_tier]
+          def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::service_tier]
         end
       end
     end
+ } end end - type service_tier = :scale | :default + type service_tier = :auto | :default | :flex | :scale | :priority - class ServiceTier < OpenAI::Enum - SCALE: :scale + module ServiceTier + extend OpenAI::Internal::Type::Enum + + AUTO: :auto DEFAULT: :default + FLEX: :flex + SCALE: :scale + PRIORITY: :priority - def self.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::service_tier] + def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::service_tier] end end end diff --git a/sig/openai/models/chat/chat_completion_content_part.rbs b/sig/openai/models/chat/chat_completion_content_part.rbs index f0459960..c0750762 100644 --- a/sig/openai/models/chat/chat_completion_content_part.rbs +++ b/sig/openai/models/chat/chat_completion_content_part.rbs @@ -1,42 +1,41 @@ module OpenAI module Models - - class ChatCompletionContentPart = Chat::ChatCompletionContentPart + module ChatCompletionContentPart = Chat::ChatCompletionContentPart module Chat type chat_completion_content_part = - OpenAI::Models::Chat::ChatCompletionContentPartText - | OpenAI::Models::Chat::ChatCompletionContentPartImage - | OpenAI::Models::Chat::ChatCompletionContentPartInputAudio - | OpenAI::Models::Chat::ChatCompletionContentPart::File + OpenAI::Chat::ChatCompletionContentPartText + | OpenAI::Chat::ChatCompletionContentPartImage + | OpenAI::Chat::ChatCompletionContentPartInputAudio + | OpenAI::Chat::ChatCompletionContentPart::File + + module ChatCompletionContentPart + extend OpenAI::Internal::Type::Union - class ChatCompletionContentPart < OpenAI::Union type file = { - file: OpenAI::Models::Chat::ChatCompletionContentPart::File::File, + file: OpenAI::Chat::ChatCompletionContentPart::File::File, type: :file } - class File < OpenAI::BaseModel - attr_accessor file: OpenAI::Models::Chat::ChatCompletionContentPart::File::File + class File < OpenAI::Internal::Type::BaseModel + attr_accessor file: OpenAI::Chat::ChatCompletionContentPart::File::File attr_accessor type: :file - def initialize: - ( - file: OpenAI::Models::Chat::ChatCompletionContentPart::File::File, - type: :file - ) -> void - | ( - ?OpenAI::Models::Chat::ChatCompletionContentPart::file - | OpenAI::BaseModel data - ) -> void + def initialize: ( + file: OpenAI::Chat::ChatCompletionContentPart::File::File, + ?type: :file + ) -> void - def to_hash: -> OpenAI::Models::Chat::ChatCompletionContentPart::file + def to_hash: -> { + file: OpenAI::Chat::ChatCompletionContentPart::File::File, + type: :file + } - type file = { file_data: String, file_id: String, file_name: String } + type file = { file_data: String, file_id: String, filename: String } - class File < OpenAI::BaseModel + class File < OpenAI::Internal::Type::BaseModel attr_reader file_data: String? def file_data=: (String) -> String @@ -45,22 +44,25 @@ module OpenAI def file_id=: (String) -> String - attr_reader file_name: String? + attr_reader filename: String? 
- def file_name=: (String) -> String + def filename=: (String) -> String - def initialize: - (file_data: String, file_id: String, file_name: String) -> void - | ( - ?OpenAI::Models::Chat::ChatCompletionContentPart::File::file - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?file_data: String, + ?file_id: String, + ?filename: String + ) -> void - def to_hash: -> OpenAI::Models::Chat::ChatCompletionContentPart::File::file + def to_hash: -> { + file_data: String, + file_id: String, + filename: String + } end end - private def self.variants: -> [[:text, OpenAI::Models::Chat::ChatCompletionContentPartText], [:image_url, OpenAI::Models::Chat::ChatCompletionContentPartImage], [:input_audio, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio], [:file, OpenAI::Models::Chat::ChatCompletionContentPart::File]] + def self?.variants: -> ::Array[OpenAI::Models::Chat::chat_completion_content_part] end end end diff --git a/sig/openai/models/chat/chat_completion_content_part_image.rbs b/sig/openai/models/chat/chat_completion_content_part_image.rbs index df324202..c8ae374c 100644 --- a/sig/openai/models/chat/chat_completion_content_part_image.rbs +++ b/sig/openai/models/chat/chat_completion_content_part_image.rbs @@ -1,31 +1,28 @@ module OpenAI module Models - class ChatCompletionContentPartImage = Chat::ChatCompletionContentPartImage module Chat type chat_completion_content_part_image = { - image_url: OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL, + image_url: OpenAI::Chat::ChatCompletionContentPartImage::ImageURL, type: :image_url } - class ChatCompletionContentPartImage < OpenAI::BaseModel - attr_accessor image_url: OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL + class ChatCompletionContentPartImage < OpenAI::Internal::Type::BaseModel + attr_accessor image_url: OpenAI::Chat::ChatCompletionContentPartImage::ImageURL attr_accessor type: :image_url - def initialize: - ( - image_url: OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL, - type: :image_url - ) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_content_part_image - | OpenAI::BaseModel data - ) -> void + def initialize: ( + image_url: OpenAI::Chat::ChatCompletionContentPartImage::ImageURL, + ?type: :image_url + ) -> void - def to_hash: -> OpenAI::Models::Chat::chat_completion_content_part_image + def to_hash: -> { + image_url: OpenAI::Chat::ChatCompletionContentPartImage::ImageURL, + type: :image_url + } type image_url = { @@ -33,7 +30,7 @@ module OpenAI detail: OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::detail } - class ImageURL < OpenAI::BaseModel + class ImageURL < OpenAI::Internal::Type::BaseModel attr_accessor url: String attr_reader detail: OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::detail? 
@@ -42,26 +39,26 @@ module OpenAI OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::detail ) -> OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::detail - def initialize: - ( - url: String, - detail: OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::detail - ) -> void - | ( - ?OpenAI::Models::Chat::ChatCompletionContentPartImage::image_url - | OpenAI::BaseModel data - ) -> void + def initialize: ( + url: String, + ?detail: OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::detail + ) -> void - def to_hash: -> OpenAI::Models::Chat::ChatCompletionContentPartImage::image_url + def to_hash: -> { + url: String, + detail: OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::detail + } type detail = :auto | :low | :high - class Detail < OpenAI::Enum + module Detail + extend OpenAI::Internal::Type::Enum + AUTO: :auto LOW: :low HIGH: :high - def self.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::detail] + def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::detail] end end end diff --git a/sig/openai/models/chat/chat_completion_content_part_input_audio.rbs b/sig/openai/models/chat/chat_completion_content_part_input_audio.rbs index 5bec0fc3..e2818299 100644 --- a/sig/openai/models/chat/chat_completion_content_part_input_audio.rbs +++ b/sig/openai/models/chat/chat_completion_content_part_input_audio.rbs @@ -1,31 +1,28 @@ module OpenAI module Models - class ChatCompletionContentPartInputAudio = Chat::ChatCompletionContentPartInputAudio module Chat type chat_completion_content_part_input_audio = { - input_audio: OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio, + input_audio: OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio, type: :input_audio } - class ChatCompletionContentPartInputAudio < OpenAI::BaseModel - attr_accessor input_audio: OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio + class ChatCompletionContentPartInputAudio < OpenAI::Internal::Type::BaseModel + attr_accessor input_audio: OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio attr_accessor type: :input_audio - def initialize: - ( - input_audio: OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio, - type: :input_audio - ) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_content_part_input_audio - | OpenAI::BaseModel data - ) -> void + def initialize: ( + input_audio: OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio, + ?type: :input_audio + ) -> void - def to_hash: -> OpenAI::Models::Chat::chat_completion_content_part_input_audio + def to_hash: -> { + input_audio: OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio, + type: :input_audio + } type input_audio = { @@ -33,30 +30,30 @@ module OpenAI format_: OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio::format_ } - class InputAudio < OpenAI::BaseModel + class InputAudio < OpenAI::Internal::Type::BaseModel attr_accessor data: String attr_accessor format_: OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio::format_ - def initialize: - ( - data: String, - format_: OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio::format_ - ) -> void - | ( - ?OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::input_audio - | OpenAI::BaseModel data - ) -> void + def initialize: ( + data: String, + format_: OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio::format_ + ) -> 
void - def to_hash: -> OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::input_audio + def to_hash: -> { + data: String, + format_: OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio::format_ + } type format_ = :wav | :mp3 - class Format < OpenAI::Enum + module Format + extend OpenAI::Internal::Type::Enum + WAV: :wav MP3: :mp3 - def self.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio::format_] + def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio::format_] end end end diff --git a/sig/openai/models/chat/chat_completion_content_part_refusal.rbs b/sig/openai/models/chat/chat_completion_content_part_refusal.rbs index 98e94526..3511c222 100644 --- a/sig/openai/models/chat/chat_completion_content_part_refusal.rbs +++ b/sig/openai/models/chat/chat_completion_content_part_refusal.rbs @@ -1,25 +1,19 @@ module OpenAI module Models - class ChatCompletionContentPartRefusal = Chat::ChatCompletionContentPartRefusal module Chat type chat_completion_content_part_refusal = { refusal: String, type: :refusal } - class ChatCompletionContentPartRefusal < OpenAI::BaseModel + class ChatCompletionContentPartRefusal < OpenAI::Internal::Type::BaseModel attr_accessor refusal: String attr_accessor type: :refusal - def initialize: - (refusal: String, type: :refusal) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_content_part_refusal - | OpenAI::BaseModel data - ) -> void + def initialize: (refusal: String, ?type: :refusal) -> void - def to_hash: -> OpenAI::Models::Chat::chat_completion_content_part_refusal + def to_hash: -> { refusal: String, type: :refusal } end end end diff --git a/sig/openai/models/chat/chat_completion_content_part_text.rbs b/sig/openai/models/chat/chat_completion_content_part_text.rbs index 36f4b5a1..0581e14c 100644 --- a/sig/openai/models/chat/chat_completion_content_part_text.rbs +++ b/sig/openai/models/chat/chat_completion_content_part_text.rbs @@ -1,24 +1,18 @@ module OpenAI module Models - class ChatCompletionContentPartText = Chat::ChatCompletionContentPartText module Chat type chat_completion_content_part_text = { text: String, type: :text } - class ChatCompletionContentPartText < OpenAI::BaseModel + class ChatCompletionContentPartText < OpenAI::Internal::Type::BaseModel attr_accessor text: String attr_accessor type: :text - def initialize: - (text: String, type: :text) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_content_part_text - | OpenAI::BaseModel data - ) -> void + def initialize: (text: String, ?type: :text) -> void - def to_hash: -> OpenAI::Models::Chat::chat_completion_content_part_text + def to_hash: -> { text: String, type: :text } end end end diff --git a/sig/openai/models/chat/chat_completion_custom_tool.rbs b/sig/openai/models/chat/chat_completion_custom_tool.rbs new file mode 100644 index 00000000..ac57cae7 --- /dev/null +++ b/sig/openai/models/chat/chat_completion_custom_tool.rbs @@ -0,0 +1,137 @@ +module OpenAI + module Models + class ChatCompletionCustomTool = Chat::ChatCompletionCustomTool + + module Chat + type chat_completion_custom_tool = + { + custom: OpenAI::Chat::ChatCompletionCustomTool::Custom, + type: :custom + } + + class ChatCompletionCustomTool < OpenAI::Internal::Type::BaseModel + attr_accessor custom: OpenAI::Chat::ChatCompletionCustomTool::Custom + + attr_accessor type: :custom + + def initialize: ( + custom: OpenAI::Chat::ChatCompletionCustomTool::Custom, + ?type: :custom + ) -> void + + def to_hash: -> { + custom: 
OpenAI::Chat::ChatCompletionCustomTool::Custom, + type: :custom + } + + type custom = + { + name: String, + description: String, + format_: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::format_ + } + + class Custom < OpenAI::Internal::Type::BaseModel + attr_accessor name: String + + attr_reader description: String? + + def description=: (String) -> String + + attr_reader format_: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::format_? + + def format_=: ( + OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::format_ + ) -> OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::format_ + + def initialize: ( + name: String, + ?description: String, + ?format_: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::format_ + ) -> void + + def to_hash: -> { + name: String, + description: String, + format_: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::format_ + } + + type format_ = + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text + | OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar + + module Format + extend OpenAI::Internal::Type::Union + + type text = { type: :text } + + class Text < OpenAI::Internal::Type::BaseModel + attr_accessor type: :text + + def initialize: (?type: :text) -> void + + def to_hash: -> { type: :text } + end + + type grammar = + { + grammar: OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar, + type: :grammar + } + + class Grammar < OpenAI::Internal::Type::BaseModel + attr_accessor grammar: OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar + + attr_accessor type: :grammar + + def initialize: ( + grammar: OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar, + ?type: :grammar + ) -> void + + def to_hash: -> { + grammar: OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar, + type: :grammar + } + + type grammar = + { + definition: String, + syntax: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::syntax + } + + class Grammar < OpenAI::Internal::Type::BaseModel + attr_accessor definition: String + + attr_accessor syntax: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::syntax + + def initialize: ( + definition: String, + syntax: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::syntax + ) -> void + + def to_hash: -> { + definition: String, + syntax: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::syntax + } + + type syntax = :lark | :regex + + module Syntax + extend OpenAI::Internal::Type::Enum + + LARK: :lark + REGEX: :regex + + def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::syntax] + end + end + end + + def self?.variants: -> ::Array[OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::format_] + end + end + end + end + end +end diff --git a/sig/openai/models/chat/chat_completion_deleted.rbs b/sig/openai/models/chat/chat_completion_deleted.rbs index 824a0a21..c37e21de 100644 --- a/sig/openai/models/chat/chat_completion_deleted.rbs +++ b/sig/openai/models/chat/chat_completion_deleted.rbs @@ -1,31 +1,29 @@ module OpenAI module Models - class ChatCompletionDeleted = Chat::ChatCompletionDeleted module Chat type chat_completion_deleted = { id: String, deleted: bool, object: :"chat.completion.deleted" } - class ChatCompletionDeleted < OpenAI::BaseModel + class ChatCompletionDeleted < OpenAI::Internal::Type::BaseModel attr_accessor id: 
String attr_accessor deleted: bool attr_accessor object: :"chat.completion.deleted" - def initialize: - ( - id: String, - deleted: bool, - object: :"chat.completion.deleted" - ) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_deleted - | OpenAI::BaseModel data - ) -> void + def initialize: ( + id: String, + deleted: bool, + ?object: :"chat.completion.deleted" + ) -> void - def to_hash: -> OpenAI::Models::Chat::chat_completion_deleted + def to_hash: -> { + id: String, + deleted: bool, + object: :"chat.completion.deleted" + } end end end diff --git a/sig/openai/models/chat/chat_completion_developer_message_param.rbs b/sig/openai/models/chat/chat_completion_developer_message_param.rbs index e528c37a..3333b2ce 100644 --- a/sig/openai/models/chat/chat_completion_developer_message_param.rbs +++ b/sig/openai/models/chat/chat_completion_developer_message_param.rbs @@ -1,6 +1,5 @@ module OpenAI module Models - class ChatCompletionDeveloperMessageParam = Chat::ChatCompletionDeveloperMessageParam module Chat @@ -11,7 +10,7 @@ module OpenAI name: String } - class ChatCompletionDeveloperMessageParam < OpenAI::BaseModel + class ChatCompletionDeveloperMessageParam < OpenAI::Internal::Type::BaseModel attr_accessor content: OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam::content attr_accessor role: :developer @@ -20,29 +19,27 @@ module OpenAI def name=: (String) -> String - def initialize: - ( - content: OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam::content, - name: String, - role: :developer - ) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_developer_message_param - | OpenAI::BaseModel data - ) -> void + def initialize: ( + content: OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam::content, + ?name: String, + ?role: :developer + ) -> void - def to_hash: -> OpenAI::Models::Chat::chat_completion_developer_message_param + def to_hash: -> { + content: OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam::content, + role: :developer, + name: String + } type content = - String | ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] + String | ::Array[OpenAI::Chat::ChatCompletionContentPartText] - class Content < OpenAI::Union - type chat_completion_content_part_text_array = - ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] + module Content + extend OpenAI::Internal::Type::Union - ChatCompletionContentPartTextArray: chat_completion_content_part_text_array + def self?.variants: -> ::Array[OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam::content] - private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]] + ChatCompletionContentPartTextArray: OpenAI::Internal::Type::Converter end end end diff --git a/sig/openai/models/chat/chat_completion_function_call_option.rbs b/sig/openai/models/chat/chat_completion_function_call_option.rbs index fdc44315..5ce652cc 100644 --- a/sig/openai/models/chat/chat_completion_function_call_option.rbs +++ b/sig/openai/models/chat/chat_completion_function_call_option.rbs @@ -1,22 +1,16 @@ module OpenAI module Models - class ChatCompletionFunctionCallOption = Chat::ChatCompletionFunctionCallOption module Chat type chat_completion_function_call_option = { name: String } - class ChatCompletionFunctionCallOption < OpenAI::BaseModel + class ChatCompletionFunctionCallOption < OpenAI::Internal::Type::BaseModel attr_accessor name: String - def initialize: - (name: String) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_function_call_option - | 
OpenAI::BaseModel data - ) -> void + def initialize: (name: String) -> void - def to_hash: -> OpenAI::Models::Chat::chat_completion_function_call_option + def to_hash: -> { name: String } end end end diff --git a/sig/openai/models/chat/chat_completion_function_message_param.rbs b/sig/openai/models/chat/chat_completion_function_message_param.rbs index dca0f275..4bb967bc 100644 --- a/sig/openai/models/chat/chat_completion_function_message_param.rbs +++ b/sig/openai/models/chat/chat_completion_function_message_param.rbs @@ -1,27 +1,25 @@ module OpenAI module Models - class ChatCompletionFunctionMessageParam = Chat::ChatCompletionFunctionMessageParam module Chat type chat_completion_function_message_param = { content: String?, name: String, role: :function } - class ChatCompletionFunctionMessageParam < OpenAI::BaseModel + class ChatCompletionFunctionMessageParam < OpenAI::Internal::Type::BaseModel attr_accessor content: String? attr_accessor name: String attr_accessor role: :function - def initialize: - (content: String?, name: String, role: :function) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_function_message_param - | OpenAI::BaseModel data - ) -> void + def initialize: ( + content: String?, + name: String, + ?role: :function + ) -> void - def to_hash: -> OpenAI::Models::Chat::chat_completion_function_message_param + def to_hash: -> { content: String?, name: String, role: :function } end end end diff --git a/sig/openai/models/chat/chat_completion_function_tool.rbs b/sig/openai/models/chat/chat_completion_function_tool.rbs new file mode 100644 index 00000000..03d0abce --- /dev/null +++ b/sig/openai/models/chat/chat_completion_function_tool.rbs @@ -0,0 +1,26 @@ +module OpenAI + module Models + class ChatCompletionFunctionTool = Chat::ChatCompletionFunctionTool + + module Chat + type chat_completion_function_tool = + { function: OpenAI::FunctionDefinition, type: :function } + + class ChatCompletionFunctionTool < OpenAI::Internal::Type::BaseModel + attr_accessor function: OpenAI::FunctionDefinition + + attr_accessor type: :function + + def initialize: ( + function: OpenAI::FunctionDefinition, + ?type: :function + ) -> void + + def to_hash: -> { + function: OpenAI::FunctionDefinition, + type: :function + } + end + end + end +end diff --git a/sig/openai/models/chat/chat_completion_message.rbs b/sig/openai/models/chat/chat_completion_message.rbs index 16b8a72a..dbf4b405 100644 --- a/sig/openai/models/chat/chat_completion_message.rbs +++ b/sig/openai/models/chat/chat_completion_message.rbs @@ -1,6 +1,5 @@ module OpenAI module Models - class ChatCompletionMessage = Chat::ChatCompletionMessage module Chat @@ -9,78 +8,79 @@ module OpenAI content: String?, refusal: String?, role: :assistant, - annotations: ::Array[OpenAI::Models::Chat::ChatCompletionMessage::Annotation], - audio: OpenAI::Models::Chat::ChatCompletionAudio?, - function_call: OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall, - tool_calls: ::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall] + annotations: ::Array[OpenAI::Chat::ChatCompletionMessage::Annotation], + audio: OpenAI::Chat::ChatCompletionAudio?, + function_call: OpenAI::Chat::ChatCompletionMessage::FunctionCall, + tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call] } - class ChatCompletionMessage < OpenAI::BaseModel + class ChatCompletionMessage < OpenAI::Internal::Type::BaseModel attr_accessor content: String? attr_accessor refusal: String? 
attr_accessor role: :assistant - attr_reader annotations: ::Array[OpenAI::Models::Chat::ChatCompletionMessage::Annotation]? + attr_reader annotations: ::Array[OpenAI::Chat::ChatCompletionMessage::Annotation]? def annotations=: ( - ::Array[OpenAI::Models::Chat::ChatCompletionMessage::Annotation] - ) -> ::Array[OpenAI::Models::Chat::ChatCompletionMessage::Annotation] + ::Array[OpenAI::Chat::ChatCompletionMessage::Annotation] + ) -> ::Array[OpenAI::Chat::ChatCompletionMessage::Annotation] - attr_accessor audio: OpenAI::Models::Chat::ChatCompletionAudio? + attr_accessor audio: OpenAI::Chat::ChatCompletionAudio? - attr_reader function_call: OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall? + attr_reader function_call: OpenAI::Chat::ChatCompletionMessage::FunctionCall? def function_call=: ( - OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall - ) -> OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall + OpenAI::Chat::ChatCompletionMessage::FunctionCall + ) -> OpenAI::Chat::ChatCompletionMessage::FunctionCall - attr_reader tool_calls: ::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall]? + attr_reader tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call]? def tool_calls=: ( - ::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall] - ) -> ::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall] - - def initialize: - ( - content: String?, - refusal: String?, - annotations: ::Array[OpenAI::Models::Chat::ChatCompletionMessage::Annotation], - audio: OpenAI::Models::Chat::ChatCompletionAudio?, - function_call: OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall, - tool_calls: ::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall], - role: :assistant - ) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_message - | OpenAI::BaseModel data - ) -> void + ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call] + ) -> ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call] - def to_hash: -> OpenAI::Models::Chat::chat_completion_message + def initialize: ( + content: String?, + refusal: String?, + ?annotations: ::Array[OpenAI::Chat::ChatCompletionMessage::Annotation], + ?audio: OpenAI::Chat::ChatCompletionAudio?, + ?function_call: OpenAI::Chat::ChatCompletionMessage::FunctionCall, + ?tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call], + ?role: :assistant + ) -> void + + def to_hash: -> { + content: String?, + refusal: String?, + role: :assistant, + annotations: ::Array[OpenAI::Chat::ChatCompletionMessage::Annotation], + audio: OpenAI::Chat::ChatCompletionAudio?, + function_call: OpenAI::Chat::ChatCompletionMessage::FunctionCall, + tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call] + } type annotation = { type: :url_citation, - url_citation: OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation + url_citation: OpenAI::Chat::ChatCompletionMessage::Annotation::URLCitation } - class Annotation < OpenAI::BaseModel + class Annotation < OpenAI::Internal::Type::BaseModel attr_accessor type: :url_citation - attr_accessor url_citation: OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation + attr_accessor url_citation: OpenAI::Chat::ChatCompletionMessage::Annotation::URLCitation - def initialize: - ( - url_citation: OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation, - type: :url_citation - ) -> void - | ( - ?OpenAI::Models::Chat::ChatCompletionMessage::annotation - | OpenAI::BaseModel data - ) -> void + def initialize: ( + 
url_citation: OpenAI::Chat::ChatCompletionMessage::Annotation::URLCitation, + ?type: :url_citation + ) -> void - def to_hash: -> OpenAI::Models::Chat::ChatCompletionMessage::annotation + def to_hash: -> { + type: :url_citation, + url_citation: OpenAI::Chat::ChatCompletionMessage::Annotation::URLCitation + } type url_citation = { @@ -90,7 +90,7 @@ module OpenAI url: String } - class URLCitation < OpenAI::BaseModel + class URLCitation < OpenAI::Internal::Type::BaseModel attr_accessor end_index: Integer attr_accessor start_index: Integer @@ -99,37 +99,32 @@ module OpenAI attr_accessor url: String - def initialize: - ( - end_index: Integer, - start_index: Integer, - title: String, - url: String - ) -> void - | ( - ?OpenAI::Models::Chat::ChatCompletionMessage::Annotation::url_citation - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Chat::ChatCompletionMessage::Annotation::url_citation + def initialize: ( + end_index: Integer, + start_index: Integer, + title: String, + url: String + ) -> void + + def to_hash: -> { + end_index: Integer, + start_index: Integer, + title: String, + url: String + } end end type function_call = { arguments: String, name: String } - class FunctionCall < OpenAI::BaseModel + class FunctionCall < OpenAI::Internal::Type::BaseModel attr_accessor arguments: String attr_accessor name: String - def initialize: - (arguments: String, name: String) -> void - | ( - ?OpenAI::Models::Chat::ChatCompletionMessage::function_call - | OpenAI::BaseModel data - ) -> void + def initialize: (arguments: String, name: String) -> void - def to_hash: -> OpenAI::Models::Chat::ChatCompletionMessage::function_call + def to_hash: -> { arguments: String, name: String } end end end diff --git a/sig/openai/models/chat/chat_completion_message_custom_tool_call.rbs b/sig/openai/models/chat/chat_completion_message_custom_tool_call.rbs new file mode 100644 index 00000000..b3852753 --- /dev/null +++ b/sig/openai/models/chat/chat_completion_message_custom_tool_call.rbs @@ -0,0 +1,46 @@ +module OpenAI + module Models + class ChatCompletionMessageCustomToolCall = Chat::ChatCompletionMessageCustomToolCall + + module Chat + type chat_completion_message_custom_tool_call = + { + id: String, + custom: OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom, + type: :custom + } + + class ChatCompletionMessageCustomToolCall < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor custom: OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom + + attr_accessor type: :custom + + def initialize: ( + id: String, + custom: OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom, + ?type: :custom + ) -> void + + def to_hash: -> { + id: String, + custom: OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom, + type: :custom + } + + type custom = { input: String, name: String } + + class Custom < OpenAI::Internal::Type::BaseModel + attr_accessor input: String + + attr_accessor name: String + + def initialize: (input: String, name: String) -> void + + def to_hash: -> { input: String, name: String } + end + end + end + end +end diff --git a/sig/openai/models/chat/chat_completion_message_function_tool_call.rbs b/sig/openai/models/chat/chat_completion_message_function_tool_call.rbs new file mode 100644 index 00000000..ebd90f7f --- /dev/null +++ b/sig/openai/models/chat/chat_completion_message_function_tool_call.rbs @@ -0,0 +1,46 @@ +module OpenAI + module Models + class ChatCompletionMessageFunctionToolCall = Chat::ChatCompletionMessageFunctionToolCall + + module Chat 
+ type chat_completion_message_function_tool_call = + { + id: String, + function: OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function, + type: :function + } + + class ChatCompletionMessageFunctionToolCall < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor function: OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function + + attr_accessor type: :function + + def initialize: ( + id: String, + function: OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function, + ?type: :function + ) -> void + + def to_hash: -> { + id: String, + function: OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function, + type: :function + } + + type function = { arguments: String, name: String } + + class Function < OpenAI::Internal::Type::BaseModel + attr_accessor arguments: String + + attr_accessor name: String + + def initialize: (arguments: String, name: String) -> void + + def to_hash: -> { arguments: String, name: String } + end + end + end + end +end diff --git a/sig/openai/models/chat/chat_completion_message_param.rbs b/sig/openai/models/chat/chat_completion_message_param.rbs index 771ad5ab..8feb9872 100644 --- a/sig/openai/models/chat/chat_completion_message_param.rbs +++ b/sig/openai/models/chat/chat_completion_message_param.rbs @@ -1,19 +1,20 @@ module OpenAI module Models - - class ChatCompletionMessageParam = Chat::ChatCompletionMessageParam + module ChatCompletionMessageParam = Chat::ChatCompletionMessageParam module Chat type chat_completion_message_param = - OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam - | OpenAI::Models::Chat::ChatCompletionSystemMessageParam - | OpenAI::Models::Chat::ChatCompletionUserMessageParam - | OpenAI::Models::Chat::ChatCompletionAssistantMessageParam - | OpenAI::Models::Chat::ChatCompletionToolMessageParam - | OpenAI::Models::Chat::ChatCompletionFunctionMessageParam + OpenAI::Chat::ChatCompletionDeveloperMessageParam + | OpenAI::Chat::ChatCompletionSystemMessageParam + | OpenAI::Chat::ChatCompletionUserMessageParam + | OpenAI::Chat::ChatCompletionAssistantMessageParam + | OpenAI::Chat::ChatCompletionToolMessageParam + | OpenAI::Chat::ChatCompletionFunctionMessageParam - class ChatCompletionMessageParam < OpenAI::Union - private def self.variants: -> [[:developer, OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam], [:system, OpenAI::Models::Chat::ChatCompletionSystemMessageParam], [:user, OpenAI::Models::Chat::ChatCompletionUserMessageParam], [:assistant, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam], [:tool, OpenAI::Models::Chat::ChatCompletionToolMessageParam], [:function, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam]] + module ChatCompletionMessageParam + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::Chat::chat_completion_message_param] end end end diff --git a/sig/openai/models/chat/chat_completion_message_tool_call.rbs b/sig/openai/models/chat/chat_completion_message_tool_call.rbs index 797c8c1c..446c9f1e 100644 --- a/sig/openai/models/chat/chat_completion_message_tool_call.rbs +++ b/sig/openai/models/chat/chat_completion_message_tool_call.rbs @@ -1,52 +1,16 @@ module OpenAI module Models - - class ChatCompletionMessageToolCall = Chat::ChatCompletionMessageToolCall + module ChatCompletionMessageToolCall = Chat::ChatCompletionMessageToolCall module Chat type chat_completion_message_tool_call = - { - id: String, - function: OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function, - type: :function - } + 
OpenAI::Chat::ChatCompletionMessageFunctionToolCall + | OpenAI::Chat::ChatCompletionMessageCustomToolCall - class ChatCompletionMessageToolCall < OpenAI::BaseModel - attr_accessor id: String + module ChatCompletionMessageToolCall + extend OpenAI::Internal::Type::Union - attr_accessor function: OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function - - attr_accessor type: :function - - def initialize: - ( - id: String, - function: OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function, - type: :function - ) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_message_tool_call - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Chat::chat_completion_message_tool_call - - type function = { arguments: String, name: String } - - class Function < OpenAI::BaseModel - attr_accessor arguments: String - - attr_accessor name: String - - def initialize: - (arguments: String, name: String) -> void - | ( - ?OpenAI::Models::Chat::ChatCompletionMessageToolCall::function - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Chat::ChatCompletionMessageToolCall::function - end + def self?.variants: -> ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call] end end end diff --git a/sig/openai/models/chat/chat_completion_modality.rbs b/sig/openai/models/chat/chat_completion_modality.rbs index e8df5962..c0b279d7 100644 --- a/sig/openai/models/chat/chat_completion_modality.rbs +++ b/sig/openai/models/chat/chat_completion_modality.rbs @@ -1,16 +1,17 @@ module OpenAI module Models - - class ChatCompletionModality = Chat::ChatCompletionModality + module ChatCompletionModality = Chat::ChatCompletionModality module Chat type chat_completion_modality = :text | :audio - class ChatCompletionModality < OpenAI::Enum + module ChatCompletionModality + extend OpenAI::Internal::Type::Enum + TEXT: :text AUDIO: :audio - def self.values: -> ::Array[OpenAI::Models::Chat::chat_completion_modality] + def self?.values: -> ::Array[OpenAI::Models::Chat::chat_completion_modality] end end end diff --git a/sig/openai/models/chat/chat_completion_named_tool_choice.rbs b/sig/openai/models/chat/chat_completion_named_tool_choice.rbs index e71c5191..50395d8f 100644 --- a/sig/openai/models/chat/chat_completion_named_tool_choice.rbs +++ b/sig/openai/models/chat/chat_completion_named_tool_choice.rbs @@ -1,45 +1,37 @@ module OpenAI module Models - class ChatCompletionNamedToolChoice = Chat::ChatCompletionNamedToolChoice module Chat type chat_completion_named_tool_choice = { - function: OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function, + function: OpenAI::Chat::ChatCompletionNamedToolChoice::Function, type: :function } - class ChatCompletionNamedToolChoice < OpenAI::BaseModel - attr_accessor function: OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function + class ChatCompletionNamedToolChoice < OpenAI::Internal::Type::BaseModel + attr_accessor function: OpenAI::Chat::ChatCompletionNamedToolChoice::Function attr_accessor type: :function - def initialize: - ( - function: OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function, - type: :function - ) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_named_tool_choice - | OpenAI::BaseModel data - ) -> void + def initialize: ( + function: OpenAI::Chat::ChatCompletionNamedToolChoice::Function, + ?type: :function + ) -> void - def to_hash: -> OpenAI::Models::Chat::chat_completion_named_tool_choice + def to_hash: -> { + function: OpenAI::Chat::ChatCompletionNamedToolChoice::Function, + type: :function + } 
type function = { name: String } - class Function < OpenAI::BaseModel + class Function < OpenAI::Internal::Type::BaseModel attr_accessor name: String - def initialize: - (name: String) -> void - | ( - ?OpenAI::Models::Chat::ChatCompletionNamedToolChoice::function - | OpenAI::BaseModel data - ) -> void + def initialize: (name: String) -> void - def to_hash: -> OpenAI::Models::Chat::ChatCompletionNamedToolChoice::function + def to_hash: -> { name: String } end end end diff --git a/sig/openai/models/chat/chat_completion_named_tool_choice_custom.rbs b/sig/openai/models/chat/chat_completion_named_tool_choice_custom.rbs new file mode 100644 index 00000000..ec6ae0e9 --- /dev/null +++ b/sig/openai/models/chat/chat_completion_named_tool_choice_custom.rbs @@ -0,0 +1,39 @@ +module OpenAI + module Models + class ChatCompletionNamedToolChoiceCustom = Chat::ChatCompletionNamedToolChoiceCustom + + module Chat + type chat_completion_named_tool_choice_custom = + { + custom: OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom, + type: :custom + } + + class ChatCompletionNamedToolChoiceCustom < OpenAI::Internal::Type::BaseModel + attr_accessor custom: OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom + + attr_accessor type: :custom + + def initialize: ( + custom: OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom, + ?type: :custom + ) -> void + + def to_hash: -> { + custom: OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom, + type: :custom + } + + type custom = { name: String } + + class Custom < OpenAI::Internal::Type::BaseModel + attr_accessor name: String + + def initialize: (name: String) -> void + + def to_hash: -> { name: String } + end + end + end + end +end diff --git a/sig/openai/models/chat/chat_completion_prediction_content.rbs b/sig/openai/models/chat/chat_completion_prediction_content.rbs index 4bd18cc2..c52fc481 100644 --- a/sig/openai/models/chat/chat_completion_prediction_content.rbs +++ b/sig/openai/models/chat/chat_completion_prediction_content.rbs @@ -1,6 +1,5 @@ module OpenAI module Models - class ChatCompletionPredictionContent = Chat::ChatCompletionPredictionContent module Chat @@ -10,33 +9,30 @@ module OpenAI type: :content } - class ChatCompletionPredictionContent < OpenAI::BaseModel + class ChatCompletionPredictionContent < OpenAI::Internal::Type::BaseModel attr_accessor content: OpenAI::Models::Chat::ChatCompletionPredictionContent::content attr_accessor type: :content - def initialize: - ( - content: OpenAI::Models::Chat::ChatCompletionPredictionContent::content, - type: :content - ) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_prediction_content - | OpenAI::BaseModel data - ) -> void + def initialize: ( + content: OpenAI::Models::Chat::ChatCompletionPredictionContent::content, + ?type: :content + ) -> void - def to_hash: -> OpenAI::Models::Chat::chat_completion_prediction_content + def to_hash: -> { + content: OpenAI::Models::Chat::ChatCompletionPredictionContent::content, + type: :content + } type content = - String | ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] + String | ::Array[OpenAI::Chat::ChatCompletionContentPartText] - class Content < OpenAI::Union - type chat_completion_content_part_text_array = - ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] + module Content + extend OpenAI::Internal::Type::Union - ChatCompletionContentPartTextArray: chat_completion_content_part_text_array + def self?.variants: -> ::Array[OpenAI::Models::Chat::ChatCompletionPredictionContent::content] - private def self.variants: -> 
[[nil, String], [nil, ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]] + ChatCompletionContentPartTextArray: OpenAI::Internal::Type::Converter end end end diff --git a/sig/openai/models/chat/chat_completion_reasoning_effort.rbs b/sig/openai/models/chat/chat_completion_reasoning_effort.rbs index 0d91a009..28d9e504 100644 --- a/sig/openai/models/chat/chat_completion_reasoning_effort.rbs +++ b/sig/openai/models/chat/chat_completion_reasoning_effort.rbs @@ -1,11 +1,9 @@ module OpenAI module Models - - class ChatCompletionReasoningEffort = Chat::ChatCompletionReasoningEffort + module ChatCompletionReasoningEffort = Chat::ChatCompletionReasoningEffort module Chat - - class ChatCompletionReasoningEffort = OpenAI::Models::ReasoningEffort + module ChatCompletionReasoningEffort = OpenAI::Models::ReasoningEffort end end end diff --git a/sig/openai/models/chat/chat_completion_role.rbs b/sig/openai/models/chat/chat_completion_role.rbs index be395c69..96104afd 100644 --- a/sig/openai/models/chat/chat_completion_role.rbs +++ b/sig/openai/models/chat/chat_completion_role.rbs @@ -1,13 +1,14 @@ module OpenAI module Models - - class ChatCompletionRole = Chat::ChatCompletionRole + module ChatCompletionRole = Chat::ChatCompletionRole module Chat type chat_completion_role = :developer | :system | :user | :assistant | :tool | :function - class ChatCompletionRole < OpenAI::Enum + module ChatCompletionRole + extend OpenAI::Internal::Type::Enum + DEVELOPER: :developer SYSTEM: :system USER: :user @@ -15,7 +16,7 @@ module OpenAI TOOL: :tool FUNCTION: :function - def self.values: -> ::Array[OpenAI::Models::Chat::chat_completion_role] + def self?.values: -> ::Array[OpenAI::Models::Chat::chat_completion_role] end end end diff --git a/sig/openai/models/chat/chat_completion_store_message.rbs b/sig/openai/models/chat/chat_completion_store_message.rbs index e96afacc..f40fd577 100644 --- a/sig/openai/models/chat/chat_completion_store_message.rbs +++ b/sig/openai/models/chat/chat_completion_store_message.rbs @@ -1,22 +1,44 @@ module OpenAI module Models - class ChatCompletionStoreMessage = Chat::ChatCompletionStoreMessage module Chat - type chat_completion_store_message = { id: String } + type chat_completion_store_message = + { + id: String, + content_parts: ::Array[OpenAI::Models::Chat::ChatCompletionStoreMessage::content_part]? + } class ChatCompletionStoreMessage < OpenAI::Models::Chat::ChatCompletionMessage - attr_accessor id: String + def id: -> String - def initialize: - (id: String) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_store_message - | OpenAI::BaseModel data - ) -> void + def id=: (String _) -> String - def to_hash: -> OpenAI::Models::Chat::chat_completion_store_message + def content_parts: -> ::Array[OpenAI::Models::Chat::ChatCompletionStoreMessage::content_part]? + + def content_parts=: ( + ::Array[OpenAI::Models::Chat::ChatCompletionStoreMessage::content_part]? _ + ) -> ::Array[OpenAI::Models::Chat::ChatCompletionStoreMessage::content_part]? + + def initialize: ( + id: String, + ?content_parts: ::Array[OpenAI::Models::Chat::ChatCompletionStoreMessage::content_part]? + ) -> void + + def to_hash: -> { + id: String, + content_parts: ::Array[OpenAI::Models::Chat::ChatCompletionStoreMessage::content_part]? 
+ } + + type content_part = + OpenAI::Chat::ChatCompletionContentPartText + | OpenAI::Chat::ChatCompletionContentPartImage + + module ContentPart + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::Chat::ChatCompletionStoreMessage::content_part] + end end end end diff --git a/sig/openai/models/chat/chat_completion_stream_options.rbs b/sig/openai/models/chat/chat_completion_stream_options.rbs index 8c4502bd..7217a030 100644 --- a/sig/openai/models/chat/chat_completion_stream_options.rbs +++ b/sig/openai/models/chat/chat_completion_stream_options.rbs @@ -1,24 +1,26 @@ module OpenAI module Models - class ChatCompletionStreamOptions = Chat::ChatCompletionStreamOptions module Chat - type chat_completion_stream_options = { include_usage: bool } + type chat_completion_stream_options = + { include_obfuscation: bool, include_usage: bool } + + class ChatCompletionStreamOptions < OpenAI::Internal::Type::BaseModel + attr_reader include_obfuscation: bool? + + def include_obfuscation=: (bool) -> bool - class ChatCompletionStreamOptions < OpenAI::BaseModel attr_reader include_usage: bool? def include_usage=: (bool) -> bool - def initialize: - (include_usage: bool) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_stream_options - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?include_obfuscation: bool, + ?include_usage: bool + ) -> void - def to_hash: -> OpenAI::Models::Chat::chat_completion_stream_options + def to_hash: -> { include_obfuscation: bool, include_usage: bool } end end end diff --git a/sig/openai/models/chat/chat_completion_system_message_param.rbs b/sig/openai/models/chat/chat_completion_system_message_param.rbs index e5dc4d84..e1cd80e9 100644 --- a/sig/openai/models/chat/chat_completion_system_message_param.rbs +++ b/sig/openai/models/chat/chat_completion_system_message_param.rbs @@ -1,6 +1,5 @@ module OpenAI module Models - class ChatCompletionSystemMessageParam = Chat::ChatCompletionSystemMessageParam module Chat @@ -11,7 +10,7 @@ module OpenAI name: String } - class ChatCompletionSystemMessageParam < OpenAI::BaseModel + class ChatCompletionSystemMessageParam < OpenAI::Internal::Type::BaseModel attr_accessor content: OpenAI::Models::Chat::ChatCompletionSystemMessageParam::content attr_accessor role: :system @@ -20,29 +19,27 @@ module OpenAI def name=: (String) -> String - def initialize: - ( - content: OpenAI::Models::Chat::ChatCompletionSystemMessageParam::content, - name: String, - role: :system - ) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_system_message_param - | OpenAI::BaseModel data - ) -> void + def initialize: ( + content: OpenAI::Models::Chat::ChatCompletionSystemMessageParam::content, + ?name: String, + ?role: :system + ) -> void - def to_hash: -> OpenAI::Models::Chat::chat_completion_system_message_param + def to_hash: -> { + content: OpenAI::Models::Chat::ChatCompletionSystemMessageParam::content, + role: :system, + name: String + } type content = - String | ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] + String | ::Array[OpenAI::Chat::ChatCompletionContentPartText] - class Content < OpenAI::Union - type chat_completion_content_part_text_array = - ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] + module Content + extend OpenAI::Internal::Type::Union - ChatCompletionContentPartTextArray: chat_completion_content_part_text_array + def self?.variants: -> ::Array[OpenAI::Models::Chat::ChatCompletionSystemMessageParam::content] - private def self.variants: -> [[nil, String], [nil, 
::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]] + ChatCompletionContentPartTextArray: OpenAI::Internal::Type::Converter end end end diff --git a/sig/openai/models/chat/chat_completion_token_logprob.rbs b/sig/openai/models/chat/chat_completion_token_logprob.rbs index cd55959d..3ad85411 100644 --- a/sig/openai/models/chat/chat_completion_token_logprob.rbs +++ b/sig/openai/models/chat/chat_completion_token_logprob.rbs @@ -1,6 +1,5 @@ module OpenAI module Models - class ChatCompletionTokenLogprob = Chat::ChatCompletionTokenLogprob module Chat @@ -9,50 +8,53 @@ module OpenAI token: String, bytes: ::Array[Integer]?, logprob: Float, - top_logprobs: ::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob] + top_logprobs: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob::TopLogprob] } - class ChatCompletionTokenLogprob < OpenAI::BaseModel + class ChatCompletionTokenLogprob < OpenAI::Internal::Type::BaseModel attr_accessor token: String attr_accessor bytes: ::Array[Integer]? attr_accessor logprob: Float - attr_accessor top_logprobs: ::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob] + attr_accessor top_logprobs: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob::TopLogprob] - def initialize: - ( - token: String, - bytes: ::Array[Integer]?, - logprob: Float, - top_logprobs: ::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob] - ) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_token_logprob - | OpenAI::BaseModel data - ) -> void + def initialize: ( + token: String, + bytes: ::Array[Integer]?, + logprob: Float, + top_logprobs: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob::TopLogprob] + ) -> void - def to_hash: -> OpenAI::Models::Chat::chat_completion_token_logprob + def to_hash: -> { + token: String, + bytes: ::Array[Integer]?, + logprob: Float, + top_logprobs: ::Array[OpenAI::Chat::ChatCompletionTokenLogprob::TopLogprob] + } type top_logprob = { token: String, bytes: ::Array[Integer]?, logprob: Float } - class TopLogprob < OpenAI::BaseModel + class TopLogprob < OpenAI::Internal::Type::BaseModel attr_accessor token: String attr_accessor bytes: ::Array[Integer]? 
attr_accessor logprob: Float - def initialize: - (token: String, bytes: ::Array[Integer]?, logprob: Float) -> void - | ( - ?OpenAI::Models::Chat::ChatCompletionTokenLogprob::top_logprob - | OpenAI::BaseModel data - ) -> void + def initialize: ( + token: String, + bytes: ::Array[Integer]?, + logprob: Float + ) -> void - def to_hash: -> OpenAI::Models::Chat::ChatCompletionTokenLogprob::top_logprob + def to_hash: -> { + token: String, + bytes: ::Array[Integer]?, + logprob: Float + } end end end diff --git a/sig/openai/models/chat/chat_completion_tool.rbs b/sig/openai/models/chat/chat_completion_tool.rbs index 6dc1b119..34abaf37 100644 --- a/sig/openai/models/chat/chat_completion_tool.rbs +++ b/sig/openai/models/chat/chat_completion_tool.rbs @@ -1,27 +1,16 @@ module OpenAI module Models - - class ChatCompletionTool = Chat::ChatCompletionTool + module ChatCompletionTool = Chat::ChatCompletionTool module Chat type chat_completion_tool = - { function: OpenAI::Models::FunctionDefinition, type: :function } + OpenAI::Chat::ChatCompletionFunctionTool + | OpenAI::Chat::ChatCompletionCustomTool - class ChatCompletionTool < OpenAI::BaseModel - attr_accessor function: OpenAI::Models::FunctionDefinition + module ChatCompletionTool + extend OpenAI::Internal::Type::Union - attr_accessor type: :function - - def initialize: - ( - function: OpenAI::Models::FunctionDefinition, - type: :function - ) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_tool | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Chat::chat_completion_tool + def self?.variants: -> ::Array[OpenAI::Models::Chat::chat_completion_tool] end end end diff --git a/sig/openai/models/chat/chat_completion_tool_choice_option.rbs b/sig/openai/models/chat/chat_completion_tool_choice_option.rbs index d5d57aef..383ccb44 100644 --- a/sig/openai/models/chat/chat_completion_tool_choice_option.rbs +++ b/sig/openai/models/chat/chat_completion_tool_choice_option.rbs @@ -1,25 +1,30 @@ module OpenAI module Models - - class ChatCompletionToolChoiceOption = Chat::ChatCompletionToolChoiceOption + module ChatCompletionToolChoiceOption = Chat::ChatCompletionToolChoiceOption module Chat type chat_completion_tool_choice_option = OpenAI::Models::Chat::ChatCompletionToolChoiceOption::auto - | OpenAI::Models::Chat::ChatCompletionNamedToolChoice + | OpenAI::Chat::ChatCompletionAllowedToolChoice + | OpenAI::Chat::ChatCompletionNamedToolChoice + | OpenAI::Chat::ChatCompletionNamedToolChoiceCustom + + module ChatCompletionToolChoiceOption + extend OpenAI::Internal::Type::Union - class ChatCompletionToolChoiceOption < OpenAI::Union type auto = :none | :auto | :required - class Auto < OpenAI::Enum + module Auto + extend OpenAI::Internal::Type::Enum + NONE: :none AUTO: :auto REQUIRED: :required - def self.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionToolChoiceOption::auto] + def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionToolChoiceOption::auto] end - private def self.variants: -> [[nil, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::auto], [nil, OpenAI::Models::Chat::ChatCompletionNamedToolChoice]] + def self?.variants: -> ::Array[OpenAI::Models::Chat::chat_completion_tool_choice_option] end end end diff --git a/sig/openai/models/chat/chat_completion_tool_message_param.rbs b/sig/openai/models/chat/chat_completion_tool_message_param.rbs index 5082b0dd..10c157ab 100644 --- a/sig/openai/models/chat/chat_completion_tool_message_param.rbs +++ b/sig/openai/models/chat/chat_completion_tool_message_param.rbs @@ -1,6 +1,5 @@ 
module OpenAI module Models - class ChatCompletionToolMessageParam = Chat::ChatCompletionToolMessageParam module Chat @@ -11,36 +10,34 @@ module OpenAI tool_call_id: String } - class ChatCompletionToolMessageParam < OpenAI::BaseModel + class ChatCompletionToolMessageParam < OpenAI::Internal::Type::BaseModel attr_accessor content: OpenAI::Models::Chat::ChatCompletionToolMessageParam::content attr_accessor role: :tool attr_accessor tool_call_id: String - def initialize: - ( - content: OpenAI::Models::Chat::ChatCompletionToolMessageParam::content, - tool_call_id: String, - role: :tool - ) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_tool_message_param - | OpenAI::BaseModel data - ) -> void + def initialize: ( + content: OpenAI::Models::Chat::ChatCompletionToolMessageParam::content, + tool_call_id: String, + ?role: :tool + ) -> void - def to_hash: -> OpenAI::Models::Chat::chat_completion_tool_message_param + def to_hash: -> { + content: OpenAI::Models::Chat::ChatCompletionToolMessageParam::content, + role: :tool, + tool_call_id: String + } type content = - String | ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] + String | ::Array[OpenAI::Chat::ChatCompletionContentPartText] - class Content < OpenAI::Union - type chat_completion_content_part_text_array = - ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] + module Content + extend OpenAI::Internal::Type::Union - ChatCompletionContentPartTextArray: chat_completion_content_part_text_array + def self?.variants: -> ::Array[OpenAI::Models::Chat::ChatCompletionToolMessageParam::content] - private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]] + ChatCompletionContentPartTextArray: OpenAI::Internal::Type::Converter end end end diff --git a/sig/openai/models/chat/chat_completion_user_message_param.rbs b/sig/openai/models/chat/chat_completion_user_message_param.rbs index f9b825b4..a021b82a 100644 --- a/sig/openai/models/chat/chat_completion_user_message_param.rbs +++ b/sig/openai/models/chat/chat_completion_user_message_param.rbs @@ -1,6 +1,5 @@ module OpenAI module Models - class ChatCompletionUserMessageParam = Chat::ChatCompletionUserMessageParam module Chat @@ -11,7 +10,7 @@ module OpenAI name: String } - class ChatCompletionUserMessageParam < OpenAI::BaseModel + class ChatCompletionUserMessageParam < OpenAI::Internal::Type::BaseModel attr_accessor content: OpenAI::Models::Chat::ChatCompletionUserMessageParam::content attr_accessor role: :user @@ -20,29 +19,27 @@ module OpenAI def name=: (String) -> String - def initialize: - ( - content: OpenAI::Models::Chat::ChatCompletionUserMessageParam::content, - name: String, - role: :user - ) -> void - | ( - ?OpenAI::Models::Chat::chat_completion_user_message_param - | OpenAI::BaseModel data - ) -> void + def initialize: ( + content: OpenAI::Models::Chat::ChatCompletionUserMessageParam::content, + ?name: String, + ?role: :user + ) -> void - def to_hash: -> OpenAI::Models::Chat::chat_completion_user_message_param + def to_hash: -> { + content: OpenAI::Models::Chat::ChatCompletionUserMessageParam::content, + role: :user, + name: String + } type content = String | ::Array[OpenAI::Models::Chat::chat_completion_content_part] - class Content < OpenAI::Union - type chat_completion_content_part_array = - ::Array[OpenAI::Models::Chat::chat_completion_content_part] + module Content + extend OpenAI::Internal::Type::Union - ChatCompletionContentPartArray: chat_completion_content_part_array + def self?.variants: -> 
::Array[OpenAI::Models::Chat::ChatCompletionUserMessageParam::content] - private def self.variants: -> [[nil, String], [nil, ::Array[OpenAI::Models::Chat::chat_completion_content_part]]] + ChatCompletionContentPartArray: OpenAI::Internal::Type::Converter end end end diff --git a/sig/openai/models/chat/completion_create_params.rbs b/sig/openai/models/chat/completion_create_params.rbs index c32fa792..e02095c4 100644 --- a/sig/openai/models/chat/completion_create_params.rbs +++ b/sig/openai/models/chat/completion_create_params.rbs @@ -5,10 +5,10 @@ module OpenAI { messages: ::Array[OpenAI::Models::Chat::chat_completion_message_param], model: OpenAI::Models::Chat::CompletionCreateParams::model, - audio: OpenAI::Models::Chat::ChatCompletionAudioParam?, + audio: OpenAI::Chat::ChatCompletionAudioParam?, frequency_penalty: Float?, function_call: OpenAI::Models::Chat::CompletionCreateParams::function_call, - functions: ::Array[OpenAI::Models::Chat::CompletionCreateParams::Function], + functions: ::Array[OpenAI::Chat::CompletionCreateParams::Function], logit_bias: ::Hash[Symbol, Integer]?, logprobs: bool?, max_completion_tokens: Integer?, @@ -17,34 +17,37 @@ module OpenAI modalities: ::Array[OpenAI::Models::Chat::CompletionCreateParams::modality]?, n: Integer?, parallel_tool_calls: bool, - prediction: OpenAI::Models::Chat::ChatCompletionPredictionContent?, + prediction: OpenAI::Chat::ChatCompletionPredictionContent?, presence_penalty: Float?, + prompt_cache_key: String, reasoning_effort: OpenAI::Models::reasoning_effort?, response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format, + safety_identifier: String, seed: Integer?, service_tier: OpenAI::Models::Chat::CompletionCreateParams::service_tier?, stop: OpenAI::Models::Chat::CompletionCreateParams::stop?, store: bool?, - stream_options: OpenAI::Models::Chat::ChatCompletionStreamOptions?, + stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, temperature: Float?, tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option, - tools: ::Array[OpenAI::Models::Chat::ChatCompletionTool], + tools: ::Array[OpenAI::Models::Chat::chat_completion_tool], top_logprobs: Integer?, top_p: Float?, user: String, - web_search_options: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions + verbosity: OpenAI::Models::Chat::CompletionCreateParams::verbosity?, + web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class CompletionCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class CompletionCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor messages: ::Array[OpenAI::Models::Chat::chat_completion_message_param] attr_accessor model: OpenAI::Models::Chat::CompletionCreateParams::model - attr_accessor audio: OpenAI::Models::Chat::ChatCompletionAudioParam? + attr_accessor audio: OpenAI::Chat::ChatCompletionAudioParam? attr_accessor frequency_penalty: Float? @@ -54,11 +57,11 @@ module OpenAI OpenAI::Models::Chat::CompletionCreateParams::function_call ) -> OpenAI::Models::Chat::CompletionCreateParams::function_call - attr_reader functions: ::Array[OpenAI::Models::Chat::CompletionCreateParams::Function]? + attr_reader functions: ::Array[OpenAI::Chat::CompletionCreateParams::Function]? 
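A note on the pattern repeated in the message-param hunks above: each model now declares a single `initialize` overload whose defaulted keywords are marked with `?`, replacing the old `| (?... | OpenAI::BaseModel data) -> void` escape-hatch overload, and `to_hash` returns an inline record type instead of a named alias. A minimal usage sketch, assuming the generated Ruby classes mirror these signatures (the snippet is illustrative and not part of the diff):

require "openai"

# `role:` defaults to :tool per `?role: :tool`, so it can be omitted at the call site.
msg = OpenAI::Chat::ChatCompletionToolMessageParam.new(
  content: "4",
  tool_call_id: "call_abc123" # hypothetical tool-call ID
)

# `to_hash` is now typed as a record rather than a named type alias.
msg.to_hash # => {content: "4", role: :tool, tool_call_id: "call_abc123"}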
def functions=: ( - ::Array[OpenAI::Models::Chat::CompletionCreateParams::Function] - ) -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::Function] + ::Array[OpenAI::Chat::CompletionCreateParams::Function] + ) -> ::Array[OpenAI::Chat::CompletionCreateParams::Function] attr_accessor logit_bias: ::Hash[Symbol, Integer]? @@ -78,10 +81,14 @@ module OpenAI def parallel_tool_calls=: (bool) -> bool - attr_accessor prediction: OpenAI::Models::Chat::ChatCompletionPredictionContent? + attr_accessor prediction: OpenAI::Chat::ChatCompletionPredictionContent? attr_accessor presence_penalty: Float? + attr_reader prompt_cache_key: String? + + def prompt_cache_key=: (String) -> String + attr_accessor reasoning_effort: OpenAI::Models::reasoning_effort? attr_reader response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format? @@ -90,6 +97,10 @@ module OpenAI OpenAI::Models::Chat::CompletionCreateParams::response_format ) -> OpenAI::Models::Chat::CompletionCreateParams::response_format + attr_reader safety_identifier: String? + + def safety_identifier=: (String) -> String + attr_accessor seed: Integer? attr_accessor service_tier: OpenAI::Models::Chat::CompletionCreateParams::service_tier? @@ -98,7 +109,7 @@ module OpenAI attr_accessor store: bool? - attr_accessor stream_options: OpenAI::Models::Chat::ChatCompletionStreamOptions? + attr_accessor stream_options: OpenAI::Chat::ChatCompletionStreamOptions? attr_accessor temperature: Float? @@ -108,11 +119,11 @@ module OpenAI OpenAI::Models::Chat::chat_completion_tool_choice_option ) -> OpenAI::Models::Chat::chat_completion_tool_choice_option - attr_reader tools: ::Array[OpenAI::Models::Chat::ChatCompletionTool]? + attr_reader tools: ::Array[OpenAI::Models::Chat::chat_completion_tool]? def tools=: ( - ::Array[OpenAI::Models::Chat::ChatCompletionTool] - ) -> ::Array[OpenAI::Models::Chat::ChatCompletionTool] + ::Array[OpenAI::Models::Chat::chat_completion_tool] + ) -> ::Array[OpenAI::Models::Chat::chat_completion_tool] attr_accessor top_logprobs: Integer? @@ -122,74 +133,115 @@ module OpenAI def user=: (String) -> String - attr_reader web_search_options: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions? + attr_accessor verbosity: OpenAI::Models::Chat::CompletionCreateParams::verbosity? + + attr_reader web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions? 
def web_search_options=: ( - OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions - ) -> OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions - - def initialize: - ( - messages: ::Array[OpenAI::Models::Chat::chat_completion_message_param], - model: OpenAI::Models::Chat::CompletionCreateParams::model, - audio: OpenAI::Models::Chat::ChatCompletionAudioParam?, - frequency_penalty: Float?, - function_call: OpenAI::Models::Chat::CompletionCreateParams::function_call, - functions: ::Array[OpenAI::Models::Chat::CompletionCreateParams::Function], - logit_bias: ::Hash[Symbol, Integer]?, - logprobs: bool?, - max_completion_tokens: Integer?, - max_tokens: Integer?, - metadata: OpenAI::Models::metadata?, - modalities: ::Array[OpenAI::Models::Chat::CompletionCreateParams::modality]?, - n: Integer?, - parallel_tool_calls: bool, - prediction: OpenAI::Models::Chat::ChatCompletionPredictionContent?, - presence_penalty: Float?, - reasoning_effort: OpenAI::Models::reasoning_effort?, - response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format, - seed: Integer?, - service_tier: OpenAI::Models::Chat::CompletionCreateParams::service_tier?, - stop: OpenAI::Models::Chat::CompletionCreateParams::stop?, - store: bool?, - stream_options: OpenAI::Models::Chat::ChatCompletionStreamOptions?, - temperature: Float?, - tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option, - tools: ::Array[OpenAI::Models::Chat::ChatCompletionTool], - top_logprobs: Integer?, - top_p: Float?, - user: String, - web_search_options: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::Chat::completion_create_params - | OpenAI::BaseModel data - ) -> void + OpenAI::Chat::CompletionCreateParams::WebSearchOptions + ) -> OpenAI::Chat::CompletionCreateParams::WebSearchOptions - def to_hash: -> OpenAI::Models::Chat::completion_create_params + def initialize: ( + messages: ::Array[OpenAI::Models::Chat::chat_completion_message_param], + model: OpenAI::Models::Chat::CompletionCreateParams::model, + ?audio: OpenAI::Chat::ChatCompletionAudioParam?, + ?frequency_penalty: Float?, + ?function_call: OpenAI::Models::Chat::CompletionCreateParams::function_call, + ?functions: ::Array[OpenAI::Chat::CompletionCreateParams::Function], + ?logit_bias: ::Hash[Symbol, Integer]?, + ?logprobs: bool?, + ?max_completion_tokens: Integer?, + ?max_tokens: Integer?, + ?metadata: OpenAI::Models::metadata?, + ?modalities: ::Array[OpenAI::Models::Chat::CompletionCreateParams::modality]?, + ?n: Integer?, + ?parallel_tool_calls: bool, + ?prediction: OpenAI::Chat::ChatCompletionPredictionContent?, + ?presence_penalty: Float?, + ?prompt_cache_key: String, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, + ?response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format, + ?safety_identifier: String, + ?seed: Integer?, + ?service_tier: OpenAI::Models::Chat::CompletionCreateParams::service_tier?, + ?stop: OpenAI::Models::Chat::CompletionCreateParams::stop?, + ?store: bool?, + ?stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, + ?temperature: Float?, + ?tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option, + ?tools: ::Array[OpenAI::Models::Chat::chat_completion_tool], + ?top_logprobs: Integer?, + ?top_p: Float?, + ?user: String, + ?verbosity: OpenAI::Models::Chat::CompletionCreateParams::verbosity?, + ?web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions, + ?request_options: 
OpenAI::request_opts + ) -> void + + def to_hash: -> { + messages: ::Array[OpenAI::Models::Chat::chat_completion_message_param], + model: OpenAI::Models::Chat::CompletionCreateParams::model, + audio: OpenAI::Chat::ChatCompletionAudioParam?, + frequency_penalty: Float?, + function_call: OpenAI::Models::Chat::CompletionCreateParams::function_call, + functions: ::Array[OpenAI::Chat::CompletionCreateParams::Function], + logit_bias: ::Hash[Symbol, Integer]?, + logprobs: bool?, + max_completion_tokens: Integer?, + max_tokens: Integer?, + metadata: OpenAI::Models::metadata?, + modalities: ::Array[OpenAI::Models::Chat::CompletionCreateParams::modality]?, + n: Integer?, + parallel_tool_calls: bool, + prediction: OpenAI::Chat::ChatCompletionPredictionContent?, + presence_penalty: Float?, + prompt_cache_key: String, + reasoning_effort: OpenAI::Models::reasoning_effort?, + response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format, + safety_identifier: String, + seed: Integer?, + service_tier: OpenAI::Models::Chat::CompletionCreateParams::service_tier?, + stop: OpenAI::Models::Chat::CompletionCreateParams::stop?, + store: bool?, + stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, + temperature: Float?, + tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option, + tools: ::Array[OpenAI::Models::Chat::chat_completion_tool], + top_logprobs: Integer?, + top_p: Float?, + user: String, + verbosity: OpenAI::Models::Chat::CompletionCreateParams::verbosity?, + web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions, + request_options: OpenAI::RequestOptions + } type model = String | OpenAI::Models::chat_model - class Model < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, OpenAI::Models::chat_model]] + module Model + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::model] end type function_call = OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::function_call_mode - | OpenAI::Models::Chat::ChatCompletionFunctionCallOption + | OpenAI::Chat::ChatCompletionFunctionCallOption + + module FunctionCall + extend OpenAI::Internal::Type::Union - class FunctionCall < OpenAI::Union type function_call_mode = :none | :auto - class FunctionCallMode < OpenAI::Enum + module FunctionCallMode + extend OpenAI::Internal::Type::Enum + NONE: :none AUTO: :auto - def self.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::function_call_mode] + def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::function_call_mode] end - private def self.variants: -> [[nil, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::function_call_mode], [nil, OpenAI::Models::Chat::ChatCompletionFunctionCallOption]] + def self?.variants: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::function_call] end type function = @@ -199,7 +251,7 @@ module OpenAI parameters: OpenAI::Models::function_parameters } - class Function < OpenAI::BaseModel + class Function < OpenAI::Internal::Type::BaseModel attr_accessor name: String attr_reader description: String? 
@@ -212,116 +264,134 @@ module OpenAI OpenAI::Models::function_parameters ) -> OpenAI::Models::function_parameters - def initialize: - ( - name: String, - description: String, - parameters: OpenAI::Models::function_parameters - ) -> void - | ( - ?OpenAI::Models::Chat::CompletionCreateParams::function - | OpenAI::BaseModel data - ) -> void + def initialize: ( + name: String, + ?description: String, + ?parameters: OpenAI::Models::function_parameters + ) -> void - def to_hash: -> OpenAI::Models::Chat::CompletionCreateParams::function + def to_hash: -> { + name: String, + description: String, + parameters: OpenAI::Models::function_parameters + } end type modality = :text | :audio - class Modality < OpenAI::Enum + module Modality + extend OpenAI::Internal::Type::Enum + TEXT: :text AUDIO: :audio - def self.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::modality] + def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::modality] end type response_format = - OpenAI::Models::ResponseFormatText - | OpenAI::Models::ResponseFormatJSONSchema - | OpenAI::Models::ResponseFormatJSONObject + OpenAI::ResponseFormatText + | OpenAI::ResponseFormatJSONSchema + | OpenAI::ResponseFormatJSONObject + + module ResponseFormat + extend OpenAI::Internal::Type::Union - class ResponseFormat < OpenAI::Union - private def self.variants: -> [[nil, OpenAI::Models::ResponseFormatText], [nil, OpenAI::Models::ResponseFormatJSONSchema], [nil, OpenAI::Models::ResponseFormatJSONObject]] + def self?.variants: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::response_format] end - type service_tier = :auto | :default + type service_tier = :auto | :default | :flex | :scale | :priority + + module ServiceTier + extend OpenAI::Internal::Type::Enum - class ServiceTier < OpenAI::Enum AUTO: :auto DEFAULT: :default + FLEX: :flex + SCALE: :scale + PRIORITY: :priority - def self.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::service_tier] + def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::service_tier] end type stop = (String | ::Array[String])? - class Stop < OpenAI::Union - type string_array = ::Array[String] + module Stop + extend OpenAI::Internal::Type::Union - StringArray: string_array + def self?.variants: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::stop] - private def self.variants: -> [[nil, String], [nil, ::Array[String]]] + StringArray: OpenAI::Internal::Type::Converter + end + + type verbosity = :low | :medium | :high + + module Verbosity + extend OpenAI::Internal::Type::Enum + + LOW: :low + MEDIUM: :medium + HIGH: :high + + def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::verbosity] end type web_search_options = { search_context_size: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::search_context_size, - user_location: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation? + user_location: OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation? } - class WebSearchOptions < OpenAI::BaseModel + class WebSearchOptions < OpenAI::Internal::Type::BaseModel attr_reader search_context_size: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::search_context_size? 
def search_context_size=: ( OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::search_context_size ) -> OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::search_context_size - attr_accessor user_location: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation? + attr_accessor user_location: OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation? - def initialize: - ( - search_context_size: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::search_context_size, - user_location: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation? - ) -> void - | ( - ?OpenAI::Models::Chat::CompletionCreateParams::web_search_options - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?search_context_size: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::search_context_size, + ?user_location: OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation? + ) -> void - def to_hash: -> OpenAI::Models::Chat::CompletionCreateParams::web_search_options + def to_hash: -> { + search_context_size: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::search_context_size, + user_location: OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation? + } type search_context_size = :low | :medium | :high - class SearchContextSize < OpenAI::Enum + module SearchContextSize + extend OpenAI::Internal::Type::Enum + LOW: :low MEDIUM: :medium HIGH: :high - def self.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::search_context_size] + def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::search_context_size] end type user_location = { - approximate: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate, + approximate: OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate, type: :approximate } - class UserLocation < OpenAI::BaseModel - attr_accessor approximate: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate + class UserLocation < OpenAI::Internal::Type::BaseModel + attr_accessor approximate: OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate attr_accessor type: :approximate - def initialize: - ( - approximate: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate, - type: :approximate - ) -> void - | ( - ?OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::user_location - | OpenAI::BaseModel data - ) -> void + def initialize: ( + approximate: OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate, + ?type: :approximate + ) -> void - def to_hash: -> OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::user_location + def to_hash: -> { + approximate: OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate, + type: :approximate + } type approximate = { @@ -331,7 +401,7 @@ module OpenAI timezone: String } - class Approximate < OpenAI::BaseModel + class Approximate < OpenAI::Internal::Type::BaseModel attr_reader city: String? 
def city=: (String) -> String @@ -348,19 +418,19 @@ module OpenAI def timezone=: (String) -> String - def initialize: - ( - city: String, - country: String, - region: String, - timezone: String - ) -> void - | ( - ?OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::approximate - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::approximate + def initialize: ( + ?city: String, + ?country: String, + ?region: String, + ?timezone: String + ) -> void + + def to_hash: -> { + city: String, + country: String, + region: String, + timezone: String + } end end end diff --git a/sig/openai/models/chat/completion_delete_params.rbs b/sig/openai/models/chat/completion_delete_params.rbs index ea7b5315..d3ddc656 100644 --- a/sig/openai/models/chat/completion_delete_params.rbs +++ b/sig/openai/models/chat/completion_delete_params.rbs @@ -1,20 +1,16 @@ module OpenAI module Models module Chat - type completion_delete_params = { } & OpenAI::request_parameters + type completion_delete_params = + { } & OpenAI::Internal::Type::request_parameters - class CompletionDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class CompletionDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::Chat::completion_delete_params - | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::Chat::completion_delete_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/chat/completion_list_params.rbs b/sig/openai/models/chat/completion_list_params.rbs index b135cfcf..692b13c1 100644 --- a/sig/openai/models/chat/completion_list_params.rbs +++ b/sig/openai/models/chat/completion_list_params.rbs @@ -9,11 +9,11 @@ module OpenAI model: String, order: OpenAI::Models::Chat::CompletionListParams::order } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class CompletionListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class CompletionListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_reader after: String? 
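With the chat `CompletionCreateParams` hunks complete, here is a hedged sketch of a request exercising the newly added fields (`prompt_cache_key`, `safety_identifier`, `verbosity`) and the widened `service_tier` enum; the client entry point is assumed, since only the params appear in this diff:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

chat = client.chat.completions.create(
  model: :"gpt-5",
  messages: [{role: :user, content: "Summarize RBS in one sentence."}],
  verbosity: :low,                      # new enum: :low | :medium | :high
  service_tier: :priority,              # enum gains :flex, :scale, :priority
  prompt_cache_key: "rbs-summaries-v1", # hypothetical cache key
  safety_identifier: "user-1234"        # hypothetical stable end-user ID
)
puts chat.choices.first.message.content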
@@ -35,29 +35,33 @@ module OpenAI OpenAI::Models::Chat::CompletionListParams::order ) -> OpenAI::Models::Chat::CompletionListParams::order - def initialize: - ( - after: String, - limit: Integer, - metadata: OpenAI::Models::metadata?, - model: String, - order: OpenAI::Models::Chat::CompletionListParams::order, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::Chat::completion_list_params - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Chat::completion_list_params + def initialize: ( + ?after: String, + ?limit: Integer, + ?metadata: OpenAI::Models::metadata?, + ?model: String, + ?order: OpenAI::Models::Chat::CompletionListParams::order, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + after: String, + limit: Integer, + metadata: OpenAI::Models::metadata?, + model: String, + order: OpenAI::Models::Chat::CompletionListParams::order, + request_options: OpenAI::RequestOptions + } type order = :asc | :desc - class Order < OpenAI::Enum + module Order + extend OpenAI::Internal::Type::Enum + ASC: :asc DESC: :desc - def self.values: -> ::Array[OpenAI::Models::Chat::CompletionListParams::order] + def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionListParams::order] end end end diff --git a/sig/openai/models/chat/completion_retrieve_params.rbs b/sig/openai/models/chat/completion_retrieve_params.rbs index e29df6fe..e91d58a6 100644 --- a/sig/openai/models/chat/completion_retrieve_params.rbs +++ b/sig/openai/models/chat/completion_retrieve_params.rbs @@ -1,20 +1,16 @@ module OpenAI module Models module Chat - type completion_retrieve_params = { } & OpenAI::request_parameters + type completion_retrieve_params = + { } & OpenAI::Internal::Type::request_parameters - class CompletionRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class CompletionRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::Chat::completion_retrieve_params - | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::Chat::completion_retrieve_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/chat/completion_update_params.rbs b/sig/openai/models/chat/completion_update_params.rbs index 7551639e..6a169ae6 100644 --- a/sig/openai/models/chat/completion_update_params.rbs +++ b/sig/openai/models/chat/completion_update_params.rbs @@ -2,25 +2,24 @@ module OpenAI module Models module Chat type completion_update_params = - { metadata: OpenAI::Models::metadata? } & OpenAI::request_parameters + { metadata: OpenAI::Models::metadata? } + & OpenAI::Internal::Type::request_parameters - class CompletionUpdateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class CompletionUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor metadata: OpenAI::Models::metadata? 
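The delete/list/retrieve param models above (and the update and message-list params that follow) back the stored chat-completions endpoints. A brief sketch of how they are typically driven; the resource methods themselves are assumed, as they are not shown in this diff:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Every keyword is optional (`?after:`, `?limit:`, ...), matching the signatures.
page = client.chat.completions.list(limit: 20, order: :desc)
page.auto_paging_each { |completion| puts completion.id }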
- def initialize: - ( - metadata: OpenAI::Models::metadata?, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::Chat::completion_update_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + metadata: OpenAI::Models::metadata?, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::Chat::completion_update_params + def to_hash: -> { + metadata: OpenAI::Models::metadata?, + request_options: OpenAI::RequestOptions + } end end end diff --git a/sig/openai/models/chat/completions/message_list_params.rbs b/sig/openai/models/chat/completions/message_list_params.rbs index 232ef376..0e65a06b 100644 --- a/sig/openai/models/chat/completions/message_list_params.rbs +++ b/sig/openai/models/chat/completions/message_list_params.rbs @@ -8,11 +8,11 @@ module OpenAI limit: Integer, order: OpenAI::Models::Chat::Completions::MessageListParams::order } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class MessageListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class MessageListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_reader after: String? @@ -28,27 +28,29 @@ module OpenAI OpenAI::Models::Chat::Completions::MessageListParams::order ) -> OpenAI::Models::Chat::Completions::MessageListParams::order - def initialize: - ( - after: String, - limit: Integer, - order: OpenAI::Models::Chat::Completions::MessageListParams::order, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::Chat::Completions::message_list_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?after: String, + ?limit: Integer, + ?order: OpenAI::Models::Chat::Completions::MessageListParams::order, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::Chat::Completions::message_list_params + def to_hash: -> { + after: String, + limit: Integer, + order: OpenAI::Models::Chat::Completions::MessageListParams::order, + request_options: OpenAI::RequestOptions + } type order = :asc | :desc - class Order < OpenAI::Enum + module Order + extend OpenAI::Internal::Type::Enum + ASC: :asc DESC: :desc - def self.values: -> ::Array[OpenAI::Models::Chat::Completions::MessageListParams::order] + def self?.values: -> ::Array[OpenAI::Models::Chat::Completions::MessageListParams::order] end end end diff --git a/sig/openai/models/chat_model.rbs b/sig/openai/models/chat_model.rbs index 5940f36f..afba9c16 100644 --- a/sig/openai/models/chat_model.rbs +++ b/sig/openai/models/chat_model.rbs @@ -1,7 +1,24 @@ module OpenAI module Models type chat_model = - :"o3-mini" + :"gpt-5" + | :"gpt-5-mini" + | :"gpt-5-nano" + | :"gpt-5-2025-08-07" + | :"gpt-5-mini-2025-08-07" + | :"gpt-5-nano-2025-08-07" + | :"gpt-5-chat-latest" + | :"gpt-4.1" + | :"gpt-4.1-mini" + | :"gpt-4.1-nano" + | :"gpt-4.1-2025-04-14" + | :"gpt-4.1-mini-2025-04-14" + | :"gpt-4.1-nano-2025-04-14" + | :"o4-mini" + | :"o4-mini-2025-04-16" + | :o3 + | :"o3-2025-04-16" + | :"o3-mini" | :"o3-mini-2025-01-31" | :o1 | :"o1-2024-12-17" @@ -9,11 +26,6 @@ module OpenAI | :"o1-preview-2024-09-12" | :"o1-mini" | :"o1-mini-2024-09-12" - | :"computer-use-preview" - | :"computer-use-preview-2025-02-04" - | :"computer-use-preview-2025-03-11" - | :"gpt-4.5-preview" - | :"gpt-4.5-preview-2025-02-27" | :"gpt-4o" | :"gpt-4o-2024-11-20" | :"gpt-4o-2024-08-06" @@ -21,9 +33,15 @@ 
module OpenAI | :"gpt-4o-audio-preview" | :"gpt-4o-audio-preview-2024-10-01" | :"gpt-4o-audio-preview-2024-12-17" + | :"gpt-4o-audio-preview-2025-06-03" | :"gpt-4o-mini-audio-preview" | :"gpt-4o-mini-audio-preview-2024-12-17" + | :"gpt-4o-search-preview" + | :"gpt-4o-mini-search-preview" + | :"gpt-4o-search-preview-2025-03-11" + | :"gpt-4o-mini-search-preview-2025-03-11" | :"chatgpt-4o-latest" + | :"codex-mini-latest" | :"gpt-4o-mini" | :"gpt-4o-mini-2024-07-18" | :"gpt-4-turbo" @@ -46,7 +64,26 @@ module OpenAI | :"gpt-3.5-turbo-0125" | :"gpt-3.5-turbo-16k-0613" - class ChatModel < OpenAI::Enum + module ChatModel + extend OpenAI::Internal::Type::Enum + + GPT_5: :"gpt-5" + GPT_5_MINI: :"gpt-5-mini" + GPT_5_NANO: :"gpt-5-nano" + GPT_5_2025_08_07: :"gpt-5-2025-08-07" + GPT_5_MINI_2025_08_07: :"gpt-5-mini-2025-08-07" + GPT_5_NANO_2025_08_07: :"gpt-5-nano-2025-08-07" + GPT_5_CHAT_LATEST: :"gpt-5-chat-latest" + GPT_4_1: :"gpt-4.1" + GPT_4_1_MINI: :"gpt-4.1-mini" + GPT_4_1_NANO: :"gpt-4.1-nano" + GPT_4_1_2025_04_14: :"gpt-4.1-2025-04-14" + GPT_4_1_MINI_2025_04_14: :"gpt-4.1-mini-2025-04-14" + GPT_4_1_NANO_2025_04_14: :"gpt-4.1-nano-2025-04-14" + O4_MINI: :"o4-mini" + O4_MINI_2025_04_16: :"o4-mini-2025-04-16" + O3: :o3 + O3_2025_04_16: :"o3-2025-04-16" O3_MINI: :"o3-mini" O3_MINI_2025_01_31: :"o3-mini-2025-01-31" O1: :o1 @@ -55,11 +92,6 @@ module OpenAI O1_PREVIEW_2024_09_12: :"o1-preview-2024-09-12" O1_MINI: :"o1-mini" O1_MINI_2024_09_12: :"o1-mini-2024-09-12" - COMPUTER_USE_PREVIEW: :"computer-use-preview" - COMPUTER_USE_PREVIEW_2025_02_04: :"computer-use-preview-2025-02-04" - COMPUTER_USE_PREVIEW_2025_03_11: :"computer-use-preview-2025-03-11" - GPT_4_5_PREVIEW: :"gpt-4.5-preview" - GPT_4_5_PREVIEW_2025_02_27: :"gpt-4.5-preview-2025-02-27" GPT_4O: :"gpt-4o" GPT_4O_2024_11_20: :"gpt-4o-2024-11-20" GPT_4O_2024_08_06: :"gpt-4o-2024-08-06" @@ -67,9 +99,15 @@ module OpenAI GPT_4O_AUDIO_PREVIEW: :"gpt-4o-audio-preview" GPT_4O_AUDIO_PREVIEW_2024_10_01: :"gpt-4o-audio-preview-2024-10-01" GPT_4O_AUDIO_PREVIEW_2024_12_17: :"gpt-4o-audio-preview-2024-12-17" + GPT_4O_AUDIO_PREVIEW_2025_06_03: :"gpt-4o-audio-preview-2025-06-03" GPT_4O_MINI_AUDIO_PREVIEW: :"gpt-4o-mini-audio-preview" GPT_4O_MINI_AUDIO_PREVIEW_2024_12_17: :"gpt-4o-mini-audio-preview-2024-12-17" + GPT_4O_SEARCH_PREVIEW: :"gpt-4o-search-preview" + GPT_4O_MINI_SEARCH_PREVIEW: :"gpt-4o-mini-search-preview" + GPT_4O_SEARCH_PREVIEW_2025_03_11: :"gpt-4o-search-preview-2025-03-11" + GPT_4O_MINI_SEARCH_PREVIEW_2025_03_11: :"gpt-4o-mini-search-preview-2025-03-11" CHATGPT_4O_LATEST: :"chatgpt-4o-latest" + CODEX_MINI_LATEST: :"codex-mini-latest" GPT_4O_MINI: :"gpt-4o-mini" GPT_4O_MINI_2024_07_18: :"gpt-4o-mini-2024-07-18" GPT_4_TURBO: :"gpt-4-turbo" @@ -92,7 +130,7 @@ module OpenAI GPT_3_5_TURBO_0125: :"gpt-3.5-turbo-0125" GPT_3_5_TURBO_16K_0613: :"gpt-3.5-turbo-16k-0613" - def self.values: -> ::Array[OpenAI::Models::chat_model] + def self?.values: -> ::Array[OpenAI::Models::chat_model] end end end diff --git a/sig/openai/models/comparison_filter.rbs b/sig/openai/models/comparison_filter.rbs index 435d3cc2..f5f686ba 100644 --- a/sig/openai/models/comparison_filter.rbs +++ b/sig/openai/models/comparison_filter.rbs @@ -7,26 +7,30 @@ module OpenAI value: OpenAI::Models::ComparisonFilter::value } - class ComparisonFilter < OpenAI::BaseModel + class ComparisonFilter < OpenAI::Internal::Type::BaseModel attr_accessor key: String attr_accessor type: OpenAI::Models::ComparisonFilter::type_ attr_accessor value: OpenAI::Models::ComparisonFilter::value - def 
initialize: - ( - key: String, - type: OpenAI::Models::ComparisonFilter::type_, - value: OpenAI::Models::ComparisonFilter::value - ) -> void - | (?OpenAI::Models::comparison_filter | OpenAI::BaseModel data) -> void + def initialize: ( + key: String, + type: OpenAI::Models::ComparisonFilter::type_, + value: OpenAI::Models::ComparisonFilter::value + ) -> void - def to_hash: -> OpenAI::Models::comparison_filter + def to_hash: -> { + key: String, + type: OpenAI::Models::ComparisonFilter::type_, + value: OpenAI::Models::ComparisonFilter::value + } type type_ = :eq | :ne | :gt | :gte | :lt | :lte - class Type < OpenAI::Enum + module Type + extend OpenAI::Internal::Type::Enum + EQ: :eq NE: :ne GT: :gt @@ -34,13 +38,15 @@ module OpenAI LT: :lt LTE: :lte - def self.values: -> ::Array[OpenAI::Models::ComparisonFilter::type_] + def self?.values: -> ::Array[OpenAI::Models::ComparisonFilter::type_] end type value = String | Float | bool - class Value < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, Float], [nil, bool]] + module Value + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::ComparisonFilter::value] end end end diff --git a/sig/openai/models/completion.rbs b/sig/openai/models/completion.rbs index 4394b753..40b716e4 100644 --- a/sig/openai/models/completion.rbs +++ b/sig/openai/models/completion.rbs @@ -3,18 +3,18 @@ module OpenAI type completion = { id: String, - choices: ::Array[OpenAI::Models::CompletionChoice], + choices: ::Array[OpenAI::CompletionChoice], created: Integer, model: String, object: :text_completion, system_fingerprint: String, - usage: OpenAI::Models::CompletionUsage + usage: OpenAI::CompletionUsage } - class Completion < OpenAI::BaseModel + class Completion < OpenAI::Internal::Type::BaseModel attr_accessor id: String - attr_accessor choices: ::Array[OpenAI::Models::CompletionChoice] + attr_accessor choices: ::Array[OpenAI::CompletionChoice] attr_accessor created: Integer @@ -26,25 +26,29 @@ module OpenAI def system_fingerprint=: (String) -> String - attr_reader usage: OpenAI::Models::CompletionUsage? - - def usage=: ( - OpenAI::Models::CompletionUsage - ) -> OpenAI::Models::CompletionUsage - - def initialize: - ( - id: String, - choices: ::Array[OpenAI::Models::CompletionChoice], - created: Integer, - model: String, - system_fingerprint: String, - usage: OpenAI::Models::CompletionUsage, - object: :text_completion - ) -> void - | (?OpenAI::Models::completion | OpenAI::BaseModel data) -> void - - def to_hash: -> OpenAI::Models::completion + attr_reader usage: OpenAI::CompletionUsage? 
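Two conversions recur throughout this diff: `OpenAI::Enum` subclasses become modules extending `OpenAI::Internal::Type::Enum`, and `OpenAI::Union` subclasses become modules extending `OpenAI::Internal::Type::Union`, with `values`/`variants` made public via the `self?.` receiver. An illustrative sketch, assuming the generated constants match the RBS above:

require "openai"

# Enum members are plain symbol constants on the module itself...
OpenAI::Models::ChatModel::GPT_5 # => :"gpt-5"

# ...and `values` is callable directly on the module, covering the new
# GPT-5 / GPT-4.1 / o3 / o4-mini entries added above.
OpenAI::Models::ChatModel.values.include?(:"gpt-4.1-mini") # => true

# Union modules expose their member types the same way.
OpenAI::Models::ComparisonFilter::Value.variants # => the union's variant types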
+ + def usage=: (OpenAI::CompletionUsage) -> OpenAI::CompletionUsage + + def initialize: ( + id: String, + choices: ::Array[OpenAI::CompletionChoice], + created: Integer, + model: String, + ?system_fingerprint: String, + ?usage: OpenAI::CompletionUsage, + ?object: :text_completion + ) -> void + + def to_hash: -> { + id: String, + choices: ::Array[OpenAI::CompletionChoice], + created: Integer, + model: String, + object: :text_completion, + system_fingerprint: String, + usage: OpenAI::CompletionUsage + } end end end diff --git a/sig/openai/models/completion_choice.rbs b/sig/openai/models/completion_choice.rbs index 847712cf..53a0ea09 100644 --- a/sig/openai/models/completion_choice.rbs +++ b/sig/openai/models/completion_choice.rbs @@ -4,38 +4,43 @@ module OpenAI { finish_reason: OpenAI::Models::CompletionChoice::finish_reason, index: Integer, - logprobs: OpenAI::Models::CompletionChoice::Logprobs?, + logprobs: OpenAI::CompletionChoice::Logprobs?, text: String } - class CompletionChoice < OpenAI::BaseModel + class CompletionChoice < OpenAI::Internal::Type::BaseModel attr_accessor finish_reason: OpenAI::Models::CompletionChoice::finish_reason attr_accessor index: Integer - attr_accessor logprobs: OpenAI::Models::CompletionChoice::Logprobs? + attr_accessor logprobs: OpenAI::CompletionChoice::Logprobs? attr_accessor text: String - def initialize: - ( - finish_reason: OpenAI::Models::CompletionChoice::finish_reason, - index: Integer, - logprobs: OpenAI::Models::CompletionChoice::Logprobs?, - text: String - ) -> void - | (?OpenAI::Models::completion_choice | OpenAI::BaseModel data) -> void + def initialize: ( + finish_reason: OpenAI::Models::CompletionChoice::finish_reason, + index: Integer, + logprobs: OpenAI::CompletionChoice::Logprobs?, + text: String + ) -> void - def to_hash: -> OpenAI::Models::completion_choice + def to_hash: -> { + finish_reason: OpenAI::Models::CompletionChoice::finish_reason, + index: Integer, + logprobs: OpenAI::CompletionChoice::Logprobs?, + text: String + } type finish_reason = :stop | :length | :content_filter - class FinishReason < OpenAI::Enum + module FinishReason + extend OpenAI::Internal::Type::Enum + STOP: :stop LENGTH: :length CONTENT_FILTER: :content_filter - def self.values: -> ::Array[OpenAI::Models::CompletionChoice::finish_reason] + def self?.values: -> ::Array[OpenAI::Models::CompletionChoice::finish_reason] end type logprobs = @@ -46,7 +51,7 @@ module OpenAI top_logprobs: ::Array[::Hash[Symbol, Float]] } - class Logprobs < OpenAI::BaseModel + class Logprobs < OpenAI::Internal::Type::BaseModel attr_reader text_offset: ::Array[Integer]? 
def text_offset=: (::Array[Integer]) -> ::Array[Integer] @@ -65,18 +70,19 @@ module OpenAI ::Array[::Hash[Symbol, Float]] ) -> ::Array[::Hash[Symbol, Float]] - def initialize: - ( - text_offset: ::Array[Integer], - token_logprobs: ::Array[Float], - tokens: ::Array[String], - top_logprobs: ::Array[::Hash[Symbol, Float]] - ) -> void - | ( - ?OpenAI::Models::CompletionChoice::logprobs | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::CompletionChoice::logprobs + def initialize: ( + ?text_offset: ::Array[Integer], + ?token_logprobs: ::Array[Float], + ?tokens: ::Array[String], + ?top_logprobs: ::Array[::Hash[Symbol, Float]] + ) -> void + + def to_hash: -> { + text_offset: ::Array[Integer], + token_logprobs: ::Array[Float], + tokens: ::Array[String], + top_logprobs: ::Array[::Hash[Symbol, Float]] + } end end end diff --git a/sig/openai/models/completion_create_params.rbs b/sig/openai/models/completion_create_params.rbs index 362d945c..5dcdfb9d 100644 --- a/sig/openai/models/completion_create_params.rbs +++ b/sig/openai/models/completion_create_params.rbs @@ -14,17 +14,17 @@ module OpenAI presence_penalty: Float?, seed: Integer?, stop: OpenAI::Models::CompletionCreateParams::stop?, - stream_options: OpenAI::Models::Chat::ChatCompletionStreamOptions?, + stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, suffix: String?, temperature: Float?, top_p: Float?, user: String } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class CompletionCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class CompletionCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor model: OpenAI::Models::CompletionCreateParams::model @@ -50,7 +50,7 @@ module OpenAI attr_accessor stop: OpenAI::Models::CompletionCreateParams::stop? - attr_accessor stream_options: OpenAI::Models::Chat::ChatCompletionStreamOptions? + attr_accessor stream_options: OpenAI::Chat::ChatCompletionStreamOptions? attr_accessor suffix: String? 
@@ -62,78 +62,84 @@ module OpenAI def user=: (String) -> String - def initialize: - ( - model: OpenAI::Models::CompletionCreateParams::model, - prompt: OpenAI::Models::CompletionCreateParams::prompt?, - best_of: Integer?, - echo: bool?, - frequency_penalty: Float?, - logit_bias: ::Hash[Symbol, Integer]?, - logprobs: Integer?, - max_tokens: Integer?, - n: Integer?, - presence_penalty: Float?, - seed: Integer?, - stop: OpenAI::Models::CompletionCreateParams::stop?, - stream_options: OpenAI::Models::Chat::ChatCompletionStreamOptions?, - suffix: String?, - temperature: Float?, - top_p: Float?, - user: String, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::completion_create_params | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::completion_create_params + def initialize: ( + model: OpenAI::Models::CompletionCreateParams::model, + prompt: OpenAI::Models::CompletionCreateParams::prompt?, + ?best_of: Integer?, + ?echo: bool?, + ?frequency_penalty: Float?, + ?logit_bias: ::Hash[Symbol, Integer]?, + ?logprobs: Integer?, + ?max_tokens: Integer?, + ?n: Integer?, + ?presence_penalty: Float?, + ?seed: Integer?, + ?stop: OpenAI::Models::CompletionCreateParams::stop?, + ?stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, + ?suffix: String?, + ?temperature: Float?, + ?top_p: Float?, + ?user: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + model: OpenAI::Models::CompletionCreateParams::model, + prompt: OpenAI::Models::CompletionCreateParams::prompt?, + best_of: Integer?, + echo: bool?, + frequency_penalty: Float?, + logit_bias: ::Hash[Symbol, Integer]?, + logprobs: Integer?, + max_tokens: Integer?, + n: Integer?, + presence_penalty: Float?, + seed: Integer?, + stop: OpenAI::Models::CompletionCreateParams::stop?, + stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, + suffix: String?, + temperature: Float?, + top_p: Float?, + user: String, + request_options: OpenAI::RequestOptions + } type model = - String | OpenAI::Models::CompletionCreateParams::Model::preset - - class Model < OpenAI::Union - type preset = - :"gpt-3.5-turbo-instruct" | :"davinci-002" | :"babbage-002" + String | :"gpt-3.5-turbo-instruct" | :"davinci-002" | :"babbage-002" - class Preset < OpenAI::Enum - GPT_3_5_TURBO_INSTRUCT: :"gpt-3.5-turbo-instruct" - DAVINCI_002: :"davinci-002" - BABBAGE_002: :"babbage-002" + module Model + extend OpenAI::Internal::Type::Union - def self.values: -> ::Array[OpenAI::Models::CompletionCreateParams::Model::preset] - end + def self?.variants: -> ::Array[OpenAI::Models::CompletionCreateParams::model] - private def self.variants: -> [[nil, String], [nil, OpenAI::Models::CompletionCreateParams::Model::preset]] + GPT_3_5_TURBO_INSTRUCT: :"gpt-3.5-turbo-instruct" + DAVINCI_002: :"davinci-002" + BABBAGE_002: :"babbage-002" end type prompt = String | ::Array[String] | ::Array[Integer] | ::Array[::Array[Integer]] - class Prompt < OpenAI::Union - type string_array = ::Array[String] - - StringArray: string_array - - type integer_array = ::Array[Integer] + module Prompt + extend OpenAI::Internal::Type::Union - IntegerArray: integer_array + def self?.variants: -> ::Array[OpenAI::Models::CompletionCreateParams::prompt] - type array_of_token2_d_array = ::Array[::Array[Integer]] + StringArray: OpenAI::Internal::Type::Converter - ArrayOfToken2DArray: array_of_token2_d_array + IntegerArray: OpenAI::Internal::Type::Converter - private def self.variants: -> [[nil, String], [nil, ::Array[String]], [nil, ::Array[Integer]], 
[nil, ::Array[::Array[Integer]]]] + ArrayOfToken2DArray: OpenAI::Internal::Type::Converter end type stop = (String | ::Array[String])? - class Stop < OpenAI::Union - type string_array = ::Array[String] + module Stop + extend OpenAI::Internal::Type::Union - StringArray: string_array + def self?.variants: -> ::Array[OpenAI::Models::CompletionCreateParams::stop] - private def self.variants: -> [[nil, String], [nil, ::Array[String]]] + StringArray: OpenAI::Internal::Type::Converter end end end diff --git a/sig/openai/models/completion_usage.rbs b/sig/openai/models/completion_usage.rbs index 3c334ef6..d70bb65e 100644 --- a/sig/openai/models/completion_usage.rbs +++ b/sig/openai/models/completion_usage.rbs @@ -5,40 +5,44 @@ module OpenAI completion_tokens: Integer, prompt_tokens: Integer, total_tokens: Integer, - completion_tokens_details: OpenAI::Models::CompletionUsage::CompletionTokensDetails, - prompt_tokens_details: OpenAI::Models::CompletionUsage::PromptTokensDetails + completion_tokens_details: OpenAI::CompletionUsage::CompletionTokensDetails, + prompt_tokens_details: OpenAI::CompletionUsage::PromptTokensDetails } - class CompletionUsage < OpenAI::BaseModel + class CompletionUsage < OpenAI::Internal::Type::BaseModel attr_accessor completion_tokens: Integer attr_accessor prompt_tokens: Integer attr_accessor total_tokens: Integer - attr_reader completion_tokens_details: OpenAI::Models::CompletionUsage::CompletionTokensDetails? + attr_reader completion_tokens_details: OpenAI::CompletionUsage::CompletionTokensDetails? def completion_tokens_details=: ( - OpenAI::Models::CompletionUsage::CompletionTokensDetails - ) -> OpenAI::Models::CompletionUsage::CompletionTokensDetails + OpenAI::CompletionUsage::CompletionTokensDetails + ) -> OpenAI::CompletionUsage::CompletionTokensDetails - attr_reader prompt_tokens_details: OpenAI::Models::CompletionUsage::PromptTokensDetails? + attr_reader prompt_tokens_details: OpenAI::CompletionUsage::PromptTokensDetails? def prompt_tokens_details=: ( - OpenAI::Models::CompletionUsage::PromptTokensDetails - ) -> OpenAI::Models::CompletionUsage::PromptTokensDetails - - def initialize: - ( - completion_tokens: Integer, - prompt_tokens: Integer, - total_tokens: Integer, - completion_tokens_details: OpenAI::Models::CompletionUsage::CompletionTokensDetails, - prompt_tokens_details: OpenAI::Models::CompletionUsage::PromptTokensDetails - ) -> void - | (?OpenAI::Models::completion_usage | OpenAI::BaseModel data) -> void + OpenAI::CompletionUsage::PromptTokensDetails + ) -> OpenAI::CompletionUsage::PromptTokensDetails + + def initialize: ( + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer, + ?completion_tokens_details: OpenAI::CompletionUsage::CompletionTokensDetails, + ?prompt_tokens_details: OpenAI::CompletionUsage::PromptTokensDetails + ) -> void - def to_hash: -> OpenAI::Models::completion_usage + def to_hash: -> { + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer, + completion_tokens_details: OpenAI::CompletionUsage::CompletionTokensDetails, + prompt_tokens_details: OpenAI::CompletionUsage::PromptTokensDetails + } type completion_tokens_details = { @@ -48,7 +52,7 @@ module OpenAI rejected_prediction_tokens: Integer } - class CompletionTokensDetails < OpenAI::BaseModel + class CompletionTokensDetails < OpenAI::Internal::Type::BaseModel attr_reader accepted_prediction_tokens: Integer? 
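Note that the legacy `CompletionCreateParams` model union above now inlines the preset symbols (`String | :"gpt-3.5-turbo-instruct" | :"davinci-002" | :"babbage-002"`) instead of nesting a separate `Model::preset` enum. A hedged sketch of the corresponding call, with the `client.completions` resource assumed:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# The model union accepts a bare String or one of the inlined preset symbols.
completion = client.completions.create(
  model: :"gpt-3.5-turbo-instruct",
  prompt: "Write a haiku about type signatures.",
  max_tokens: 64
)
puts completion.choices.first.text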
def accepted_prediction_tokens=: (Integer) -> Integer @@ -65,25 +69,25 @@ module OpenAI def rejected_prediction_tokens=: (Integer) -> Integer - def initialize: - ( - accepted_prediction_tokens: Integer, - audio_tokens: Integer, - reasoning_tokens: Integer, - rejected_prediction_tokens: Integer - ) -> void - | ( - ?OpenAI::Models::CompletionUsage::completion_tokens_details - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::CompletionUsage::completion_tokens_details + def initialize: ( + ?accepted_prediction_tokens: Integer, + ?audio_tokens: Integer, + ?reasoning_tokens: Integer, + ?rejected_prediction_tokens: Integer + ) -> void + + def to_hash: -> { + accepted_prediction_tokens: Integer, + audio_tokens: Integer, + reasoning_tokens: Integer, + rejected_prediction_tokens: Integer + } end type prompt_tokens_details = { audio_tokens: Integer, cached_tokens: Integer } - class PromptTokensDetails < OpenAI::BaseModel + class PromptTokensDetails < OpenAI::Internal::Type::BaseModel attr_reader audio_tokens: Integer? def audio_tokens=: (Integer) -> Integer @@ -92,14 +96,12 @@ module OpenAI def cached_tokens=: (Integer) -> Integer - def initialize: - (audio_tokens: Integer, cached_tokens: Integer) -> void - | ( - ?OpenAI::Models::CompletionUsage::prompt_tokens_details - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?audio_tokens: Integer, + ?cached_tokens: Integer + ) -> void - def to_hash: -> OpenAI::Models::CompletionUsage::prompt_tokens_details + def to_hash: -> { audio_tokens: Integer, cached_tokens: Integer } end end end diff --git a/sig/openai/models/compound_filter.rbs b/sig/openai/models/compound_filter.rbs index 620ab14b..ccf1036d 100644 --- a/sig/openai/models/compound_filter.rbs +++ b/sig/openai/models/compound_filter.rbs @@ -6,33 +6,38 @@ module OpenAI type: OpenAI::Models::CompoundFilter::type_ } - class CompoundFilter < OpenAI::BaseModel + class CompoundFilter < OpenAI::Internal::Type::BaseModel attr_accessor filters: ::Array[OpenAI::Models::CompoundFilter::filter] attr_accessor type: OpenAI::Models::CompoundFilter::type_ - def initialize: - ( - filters: ::Array[OpenAI::Models::CompoundFilter::filter], - type: OpenAI::Models::CompoundFilter::type_ - ) -> void - | (?OpenAI::Models::compound_filter | OpenAI::BaseModel data) -> void + def initialize: ( + filters: ::Array[OpenAI::Models::CompoundFilter::filter], + type: OpenAI::Models::CompoundFilter::type_ + ) -> void - def to_hash: -> OpenAI::Models::compound_filter + def to_hash: -> { + filters: ::Array[OpenAI::Models::CompoundFilter::filter], + type: OpenAI::Models::CompoundFilter::type_ + } + + type filter = OpenAI::ComparisonFilter | top - type filter = OpenAI::Models::ComparisonFilter | top + module Filter + extend OpenAI::Internal::Type::Union - class Filter < OpenAI::Union - private def self.variants: -> [[nil, OpenAI::Models::ComparisonFilter], [nil, top]] + def self?.variants: -> ::Array[OpenAI::Models::CompoundFilter::filter] end type type_ = :and | :or - class Type < OpenAI::Enum + module Type + extend OpenAI::Internal::Type::Enum + AND: :and OR: :or - def self.values: -> ::Array[OpenAI::Models::CompoundFilter::type_] + def self?.values: -> ::Array[OpenAI::Models::CompoundFilter::type_] end end end diff --git a/sig/openai/models/container_create_params.rbs b/sig/openai/models/container_create_params.rbs new file mode 100644 index 00000000..3c51ec67 --- /dev/null +++ b/sig/openai/models/container_create_params.rbs @@ -0,0 +1,74 @@ +module OpenAI + module Models + type container_create_params 
= + { + name: String, + expires_after: OpenAI::ContainerCreateParams::ExpiresAfter, + file_ids: ::Array[String] + } + & OpenAI::Internal::Type::request_parameters + + class ContainerCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor name: String + + attr_reader expires_after: OpenAI::ContainerCreateParams::ExpiresAfter? + + def expires_after=: ( + OpenAI::ContainerCreateParams::ExpiresAfter + ) -> OpenAI::ContainerCreateParams::ExpiresAfter + + attr_reader file_ids: ::Array[String]? + + def file_ids=: (::Array[String]) -> ::Array[String] + + def initialize: ( + name: String, + ?expires_after: OpenAI::ContainerCreateParams::ExpiresAfter, + ?file_ids: ::Array[String], + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + name: String, + expires_after: OpenAI::ContainerCreateParams::ExpiresAfter, + file_ids: ::Array[String], + request_options: OpenAI::RequestOptions + } + + type expires_after = + { + anchor: OpenAI::Models::ContainerCreateParams::ExpiresAfter::anchor, + minutes: Integer + } + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + attr_accessor anchor: OpenAI::Models::ContainerCreateParams::ExpiresAfter::anchor + + attr_accessor minutes: Integer + + def initialize: ( + anchor: OpenAI::Models::ContainerCreateParams::ExpiresAfter::anchor, + minutes: Integer + ) -> void + + def to_hash: -> { + anchor: OpenAI::Models::ContainerCreateParams::ExpiresAfter::anchor, + minutes: Integer + } + + type anchor = :last_active_at + + module Anchor + extend OpenAI::Internal::Type::Enum + + LAST_ACTIVE_AT: :last_active_at + + def self?.values: -> ::Array[OpenAI::Models::ContainerCreateParams::ExpiresAfter::anchor] + end + end + end + end +end diff --git a/sig/openai/models/container_create_response.rbs b/sig/openai/models/container_create_response.rbs new file mode 100644 index 00000000..ddc8f23d --- /dev/null +++ b/sig/openai/models/container_create_response.rbs @@ -0,0 +1,87 @@ +module OpenAI + module Models + type container_create_response = + { + id: String, + created_at: Integer, + name: String, + object: String, + status: String, + expires_after: OpenAI::Models::ContainerCreateResponse::ExpiresAfter + } + + class ContainerCreateResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor name: String + + attr_accessor object: String + + attr_accessor status: String + + attr_reader expires_after: OpenAI::Models::ContainerCreateResponse::ExpiresAfter? + + def expires_after=: ( + OpenAI::Models::ContainerCreateResponse::ExpiresAfter + ) -> OpenAI::Models::ContainerCreateResponse::ExpiresAfter + + def initialize: ( + id: String, + created_at: Integer, + name: String, + object: String, + status: String, + ?expires_after: OpenAI::Models::ContainerCreateResponse::ExpiresAfter + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + name: String, + object: String, + status: String, + expires_after: OpenAI::Models::ContainerCreateResponse::ExpiresAfter + } + + type expires_after = + { + anchor: OpenAI::Models::ContainerCreateResponse::ExpiresAfter::anchor, + minutes: Integer + } + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + attr_reader anchor: OpenAI::Models::ContainerCreateResponse::ExpiresAfter::anchor? 
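The new `ContainerCreateParams` pairs a required `name` with optional `expires_after` and `file_ids`. A sketch of the create call these params describe; the `client.containers` resource does not appear in this diff and is assumed:

require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

container = client.containers.create(
  name: "scratch-space",
  # `anchor` currently admits only :last_active_at, per the Anchor enum above.
  expires_after: {anchor: :last_active_at, minutes: 20},
  file_ids: ["file-abc123"] # hypothetical file ID
)
puts container.id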
+ + def anchor=: ( + OpenAI::Models::ContainerCreateResponse::ExpiresAfter::anchor + ) -> OpenAI::Models::ContainerCreateResponse::ExpiresAfter::anchor + + attr_reader minutes: Integer? + + def minutes=: (Integer) -> Integer + + def initialize: ( + ?anchor: OpenAI::Models::ContainerCreateResponse::ExpiresAfter::anchor, + ?minutes: Integer + ) -> void + + def to_hash: -> { + anchor: OpenAI::Models::ContainerCreateResponse::ExpiresAfter::anchor, + minutes: Integer + } + + type anchor = :last_active_at + + module Anchor + extend OpenAI::Internal::Type::Enum + + LAST_ACTIVE_AT: :last_active_at + + def self?.values: -> ::Array[OpenAI::Models::ContainerCreateResponse::ExpiresAfter::anchor] + end + end + end + end +end diff --git a/sig/openai/models/container_delete_params.rbs b/sig/openai/models/container_delete_params.rbs new file mode 100644 index 00000000..940b54a7 --- /dev/null +++ b/sig/openai/models/container_delete_params.rbs @@ -0,0 +1,15 @@ +module OpenAI + module Models + type container_delete_params = + { } & OpenAI::Internal::Type::request_parameters + + class ContainerDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + def initialize: (?request_options: OpenAI::request_opts) -> void + + def to_hash: -> { request_options: OpenAI::RequestOptions } + end + end +end diff --git a/sig/openai/models/container_list_params.rbs b/sig/openai/models/container_list_params.rbs new file mode 100644 index 00000000..ec6092bb --- /dev/null +++ b/sig/openai/models/container_list_params.rbs @@ -0,0 +1,55 @@ +module OpenAI + module Models + type container_list_params = + { + after: String, + limit: Integer, + order: OpenAI::Models::ContainerListParams::order + } + & OpenAI::Internal::Type::request_parameters + + class ContainerListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_reader after: String? + + def after=: (String) -> String + + attr_reader limit: Integer? + + def limit=: (Integer) -> Integer + + attr_reader order: OpenAI::Models::ContainerListParams::order? 
+ + def order=: ( + OpenAI::Models::ContainerListParams::order + ) -> OpenAI::Models::ContainerListParams::order + + def initialize: ( + ?after: String, + ?limit: Integer, + ?order: OpenAI::Models::ContainerListParams::order, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + after: String, + limit: Integer, + order: OpenAI::Models::ContainerListParams::order, + request_options: OpenAI::RequestOptions + } + + type order = :asc | :desc + + module Order + extend OpenAI::Internal::Type::Enum + + ASC: :asc + DESC: :desc + + def self?.values: -> ::Array[OpenAI::Models::ContainerListParams::order] + end + end + end +end diff --git a/sig/openai/models/container_list_response.rbs b/sig/openai/models/container_list_response.rbs new file mode 100644 index 00000000..97f971f0 --- /dev/null +++ b/sig/openai/models/container_list_response.rbs @@ -0,0 +1,87 @@ +module OpenAI + module Models + type container_list_response = + { + id: String, + created_at: Integer, + name: String, + object: String, + status: String, + expires_after: OpenAI::Models::ContainerListResponse::ExpiresAfter + } + + class ContainerListResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor name: String + + attr_accessor object: String + + attr_accessor status: String + + attr_reader expires_after: OpenAI::Models::ContainerListResponse::ExpiresAfter? + + def expires_after=: ( + OpenAI::Models::ContainerListResponse::ExpiresAfter + ) -> OpenAI::Models::ContainerListResponse::ExpiresAfter + + def initialize: ( + id: String, + created_at: Integer, + name: String, + object: String, + status: String, + ?expires_after: OpenAI::Models::ContainerListResponse::ExpiresAfter + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + name: String, + object: String, + status: String, + expires_after: OpenAI::Models::ContainerListResponse::ExpiresAfter + } + + type expires_after = + { + anchor: OpenAI::Models::ContainerListResponse::ExpiresAfter::anchor, + minutes: Integer + } + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + attr_reader anchor: OpenAI::Models::ContainerListResponse::ExpiresAfter::anchor? + + def anchor=: ( + OpenAI::Models::ContainerListResponse::ExpiresAfter::anchor + ) -> OpenAI::Models::ContainerListResponse::ExpiresAfter::anchor + + attr_reader minutes: Integer? 
+ + def minutes=: (Integer) -> Integer + + def initialize: ( + ?anchor: OpenAI::Models::ContainerListResponse::ExpiresAfter::anchor, + ?minutes: Integer + ) -> void + + def to_hash: -> { + anchor: OpenAI::Models::ContainerListResponse::ExpiresAfter::anchor, + minutes: Integer + } + + type anchor = :last_active_at + + module Anchor + extend OpenAI::Internal::Type::Enum + + LAST_ACTIVE_AT: :last_active_at + + def self?.values: -> ::Array[OpenAI::Models::ContainerListResponse::ExpiresAfter::anchor] + end + end + end + end +end diff --git a/sig/openai/models/container_retrieve_params.rbs b/sig/openai/models/container_retrieve_params.rbs new file mode 100644 index 00000000..74ca0b0c --- /dev/null +++ b/sig/openai/models/container_retrieve_params.rbs @@ -0,0 +1,15 @@ +module OpenAI + module Models + type container_retrieve_params = + { } & OpenAI::Internal::Type::request_parameters + + class ContainerRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + def initialize: (?request_options: OpenAI::request_opts) -> void + + def to_hash: -> { request_options: OpenAI::RequestOptions } + end + end +end diff --git a/sig/openai/models/container_retrieve_response.rbs b/sig/openai/models/container_retrieve_response.rbs new file mode 100644 index 00000000..fac17ec3 --- /dev/null +++ b/sig/openai/models/container_retrieve_response.rbs @@ -0,0 +1,87 @@ +module OpenAI + module Models + type container_retrieve_response = + { + id: String, + created_at: Integer, + name: String, + object: String, + status: String, + expires_after: OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter + } + + class ContainerRetrieveResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor name: String + + attr_accessor object: String + + attr_accessor status: String + + attr_reader expires_after: OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter? + + def expires_after=: ( + OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter + ) -> OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter + + def initialize: ( + id: String, + created_at: Integer, + name: String, + object: String, + status: String, + ?expires_after: OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + name: String, + object: String, + status: String, + expires_after: OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter + } + + type expires_after = + { + anchor: OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::anchor, + minutes: Integer + } + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + attr_reader anchor: OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::anchor? + + def anchor=: ( + OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::anchor + ) -> OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::anchor + + attr_reader minutes: Integer? 
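+ # Note: the Create/List/Retrieve container responses each declare an identical
+ # ExpiresAfter submodel; the generator emits one per response type instead of
+ # sharing a single class.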
+ + def minutes=: (Integer) -> Integer + + def initialize: ( + ?anchor: OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::anchor, + ?minutes: Integer + ) -> void + + def to_hash: -> { + anchor: OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::anchor, + minutes: Integer + } + + type anchor = :last_active_at + + module Anchor + extend OpenAI::Internal::Type::Enum + + LAST_ACTIVE_AT: :last_active_at + + def self?.values: -> ::Array[OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::anchor] + end + end + end + end +end diff --git a/sig/openai/models/containers/file_create_params.rbs b/sig/openai/models/containers/file_create_params.rbs new file mode 100644 index 00000000..8ebe9ad8 --- /dev/null +++ b/sig/openai/models/containers/file_create_params.rbs @@ -0,0 +1,36 @@ +module OpenAI + module Models + module Containers + type file_create_params = + { file: OpenAI::Internal::file_input, file_id: String } + & OpenAI::Internal::Type::request_parameters + + class FileCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_reader file: OpenAI::Internal::file_input? + + def file=: ( + OpenAI::Internal::file_input + ) -> OpenAI::Internal::file_input + + attr_reader file_id: String? + + def file_id=: (String) -> String + + def initialize: ( + ?file: OpenAI::Internal::file_input, + ?file_id: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + file: OpenAI::Internal::file_input, + file_id: String, + request_options: OpenAI::RequestOptions + } + end + end + end +end diff --git a/sig/openai/models/containers/file_create_response.rbs b/sig/openai/models/containers/file_create_response.rbs new file mode 100644 index 00000000..ccb96c98 --- /dev/null +++ b/sig/openai/models/containers/file_create_response.rbs @@ -0,0 +1,52 @@ +module OpenAI + module Models + module Containers + type file_create_response = + { + id: String, + bytes: Integer, + container_id: String, + created_at: Integer, + object: :"container.file", + path: String, + source: String + } + + class FileCreateResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor bytes: Integer + + attr_accessor container_id: String + + attr_accessor created_at: Integer + + attr_accessor object: :"container.file" + + attr_accessor path: String + + attr_accessor source: String + + def initialize: ( + id: String, + bytes: Integer, + container_id: String, + created_at: Integer, + path: String, + source: String, + ?object: :"container.file" + ) -> void + + def to_hash: -> { + id: String, + bytes: Integer, + container_id: String, + created_at: Integer, + object: :"container.file", + path: String, + source: String + } + end + end + end +end diff --git a/sig/openai/models/containers/file_delete_params.rbs b/sig/openai/models/containers/file_delete_params.rbs new file mode 100644 index 00000000..b8cd479f --- /dev/null +++ b/sig/openai/models/containers/file_delete_params.rbs @@ -0,0 +1,25 @@ +module OpenAI + module Models + module Containers + type file_delete_params = + { container_id: String } & OpenAI::Internal::Type::request_parameters + + class FileDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor container_id: String + + def initialize: ( + container_id: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + 
container_id: String, + request_options: OpenAI::RequestOptions + } + end + end + end +end diff --git a/sig/openai/models/containers/file_list_params.rbs b/sig/openai/models/containers/file_list_params.rbs new file mode 100644 index 00000000..8ea4c3aa --- /dev/null +++ b/sig/openai/models/containers/file_list_params.rbs @@ -0,0 +1,57 @@ +module OpenAI + module Models + module Containers + type file_list_params = + { + after: String, + limit: Integer, + order: OpenAI::Models::Containers::FileListParams::order + } + & OpenAI::Internal::Type::request_parameters + + class FileListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_reader after: String? + + def after=: (String) -> String + + attr_reader limit: Integer? + + def limit=: (Integer) -> Integer + + attr_reader order: OpenAI::Models::Containers::FileListParams::order? + + def order=: ( + OpenAI::Models::Containers::FileListParams::order + ) -> OpenAI::Models::Containers::FileListParams::order + + def initialize: ( + ?after: String, + ?limit: Integer, + ?order: OpenAI::Models::Containers::FileListParams::order, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + after: String, + limit: Integer, + order: OpenAI::Models::Containers::FileListParams::order, + request_options: OpenAI::RequestOptions + } + + type order = :asc | :desc + + module Order + extend OpenAI::Internal::Type::Enum + + ASC: :asc + DESC: :desc + + def self?.values: -> ::Array[OpenAI::Models::Containers::FileListParams::order] + end + end + end + end +end diff --git a/sig/openai/models/containers/file_list_response.rbs b/sig/openai/models/containers/file_list_response.rbs new file mode 100644 index 00000000..1314ef0f --- /dev/null +++ b/sig/openai/models/containers/file_list_response.rbs @@ -0,0 +1,52 @@ +module OpenAI + module Models + module Containers + type file_list_response = + { + id: String, + bytes: Integer, + container_id: String, + created_at: Integer, + object: :"container.file", + path: String, + source: String + } + + class FileListResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor bytes: Integer + + attr_accessor container_id: String + + attr_accessor created_at: Integer + + attr_accessor object: :"container.file" + + attr_accessor path: String + + attr_accessor source: String + + def initialize: ( + id: String, + bytes: Integer, + container_id: String, + created_at: Integer, + path: String, + source: String, + ?object: :"container.file" + ) -> void + + def to_hash: -> { + id: String, + bytes: Integer, + container_id: String, + created_at: Integer, + object: :"container.file", + path: String, + source: String + } + end + end + end +end diff --git a/sig/openai/models/containers/file_retrieve_params.rbs b/sig/openai/models/containers/file_retrieve_params.rbs new file mode 100644 index 00000000..aba88985 --- /dev/null +++ b/sig/openai/models/containers/file_retrieve_params.rbs @@ -0,0 +1,25 @@ +module OpenAI + module Models + module Containers + type file_retrieve_params = + { container_id: String } & OpenAI::Internal::Type::request_parameters + + class FileRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor container_id: String + + def initialize: ( + container_id: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + 
container_id: String, + request_options: OpenAI::RequestOptions + } + end + end + end +end diff --git a/sig/openai/models/containers/file_retrieve_response.rbs b/sig/openai/models/containers/file_retrieve_response.rbs new file mode 100644 index 00000000..33e75b90 --- /dev/null +++ b/sig/openai/models/containers/file_retrieve_response.rbs @@ -0,0 +1,52 @@ +module OpenAI + module Models + module Containers + type file_retrieve_response = + { + id: String, + bytes: Integer, + container_id: String, + created_at: Integer, + object: :"container.file", + path: String, + source: String + } + + class FileRetrieveResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor bytes: Integer + + attr_accessor container_id: String + + attr_accessor created_at: Integer + + attr_accessor object: :"container.file" + + attr_accessor path: String + + attr_accessor source: String + + def initialize: ( + id: String, + bytes: Integer, + container_id: String, + created_at: Integer, + path: String, + source: String, + ?object: :"container.file" + ) -> void + + def to_hash: -> { + id: String, + bytes: Integer, + container_id: String, + created_at: Integer, + object: :"container.file", + path: String, + source: String + } + end + end + end +end diff --git a/sig/openai/models/containers/files/content_retrieve_params.rbs b/sig/openai/models/containers/files/content_retrieve_params.rbs new file mode 100644 index 00000000..8912648d --- /dev/null +++ b/sig/openai/models/containers/files/content_retrieve_params.rbs @@ -0,0 +1,27 @@ +module OpenAI + module Models + module Containers + module Files + type content_retrieve_params = + { container_id: String } & OpenAI::Internal::Type::request_parameters + + class ContentRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor container_id: String + + def initialize: ( + container_id: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + container_id: String, + request_options: OpenAI::RequestOptions + } + end + end + end + end +end diff --git a/sig/openai/models/conversations/computer_screenshot_content.rbs b/sig/openai/models/conversations/computer_screenshot_content.rbs new file mode 100644 index 00000000..f0485edf --- /dev/null +++ b/sig/openai/models/conversations/computer_screenshot_content.rbs @@ -0,0 +1,28 @@ +module OpenAI + module Models + module Conversations + type computer_screenshot_content = + { file_id: String?, image_url: String?, type: :computer_screenshot } + + class ComputerScreenshotContent < OpenAI::Internal::Type::BaseModel + attr_accessor file_id: String? + + attr_accessor image_url: String? 
+ + attr_accessor type: :computer_screenshot + + def initialize: ( + file_id: String?, + image_url: String?, + ?type: :computer_screenshot + ) -> void + + def to_hash: -> { + file_id: String?, + image_url: String?, + type: :computer_screenshot + } + end + end + end +end diff --git a/sig/openai/models/conversations/container_file_citation_body.rbs b/sig/openai/models/conversations/container_file_citation_body.rbs new file mode 100644 index 00000000..c8d856d0 --- /dev/null +++ b/sig/openai/models/conversations/container_file_citation_body.rbs @@ -0,0 +1,47 @@ +module OpenAI + module Models + module Conversations + type container_file_citation_body = + { + container_id: String, + end_index: Integer, + file_id: String, + filename: String, + start_index: Integer, + type: :container_file_citation + } + + class ContainerFileCitationBody < OpenAI::Internal::Type::BaseModel + attr_accessor container_id: String + + attr_accessor end_index: Integer + + attr_accessor file_id: String + + attr_accessor filename: String + + attr_accessor start_index: Integer + + attr_accessor type: :container_file_citation + + def initialize: ( + container_id: String, + end_index: Integer, + file_id: String, + filename: String, + start_index: Integer, + ?type: :container_file_citation + ) -> void + + def to_hash: -> { + container_id: String, + end_index: Integer, + file_id: String, + filename: String, + start_index: Integer, + type: :container_file_citation + } + end + end + end +end diff --git a/sig/openai/models/conversations/conversation.rbs b/sig/openai/models/conversations/conversation.rbs new file mode 100644 index 00000000..9648f0fc --- /dev/null +++ b/sig/openai/models/conversations/conversation.rbs @@ -0,0 +1,37 @@ +module OpenAI + module Models + module Conversations + type conversation = + { + id: String, + created_at: Integer, + metadata: top, + object: :conversation + } + + class Conversation < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor metadata: top + + attr_accessor object: :conversation + + def initialize: ( + id: String, + created_at: Integer, + metadata: top, + ?object: :conversation + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + metadata: top, + object: :conversation + } + end + end + end +end diff --git a/sig/openai/models/conversations/conversation_create_params.rbs b/sig/openai/models/conversations/conversation_create_params.rbs new file mode 100644 index 00000000..e4152ec4 --- /dev/null +++ b/sig/openai/models/conversations/conversation_create_params.rbs @@ -0,0 +1,33 @@ +module OpenAI + module Models + module Conversations + type conversation_create_params = + { + items: ::Array[OpenAI::Models::Responses::response_input_item]?, + metadata: OpenAI::Models::metadata? + } + & OpenAI::Internal::Type::request_parameters + + class ConversationCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor items: ::Array[OpenAI::Models::Responses::response_input_item]? + + attr_accessor metadata: OpenAI::Models::metadata? 
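+ # items and metadata are both nullable and optional (note the ?-prefixed
+ # keywords in initialize below), so a conversation can be created empty.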
+ + def initialize: ( + ?items: ::Array[OpenAI::Models::Responses::response_input_item]?, + ?metadata: OpenAI::Models::metadata?, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + items: ::Array[OpenAI::Models::Responses::response_input_item]?, + metadata: OpenAI::Models::metadata?, + request_options: OpenAI::RequestOptions + } + end + end + end +end diff --git a/sig/openai/models/conversations/conversation_delete_params.rbs b/sig/openai/models/conversations/conversation_delete_params.rbs new file mode 100644 index 00000000..075734ce --- /dev/null +++ b/sig/openai/models/conversations/conversation_delete_params.rbs @@ -0,0 +1,17 @@ +module OpenAI + module Models + module Conversations + type conversation_delete_params = + { } & OpenAI::Internal::Type::request_parameters + + class ConversationDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + def initialize: (?request_options: OpenAI::request_opts) -> void + + def to_hash: -> { request_options: OpenAI::RequestOptions } + end + end + end +end diff --git a/sig/openai/models/conversations/conversation_deleted.rbs b/sig/openai/models/conversations/conversation_deleted.rbs new file mode 100644 index 00000000..67550c38 --- /dev/null +++ b/sig/openai/models/conversations/conversation_deleted.rbs @@ -0,0 +1,28 @@ +module OpenAI + module Models + module Conversations + type conversation_deleted = + { id: String, deleted: bool, object: :"conversation.deleted" } + + class ConversationDeleted < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor deleted: bool + + attr_accessor object: :"conversation.deleted" + + def initialize: ( + id: String, + deleted: bool, + ?object: :"conversation.deleted" + ) -> void + + def to_hash: -> { + id: String, + deleted: bool, + object: :"conversation.deleted" + } + end + end + end +end diff --git a/sig/openai/models/conversations/conversation_deleted_resource.rbs b/sig/openai/models/conversations/conversation_deleted_resource.rbs new file mode 100644 index 00000000..b0ae90b2 --- /dev/null +++ b/sig/openai/models/conversations/conversation_deleted_resource.rbs @@ -0,0 +1,28 @@ +module OpenAI + module Models + module Conversations + type conversation_deleted_resource = + { id: String, deleted: bool, object: :"conversation.deleted" } + + class ConversationDeletedResource < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor deleted: bool + + attr_accessor object: :"conversation.deleted" + + def initialize: ( + id: String, + deleted: bool, + ?object: :"conversation.deleted" + ) -> void + + def to_hash: -> { + id: String, + deleted: bool, + object: :"conversation.deleted" + } + end + end + end +end diff --git a/sig/openai/models/conversations/conversation_item.rbs b/sig/openai/models/conversations/conversation_item.rbs new file mode 100644 index 00000000..f4c07482 --- /dev/null +++ b/sig/openai/models/conversations/conversation_item.rbs @@ -0,0 +1,403 @@ +module OpenAI + module Models + module ConversationItem = Conversations::ConversationItem + + module Conversations + type conversation_item = + OpenAI::Conversations::Message + | OpenAI::Responses::ResponseFunctionToolCallItem + | OpenAI::Responses::ResponseFunctionToolCallOutputItem + | OpenAI::Responses::ResponseFileSearchToolCall + | OpenAI::Responses::ResponseFunctionWebSearch + | OpenAI::Conversations::ConversationItem::ImageGenerationCall + | 
OpenAI::Responses::ResponseComputerToolCall + | OpenAI::Responses::ResponseComputerToolCallOutputItem + | OpenAI::Responses::ResponseReasoningItem + | OpenAI::Responses::ResponseCodeInterpreterToolCall + | OpenAI::Conversations::ConversationItem::LocalShellCall + | OpenAI::Conversations::ConversationItem::LocalShellCallOutput + | OpenAI::Conversations::ConversationItem::McpListTools + | OpenAI::Conversations::ConversationItem::McpApprovalRequest + | OpenAI::Conversations::ConversationItem::McpApprovalResponse + | OpenAI::Conversations::ConversationItem::McpCall + | OpenAI::Responses::ResponseCustomToolCall + | OpenAI::Responses::ResponseCustomToolCallOutput + + module ConversationItem + extend OpenAI::Internal::Type::Union + + type image_generation_call = + { + id: String, + result: String?, + status: OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall::status, + type: :image_generation_call + } + + class ImageGenerationCall < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor result: String? + + attr_accessor status: OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall::status + + attr_accessor type: :image_generation_call + + def initialize: ( + id: String, + result: String?, + status: OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall::status, + ?type: :image_generation_call + ) -> void + + def to_hash: -> { + id: String, + result: String?, + status: OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall::status, + type: :image_generation_call + } + + type status = :in_progress | :completed | :generating | :failed + + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS: :in_progress + COMPLETED: :completed + GENERATING: :generating + FAILED: :failed + + def self?.values: -> ::Array[OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall::status] + end + end + + type local_shell_call = + { + id: String, + action: OpenAI::Conversations::ConversationItem::LocalShellCall::Action, + call_id: String, + status: OpenAI::Models::Conversations::ConversationItem::LocalShellCall::status, + type: :local_shell_call + } + + class LocalShellCall < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor action: OpenAI::Conversations::ConversationItem::LocalShellCall::Action + + attr_accessor call_id: String + + attr_accessor status: OpenAI::Models::Conversations::ConversationItem::LocalShellCall::status + + attr_accessor type: :local_shell_call + + def initialize: ( + id: String, + action: OpenAI::Conversations::ConversationItem::LocalShellCall::Action, + call_id: String, + status: OpenAI::Models::Conversations::ConversationItem::LocalShellCall::status, + ?type: :local_shell_call + ) -> void + + def to_hash: -> { + id: String, + action: OpenAI::Conversations::ConversationItem::LocalShellCall::Action, + call_id: String, + status: OpenAI::Models::Conversations::ConversationItem::LocalShellCall::status, + type: :local_shell_call + } + + type action = + { + command: ::Array[String], + env: ::Hash[Symbol, String], + type: :exec, + timeout_ms: Integer?, + user: String?, + working_directory: String? + } + + class Action < OpenAI::Internal::Type::BaseModel + attr_accessor command: ::Array[String] + + attr_accessor env: ::Hash[Symbol, String] + + attr_accessor type: :exec + + attr_accessor timeout_ms: Integer? + + attr_accessor user: String? + + attr_accessor working_directory: String? 
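+ # initialize (below) follows the generator's standard ordering: required
+ # command/env first, nullable options next, and the fixed :exec literal last.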
+ + def initialize: ( + command: ::Array[String], + env: ::Hash[Symbol, String], + ?timeout_ms: Integer?, + ?user: String?, + ?working_directory: String?, + ?type: :exec + ) -> void + + def to_hash: -> { + command: ::Array[String], + env: ::Hash[Symbol, String], + type: :exec, + timeout_ms: Integer?, + user: String?, + working_directory: String? + } + end + + type status = :in_progress | :completed | :incomplete + + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS: :in_progress + COMPLETED: :completed + INCOMPLETE: :incomplete + + def self?.values: -> ::Array[OpenAI::Models::Conversations::ConversationItem::LocalShellCall::status] + end + end + + type local_shell_call_output = + { + id: String, + output: String, + type: :local_shell_call_output, + status: OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput::status? + } + + class LocalShellCallOutput < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor output: String + + attr_accessor type: :local_shell_call_output + + attr_accessor status: OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput::status? + + def initialize: ( + id: String, + output: String, + ?status: OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput::status?, + ?type: :local_shell_call_output + ) -> void + + def to_hash: -> { + id: String, + output: String, + type: :local_shell_call_output, + status: OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput::status? + } + + type status = :in_progress | :completed | :incomplete + + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS: :in_progress + COMPLETED: :completed + INCOMPLETE: :incomplete + + def self?.values: -> ::Array[OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput::status] + end + end + + type mcp_list_tools = + { + id: String, + server_label: String, + tools: ::Array[OpenAI::Conversations::ConversationItem::McpListTools::Tool], + type: :mcp_list_tools, + error: String? + } + + class McpListTools < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor server_label: String + + attr_accessor tools: ::Array[OpenAI::Conversations::ConversationItem::McpListTools::Tool] + + attr_accessor type: :mcp_list_tools + + attr_accessor error: String? + + def initialize: ( + id: String, + server_label: String, + tools: ::Array[OpenAI::Conversations::ConversationItem::McpListTools::Tool], + ?error: String?, + ?type: :mcp_list_tools + ) -> void + + def to_hash: -> { + id: String, + server_label: String, + tools: ::Array[OpenAI::Conversations::ConversationItem::McpListTools::Tool], + type: :mcp_list_tools, + error: String? + } + + type tool = + { + input_schema: top, + name: String, + annotations: top?, + description: String? + } + + class Tool < OpenAI::Internal::Type::BaseModel + attr_accessor input_schema: top + + attr_accessor name: String + + attr_accessor annotations: top? + + attr_accessor description: String? + + def initialize: ( + input_schema: top, + name: String, + ?annotations: top?, + ?description: String? + ) -> void + + def to_hash: -> { + input_schema: top, + name: String, + annotations: top?, + description: String? 
+ } + end + end + + type mcp_approval_request = + { + id: String, + arguments: String, + name: String, + server_label: String, + type: :mcp_approval_request + } + + class McpApprovalRequest < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor arguments: String + + attr_accessor name: String + + attr_accessor server_label: String + + attr_accessor type: :mcp_approval_request + + def initialize: ( + id: String, + arguments: String, + name: String, + server_label: String, + ?type: :mcp_approval_request + ) -> void + + def to_hash: -> { + id: String, + arguments: String, + name: String, + server_label: String, + type: :mcp_approval_request + } + end + + type mcp_approval_response = + { + id: String, + approval_request_id: String, + approve: bool, + type: :mcp_approval_response, + reason: String? + } + + class McpApprovalResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor approval_request_id: String + + attr_accessor approve: bool + + attr_accessor type: :mcp_approval_response + + attr_accessor reason: String? + + def initialize: ( + id: String, + approval_request_id: String, + approve: bool, + ?reason: String?, + ?type: :mcp_approval_response + ) -> void + + def to_hash: -> { + id: String, + approval_request_id: String, + approve: bool, + type: :mcp_approval_response, + reason: String? + } + end + + type mcp_call = + { + id: String, + arguments: String, + name: String, + server_label: String, + type: :mcp_call, + error: String?, + output: String? + } + + class McpCall < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor arguments: String + + attr_accessor name: String + + attr_accessor server_label: String + + attr_accessor type: :mcp_call + + attr_accessor error: String? + + attr_accessor output: String? + + def initialize: ( + id: String, + arguments: String, + name: String, + server_label: String, + ?error: String?, + ?output: String?, + ?type: :mcp_call + ) -> void + + def to_hash: -> { + id: String, + arguments: String, + name: String, + server_label: String, + type: :mcp_call, + error: String?, + output: String? 
+ } + end + + def self?.variants: -> ::Array[OpenAI::Models::Conversations::conversation_item] + end + end + end +end diff --git a/sig/openai/models/conversations/conversation_item_list.rbs b/sig/openai/models/conversations/conversation_item_list.rbs new file mode 100644 index 00000000..9cab46d2 --- /dev/null +++ b/sig/openai/models/conversations/conversation_item_list.rbs @@ -0,0 +1,44 @@ +module OpenAI + module Models + class ConversationItemList = Conversations::ConversationItemList + + module Conversations + type conversation_item_list = + { + data: ::Array[OpenAI::Models::Conversations::conversation_item], + first_id: String, + has_more: bool, + last_id: String, + object: :list + } + + class ConversationItemList < OpenAI::Internal::Type::BaseModel + attr_accessor data: ::Array[OpenAI::Models::Conversations::conversation_item] + + attr_accessor first_id: String + + attr_accessor has_more: bool + + attr_accessor last_id: String + + attr_accessor object: :list + + def initialize: ( + data: ::Array[OpenAI::Models::Conversations::conversation_item], + first_id: String, + has_more: bool, + last_id: String, + ?object: :list + ) -> void + + def to_hash: -> { + data: ::Array[OpenAI::Models::Conversations::conversation_item], + first_id: String, + has_more: bool, + last_id: String, + object: :list + } + end + end + end +end diff --git a/sig/openai/models/conversations/conversation_retrieve_params.rbs b/sig/openai/models/conversations/conversation_retrieve_params.rbs new file mode 100644 index 00000000..5cf1fb84 --- /dev/null +++ b/sig/openai/models/conversations/conversation_retrieve_params.rbs @@ -0,0 +1,17 @@ +module OpenAI + module Models + module Conversations + type conversation_retrieve_params = + { } & OpenAI::Internal::Type::request_parameters + + class ConversationRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + def initialize: (?request_options: OpenAI::request_opts) -> void + + def to_hash: -> { request_options: OpenAI::RequestOptions } + end + end + end +end diff --git a/sig/openai/models/conversations/conversation_update_params.rbs b/sig/openai/models/conversations/conversation_update_params.rbs new file mode 100644 index 00000000..cff724e5 --- /dev/null +++ b/sig/openai/models/conversations/conversation_update_params.rbs @@ -0,0 +1,26 @@ +module OpenAI + module Models + module Conversations + type conversation_update_params = + { metadata: ::Hash[Symbol, String] } + & OpenAI::Internal::Type::request_parameters + + class ConversationUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor metadata: ::Hash[Symbol, String] + + def initialize: ( + metadata: ::Hash[Symbol, String], + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + metadata: ::Hash[Symbol, String], + request_options: OpenAI::RequestOptions + } + end + end + end +end diff --git a/sig/openai/models/conversations/file_citation_body.rbs b/sig/openai/models/conversations/file_citation_body.rbs new file mode 100644 index 00000000..cfdd97ac --- /dev/null +++ b/sig/openai/models/conversations/file_citation_body.rbs @@ -0,0 +1,37 @@ +module OpenAI + module Models + module Conversations + type file_citation_body = + { + file_id: String, + filename: String, + index: Integer, + type: :file_citation + } + + class FileCitationBody < OpenAI::Internal::Type::BaseModel + 
attr_accessor file_id: String + + attr_accessor filename: String + + attr_accessor index: Integer + + attr_accessor type: :file_citation + + def initialize: ( + file_id: String, + filename: String, + index: Integer, + ?type: :file_citation + ) -> void + + def to_hash: -> { + file_id: String, + filename: String, + index: Integer, + type: :file_citation + } + end + end + end +end diff --git a/sig/openai/models/conversations/input_file_content.rbs b/sig/openai/models/conversations/input_file_content.rbs new file mode 100644 index 00000000..69739208 --- /dev/null +++ b/sig/openai/models/conversations/input_file_content.rbs @@ -0,0 +1,41 @@ +module OpenAI + module Models + module Conversations + type input_file_content = + { + file_id: String?, + type: :input_file, + file_url: String, + filename: String + } + + class InputFileContent < OpenAI::Internal::Type::BaseModel + attr_accessor file_id: String? + + attr_accessor type: :input_file + + attr_reader file_url: String? + + def file_url=: (String) -> String + + attr_reader filename: String? + + def filename=: (String) -> String + + def initialize: ( + file_id: String?, + ?file_url: String, + ?filename: String, + ?type: :input_file + ) -> void + + def to_hash: -> { + file_id: String?, + type: :input_file, + file_url: String, + filename: String + } + end + end + end +end diff --git a/sig/openai/models/conversations/input_image_content.rbs b/sig/openai/models/conversations/input_image_content.rbs new file mode 100644 index 00000000..bfd75dcf --- /dev/null +++ b/sig/openai/models/conversations/input_image_content.rbs @@ -0,0 +1,49 @@ +module OpenAI + module Models + module Conversations + type input_image_content = + { + detail: OpenAI::Models::Conversations::InputImageContent::detail, + file_id: String?, + image_url: String?, + type: :input_image + } + + class InputImageContent < OpenAI::Internal::Type::BaseModel + attr_accessor detail: OpenAI::Models::Conversations::InputImageContent::detail + + attr_accessor file_id: String? + + attr_accessor image_url: String? 
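+ # Presumably one of file_id or image_url identifies the image; both are
+ # nullable in the schema, and detail is constrained to the Detail enum below.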
+ + attr_accessor type: :input_image + + def initialize: ( + detail: OpenAI::Models::Conversations::InputImageContent::detail, + file_id: String?, + image_url: String?, + ?type: :input_image + ) -> void + + def to_hash: -> { + detail: OpenAI::Models::Conversations::InputImageContent::detail, + file_id: String?, + image_url: String?, + type: :input_image + } + + type detail = :low | :high | :auto + + module Detail + extend OpenAI::Internal::Type::Enum + + LOW: :low + HIGH: :high + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::Conversations::InputImageContent::detail] + end + end + end + end +end diff --git a/sig/openai/models/conversations/input_text_content.rbs b/sig/openai/models/conversations/input_text_content.rbs new file mode 100644 index 00000000..59155bd2 --- /dev/null +++ b/sig/openai/models/conversations/input_text_content.rbs @@ -0,0 +1,17 @@ +module OpenAI + module Models + module Conversations + type input_text_content = { text: String, type: :input_text } + + class InputTextContent < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :input_text + + def initialize: (text: String, ?type: :input_text) -> void + + def to_hash: -> { text: String, type: :input_text } + end + end + end +end diff --git a/sig/openai/models/conversations/item_create_params.rbs b/sig/openai/models/conversations/item_create_params.rbs new file mode 100644 index 00000000..108ef141 --- /dev/null +++ b/sig/openai/models/conversations/item_create_params.rbs @@ -0,0 +1,37 @@ +module OpenAI + module Models + module Conversations + type item_create_params = + { + items: ::Array[OpenAI::Models::Responses::response_input_item], + include: ::Array[OpenAI::Models::Responses::response_includable] + } + & OpenAI::Internal::Type::request_parameters + + class ItemCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor items: ::Array[OpenAI::Models::Responses::response_input_item] + + attr_reader include: ::Array[OpenAI::Models::Responses::response_includable]? 
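+ # Optional; reuses the Responses API's response_includable values, presumably
+ # to request extra fields on the created items. Hypothetical usage (resource
+ # path assumed from the params namespace, not verified):
+ #   client.conversations.items.create("conv_123", items: [...])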
+ + def include=: ( + ::Array[OpenAI::Models::Responses::response_includable] + ) -> ::Array[OpenAI::Models::Responses::response_includable] + + def initialize: ( + items: ::Array[OpenAI::Models::Responses::response_input_item], + ?include: ::Array[OpenAI::Models::Responses::response_includable], + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + items: ::Array[OpenAI::Models::Responses::response_input_item], + include: ::Array[OpenAI::Models::Responses::response_includable], + request_options: OpenAI::RequestOptions + } + end + end + end +end diff --git a/sig/openai/models/conversations/item_delete_params.rbs b/sig/openai/models/conversations/item_delete_params.rbs new file mode 100644 index 00000000..fb864cb8 --- /dev/null +++ b/sig/openai/models/conversations/item_delete_params.rbs @@ -0,0 +1,25 @@ +module OpenAI + module Models + module Conversations + type item_delete_params = + { conversation_id: String } & OpenAI::Internal::Type::request_parameters + + class ItemDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor conversation_id: String + + def initialize: ( + conversation_id: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + conversation_id: String, + request_options: OpenAI::RequestOptions + } + end + end + end +end diff --git a/sig/openai/models/conversations/item_list_params.rbs b/sig/openai/models/conversations/item_list_params.rbs new file mode 100644 index 00000000..ca693e07 --- /dev/null +++ b/sig/openai/models/conversations/item_list_params.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module Conversations + type item_list_params = + { + after: String, + include: ::Array[OpenAI::Models::Responses::response_includable], + limit: Integer, + order: OpenAI::Models::Conversations::ItemListParams::order + } + & OpenAI::Internal::Type::request_parameters + + class ItemListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_reader after: String? + + def after=: (String) -> String + + attr_reader include: ::Array[OpenAI::Models::Responses::response_includable]? + + def include=: ( + ::Array[OpenAI::Models::Responses::response_includable] + ) -> ::Array[OpenAI::Models::Responses::response_includable] + + attr_reader limit: Integer? + + def limit=: (Integer) -> Integer + + attr_reader order: OpenAI::Models::Conversations::ItemListParams::order? 
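+ # The same after/limit/order cursor-pagination trio used by
+ # ContainerListParams and Containers::FileListParams above.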
+ + def order=: ( + OpenAI::Models::Conversations::ItemListParams::order + ) -> OpenAI::Models::Conversations::ItemListParams::order + + def initialize: ( + ?after: String, + ?include: ::Array[OpenAI::Models::Responses::response_includable], + ?limit: Integer, + ?order: OpenAI::Models::Conversations::ItemListParams::order, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + after: String, + include: ::Array[OpenAI::Models::Responses::response_includable], + limit: Integer, + order: OpenAI::Models::Conversations::ItemListParams::order, + request_options: OpenAI::RequestOptions + } + + type order = :asc | :desc + + module Order + extend OpenAI::Internal::Type::Enum + + ASC: :asc + DESC: :desc + + def self?.values: -> ::Array[OpenAI::Models::Conversations::ItemListParams::order] + end + end + end + end +end diff --git a/sig/openai/models/conversations/item_retrieve_params.rbs b/sig/openai/models/conversations/item_retrieve_params.rbs new file mode 100644 index 00000000..c2a3f209 --- /dev/null +++ b/sig/openai/models/conversations/item_retrieve_params.rbs @@ -0,0 +1,37 @@ +module OpenAI + module Models + module Conversations + type item_retrieve_params = + { + conversation_id: String, + include: ::Array[OpenAI::Models::Responses::response_includable] + } + & OpenAI::Internal::Type::request_parameters + + class ItemRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor conversation_id: String + + attr_reader include: ::Array[OpenAI::Models::Responses::response_includable]? + + def include=: ( + ::Array[OpenAI::Models::Responses::response_includable] + ) -> ::Array[OpenAI::Models::Responses::response_includable] + + def initialize: ( + conversation_id: String, + ?include: ::Array[OpenAI::Models::Responses::response_includable], + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + conversation_id: String, + include: ::Array[OpenAI::Models::Responses::response_includable], + request_options: OpenAI::RequestOptions + } + end + end + end +end diff --git a/sig/openai/models/conversations/lob_prob.rbs b/sig/openai/models/conversations/lob_prob.rbs new file mode 100644 index 00000000..7d64c4d1 --- /dev/null +++ b/sig/openai/models/conversations/lob_prob.rbs @@ -0,0 +1,37 @@ +module OpenAI + module Models + module Conversations + type lob_prob = + { + token: String, + bytes: ::Array[Integer], + logprob: Float, + top_logprobs: ::Array[OpenAI::Conversations::TopLogProb] + } + + class LobProb < OpenAI::Internal::Type::BaseModel + attr_accessor token: String + + attr_accessor bytes: ::Array[Integer] + + attr_accessor logprob: Float + + attr_accessor top_logprobs: ::Array[OpenAI::Conversations::TopLogProb] + + def initialize: ( + token: String, + bytes: ::Array[Integer], + logprob: Float, + top_logprobs: ::Array[OpenAI::Conversations::TopLogProb] + ) -> void + + def to_hash: -> { + token: String, + bytes: ::Array[Integer], + logprob: Float, + top_logprobs: ::Array[OpenAI::Conversations::TopLogProb] + } + end + end + end +end diff --git a/sig/openai/models/conversations/message.rbs b/sig/openai/models/conversations/message.rbs new file mode 100644 index 00000000..370dc4c3 --- /dev/null +++ b/sig/openai/models/conversations/message.rbs @@ -0,0 +1,95 @@ +module OpenAI + module Models + module Conversations + type message = + { + id: String, + content: ::Array[OpenAI::Models::Conversations::Message::content], + role: 
OpenAI::Models::Conversations::Message::role, + status: OpenAI::Models::Conversations::Message::status, + type: :message + } + + class Message < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor content: ::Array[OpenAI::Models::Conversations::Message::content] + + attr_accessor role: OpenAI::Models::Conversations::Message::role + + attr_accessor status: OpenAI::Models::Conversations::Message::status + + attr_accessor type: :message + + def initialize: ( + id: String, + content: ::Array[OpenAI::Models::Conversations::Message::content], + role: OpenAI::Models::Conversations::Message::role, + status: OpenAI::Models::Conversations::Message::status, + ?type: :message + ) -> void + + def to_hash: -> { + id: String, + content: ::Array[OpenAI::Models::Conversations::Message::content], + role: OpenAI::Models::Conversations::Message::role, + status: OpenAI::Models::Conversations::Message::status, + type: :message + } + + type content = + OpenAI::Conversations::InputTextContent + | OpenAI::Conversations::OutputTextContent + | OpenAI::Conversations::TextContent + | OpenAI::Conversations::SummaryTextContent + | OpenAI::Conversations::RefusalContent + | OpenAI::Conversations::InputImageContent + | OpenAI::Conversations::ComputerScreenshotContent + | OpenAI::Conversations::InputFileContent + + module Content + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::Conversations::Message::content] + end + + type role = + :unknown + | :user + | :assistant + | :system + | :critic + | :discriminator + | :developer + | :tool + + module Role + extend OpenAI::Internal::Type::Enum + + UNKNOWN: :unknown + USER: :user + ASSISTANT: :assistant + SYSTEM: :system + CRITIC: :critic + DISCRIMINATOR: :discriminator + DEVELOPER: :developer + TOOL: :tool + + def self?.values: -> ::Array[OpenAI::Models::Conversations::Message::role] + end + + type status = :in_progress | :completed | :incomplete + + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS: :in_progress + COMPLETED: :completed + INCOMPLETE: :incomplete + + def self?.values: -> ::Array[OpenAI::Models::Conversations::Message::status] + end + end + end + end +end diff --git a/sig/openai/models/conversations/output_text_content.rbs b/sig/openai/models/conversations/output_text_content.rbs new file mode 100644 index 00000000..fbb50d7b --- /dev/null +++ b/sig/openai/models/conversations/output_text_content.rbs @@ -0,0 +1,52 @@ +module OpenAI + module Models + module Conversations + type output_text_content = + { + annotations: ::Array[OpenAI::Models::Conversations::OutputTextContent::annotation], + text: String, + type: :output_text, + logprobs: ::Array[OpenAI::Conversations::LobProb] + } + + class OutputTextContent < OpenAI::Internal::Type::BaseModel + attr_accessor annotations: ::Array[OpenAI::Models::Conversations::OutputTextContent::annotation] + + attr_accessor text: String + + attr_accessor type: :output_text + + attr_reader logprobs: ::Array[OpenAI::Conversations::LobProb]? 
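+ # Optional per-token log probabilities. "LobProb" is the spelling the
+ # generated model uses (see conversations/lob_prob.rbs above); it is kept
+ # as-is here since renaming would break the public constant.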
+ + def logprobs=: ( + ::Array[OpenAI::Conversations::LobProb] + ) -> ::Array[OpenAI::Conversations::LobProb] + + def initialize: ( + annotations: ::Array[OpenAI::Models::Conversations::OutputTextContent::annotation], + text: String, + ?logprobs: ::Array[OpenAI::Conversations::LobProb], + ?type: :output_text + ) -> void + + def to_hash: -> { + annotations: ::Array[OpenAI::Models::Conversations::OutputTextContent::annotation], + text: String, + type: :output_text, + logprobs: ::Array[OpenAI::Conversations::LobProb] + } + + type annotation = + OpenAI::Conversations::FileCitationBody + | OpenAI::Conversations::URLCitationBody + | OpenAI::Conversations::ContainerFileCitationBody + + module Annotation + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::Conversations::OutputTextContent::annotation] + end + end + end + end +end diff --git a/sig/openai/models/conversations/refusal_content.rbs b/sig/openai/models/conversations/refusal_content.rbs new file mode 100644 index 00000000..904922e1 --- /dev/null +++ b/sig/openai/models/conversations/refusal_content.rbs @@ -0,0 +1,17 @@ +module OpenAI + module Models + module Conversations + type refusal_content = { refusal: String, type: :refusal } + + class RefusalContent < OpenAI::Internal::Type::BaseModel + attr_accessor refusal: String + + attr_accessor type: :refusal + + def initialize: (refusal: String, ?type: :refusal) -> void + + def to_hash: -> { refusal: String, type: :refusal } + end + end + end +end diff --git a/sig/openai/models/conversations/summary_text_content.rbs b/sig/openai/models/conversations/summary_text_content.rbs new file mode 100644 index 00000000..33844203 --- /dev/null +++ b/sig/openai/models/conversations/summary_text_content.rbs @@ -0,0 +1,17 @@ +module OpenAI + module Models + module Conversations + type summary_text_content = { text: String, type: :summary_text } + + class SummaryTextContent < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :summary_text + + def initialize: (text: String, ?type: :summary_text) -> void + + def to_hash: -> { text: String, type: :summary_text } + end + end + end +end diff --git a/sig/openai/models/conversations/text_content.rbs b/sig/openai/models/conversations/text_content.rbs new file mode 100644 index 00000000..34216457 --- /dev/null +++ b/sig/openai/models/conversations/text_content.rbs @@ -0,0 +1,17 @@ +module OpenAI + module Models + module Conversations + type text_content = { text: String, type: :text } + + class TextContent < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :text + + def initialize: (text: String, ?type: :text) -> void + + def to_hash: -> { text: String, type: :text } + end + end + end +end diff --git a/sig/openai/models/conversations/top_log_prob.rbs b/sig/openai/models/conversations/top_log_prob.rbs new file mode 100644 index 00000000..43425978 --- /dev/null +++ b/sig/openai/models/conversations/top_log_prob.rbs @@ -0,0 +1,28 @@ +module OpenAI + module Models + module Conversations + type top_log_prob = + { token: String, bytes: ::Array[Integer], logprob: Float } + + class TopLogProb < OpenAI::Internal::Type::BaseModel + attr_accessor token: String + + attr_accessor bytes: ::Array[Integer] + + attr_accessor logprob: Float + + def initialize: ( + token: String, + bytes: ::Array[Integer], + logprob: Float + ) -> void + + def to_hash: -> { + token: String, + bytes: ::Array[Integer], + logprob: Float + } + end + end + end +end diff --git 
a/sig/openai/models/conversations/url_citation_body.rbs b/sig/openai/models/conversations/url_citation_body.rbs new file mode 100644 index 00000000..70a4b20e --- /dev/null +++ b/sig/openai/models/conversations/url_citation_body.rbs @@ -0,0 +1,42 @@ +module OpenAI + module Models + module Conversations + type url_citation_body = + { + end_index: Integer, + start_index: Integer, + title: String, + type: :url_citation, + url: String + } + + class URLCitationBody < OpenAI::Internal::Type::BaseModel + attr_accessor end_index: Integer + + attr_accessor start_index: Integer + + attr_accessor title: String + + attr_accessor type: :url_citation + + attr_accessor url: String + + def initialize: ( + end_index: Integer, + start_index: Integer, + title: String, + url: String, + ?type: :url_citation + ) -> void + + def to_hash: -> { + end_index: Integer, + start_index: Integer, + title: String, + type: :url_citation, + url: String + } + end + end + end +end diff --git a/sig/openai/models/create_embedding_response.rbs b/sig/openai/models/create_embedding_response.rbs index d6fa5f3d..92433cab 100644 --- a/sig/openai/models/create_embedding_response.rbs +++ b/sig/openai/models/create_embedding_response.rbs @@ -2,49 +2,45 @@ module OpenAI module Models type create_embedding_response = { - data: ::Array[OpenAI::Models::Embedding], + data: ::Array[OpenAI::Embedding], model: String, object: :list, - usage: OpenAI::Models::CreateEmbeddingResponse::Usage + usage: OpenAI::CreateEmbeddingResponse::Usage } - class CreateEmbeddingResponse < OpenAI::BaseModel - attr_accessor data: ::Array[OpenAI::Models::Embedding] + class CreateEmbeddingResponse < OpenAI::Internal::Type::BaseModel + attr_accessor data: ::Array[OpenAI::Embedding] attr_accessor model: String attr_accessor object: :list - attr_accessor usage: OpenAI::Models::CreateEmbeddingResponse::Usage + attr_accessor usage: OpenAI::CreateEmbeddingResponse::Usage - def initialize: - ( - data: ::Array[OpenAI::Models::Embedding], - model: String, - usage: OpenAI::Models::CreateEmbeddingResponse::Usage, - object: :list - ) -> void - | ( - ?OpenAI::Models::create_embedding_response | OpenAI::BaseModel data - ) -> void + def initialize: ( + data: ::Array[OpenAI::Embedding], + model: String, + usage: OpenAI::CreateEmbeddingResponse::Usage, + ?object: :list + ) -> void - def to_hash: -> OpenAI::Models::create_embedding_response + def to_hash: -> { + data: ::Array[OpenAI::Embedding], + model: String, + object: :list, + usage: OpenAI::CreateEmbeddingResponse::Usage + } type usage = { prompt_tokens: Integer, total_tokens: Integer } - class Usage < OpenAI::BaseModel + class Usage < OpenAI::Internal::Type::BaseModel attr_accessor prompt_tokens: Integer attr_accessor total_tokens: Integer - def initialize: - (prompt_tokens: Integer, total_tokens: Integer) -> void - | ( - ?OpenAI::Models::CreateEmbeddingResponse::usage - | OpenAI::BaseModel data - ) -> void + def initialize: (prompt_tokens: Integer, total_tokens: Integer) -> void - def to_hash: -> OpenAI::Models::CreateEmbeddingResponse::usage + def to_hash: -> { prompt_tokens: Integer, total_tokens: Integer } end end end diff --git a/sig/openai/models/custom_tool_input_format.rbs b/sig/openai/models/custom_tool_input_format.rbs new file mode 100644 index 00000000..b0898e18 --- /dev/null +++ b/sig/openai/models/custom_tool_input_format.rbs @@ -0,0 +1,61 @@ +module OpenAI + module Models + type custom_tool_input_format = + OpenAI::CustomToolInputFormat::Text + | OpenAI::CustomToolInputFormat::Grammar + + module 
CustomToolInputFormat + extend OpenAI::Internal::Type::Union + + type text = { type: :text } + + class Text < OpenAI::Internal::Type::BaseModel + attr_accessor type: :text + + def initialize: (?type: :text) -> void + + def to_hash: -> { type: :text } + end + + type grammar = + { + definition: String, + syntax: OpenAI::Models::CustomToolInputFormat::Grammar::syntax, + type: :grammar + } + + class Grammar < OpenAI::Internal::Type::BaseModel + attr_accessor definition: String + + attr_accessor syntax: OpenAI::Models::CustomToolInputFormat::Grammar::syntax + + attr_accessor type: :grammar + + def initialize: ( + definition: String, + syntax: OpenAI::Models::CustomToolInputFormat::Grammar::syntax, + ?type: :grammar + ) -> void + + def to_hash: -> { + definition: String, + syntax: OpenAI::Models::CustomToolInputFormat::Grammar::syntax, + type: :grammar + } + + type syntax = :lark | :regex + + module Syntax + extend OpenAI::Internal::Type::Enum + + LARK: :lark + REGEX: :regex + + def self?.values: -> ::Array[OpenAI::Models::CustomToolInputFormat::Grammar::syntax] + end + end + + def self?.variants: -> ::Array[OpenAI::Models::custom_tool_input_format] + end + end +end diff --git a/sig/openai/models/embedding.rbs b/sig/openai/models/embedding.rbs index 997cabfc..3c411347 100644 --- a/sig/openai/models/embedding.rbs +++ b/sig/openai/models/embedding.rbs @@ -3,18 +3,24 @@ module OpenAI type embedding = { embedding: ::Array[Float], index: Integer, object: :embedding } - class Embedding < OpenAI::BaseModel + class Embedding < OpenAI::Internal::Type::BaseModel attr_accessor embedding: ::Array[Float] attr_accessor index: Integer attr_accessor object: :embedding - def initialize: - (embedding: ::Array[Float], index: Integer, object: :embedding) -> void - | (?OpenAI::Models::embedding | OpenAI::BaseModel data) -> void + def initialize: ( + embedding: ::Array[Float], + index: Integer, + ?object: :embedding + ) -> void - def to_hash: -> OpenAI::Models::embedding + def to_hash: -> { + embedding: ::Array[Float], + index: Integer, + object: :embedding + } end end end diff --git a/sig/openai/models/embedding_create_params.rbs b/sig/openai/models/embedding_create_params.rbs index fdddf90e..4600282f 100644 --- a/sig/openai/models/embedding_create_params.rbs +++ b/sig/openai/models/embedding_create_params.rbs @@ -8,11 +8,11 @@ module OpenAI encoding_format: OpenAI::Models::EmbeddingCreateParams::encoding_format, user: String } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class EmbeddingCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class EmbeddingCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor input: OpenAI::Models::EmbeddingCreateParams::input @@ -32,53 +32,56 @@ module OpenAI def user=: (String) -> String - def initialize: - ( - input: OpenAI::Models::EmbeddingCreateParams::input, - model: OpenAI::Models::EmbeddingCreateParams::model, - dimensions: Integer, - encoding_format: OpenAI::Models::EmbeddingCreateParams::encoding_format, - user: String, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::embedding_create_params | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::embedding_create_params + def initialize: ( + input: OpenAI::Models::EmbeddingCreateParams::input, + model: OpenAI::Models::EmbeddingCreateParams::model, + ?dimensions: 
Integer, + ?encoding_format: OpenAI::Models::EmbeddingCreateParams::encoding_format, + ?user: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + input: OpenAI::Models::EmbeddingCreateParams::input, + model: OpenAI::Models::EmbeddingCreateParams::model, + dimensions: Integer, + encoding_format: OpenAI::Models::EmbeddingCreateParams::encoding_format, + user: String, + request_options: OpenAI::RequestOptions + } type input = String | ::Array[String] | ::Array[Integer] | ::Array[::Array[Integer]] - class Input < OpenAI::Union - type string_array = ::Array[String] - - StringArray: string_array + module Input + extend OpenAI::Internal::Type::Union - type integer_array = ::Array[Integer] + def self?.variants: -> ::Array[OpenAI::Models::EmbeddingCreateParams::input] - IntegerArray: integer_array + StringArray: OpenAI::Internal::Type::Converter - type array_of_token2_d_array = ::Array[::Array[Integer]] + IntegerArray: OpenAI::Internal::Type::Converter - ArrayOfToken2DArray: array_of_token2_d_array - - private def self.variants: -> [[nil, String], [nil, ::Array[String]], [nil, ::Array[Integer]], [nil, ::Array[::Array[Integer]]]] + ArrayOfToken2DArray: OpenAI::Internal::Type::Converter end type model = String | OpenAI::Models::embedding_model - class Model < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, OpenAI::Models::embedding_model]] + module Model + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::EmbeddingCreateParams::model] end type encoding_format = :float | :base64 - class EncodingFormat < OpenAI::Enum + module EncodingFormat + extend OpenAI::Internal::Type::Enum + FLOAT: :float BASE64: :base64 - def self.values: -> ::Array[OpenAI::Models::EmbeddingCreateParams::encoding_format] + def self?.values: -> ::Array[OpenAI::Models::EmbeddingCreateParams::encoding_format] end end end diff --git a/sig/openai/models/embedding_model.rbs b/sig/openai/models/embedding_model.rbs index c334a8a5..eebc0c7b 100644 --- a/sig/openai/models/embedding_model.rbs +++ b/sig/openai/models/embedding_model.rbs @@ -5,12 +5,14 @@ module OpenAI | :"text-embedding-3-small" | :"text-embedding-3-large" - class EmbeddingModel < OpenAI::Enum + module EmbeddingModel + extend OpenAI::Internal::Type::Enum + TEXT_EMBEDDING_ADA_002: :"text-embedding-ada-002" TEXT_EMBEDDING_3_SMALL: :"text-embedding-3-small" TEXT_EMBEDDING_3_LARGE: :"text-embedding-3-large" - def self.values: -> ::Array[OpenAI::Models::embedding_model] + def self?.values: -> ::Array[OpenAI::Models::embedding_model] end end end diff --git a/sig/openai/models/error_object.rbs b/sig/openai/models/error_object.rbs index 1e44e215..9ee1bb01 100644 --- a/sig/openai/models/error_object.rbs +++ b/sig/openai/models/error_object.rbs @@ -3,7 +3,7 @@ module OpenAI type error_object = { code: String?, message: String, param: String?, type: String } - class ErrorObject < OpenAI::BaseModel + class ErrorObject < OpenAI::Internal::Type::BaseModel attr_accessor code: String? 
attr_accessor message: String @@ -12,11 +12,19 @@ module OpenAI attr_accessor type: String - def initialize: - (code: String?, message: String, param: String?, type: String) -> void - | (?OpenAI::Models::error_object | OpenAI::BaseModel data) -> void + def initialize: ( + code: String?, + message: String, + param: String?, + type: String + ) -> void - def to_hash: -> OpenAI::Models::error_object + def to_hash: -> { + code: String?, + message: String, + param: String?, + type: String + } end end end diff --git a/sig/openai/models/eval_create_params.rbs b/sig/openai/models/eval_create_params.rbs new file mode 100644 index 00000000..88abb4fb --- /dev/null +++ b/sig/openai/models/eval_create_params.rbs @@ -0,0 +1,337 @@ +module OpenAI + module Models + type eval_create_params = + { + data_source_config: OpenAI::Models::EvalCreateParams::data_source_config, + testing_criteria: ::Array[OpenAI::Models::EvalCreateParams::testing_criterion], + metadata: OpenAI::Models::metadata?, + name: String + } + & OpenAI::Internal::Type::request_parameters + + class EvalCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor data_source_config: OpenAI::Models::EvalCreateParams::data_source_config + + attr_accessor testing_criteria: ::Array[OpenAI::Models::EvalCreateParams::testing_criterion] + + attr_accessor metadata: OpenAI::Models::metadata? + + attr_reader name: String? + + def name=: (String) -> String + + def initialize: ( + data_source_config: OpenAI::Models::EvalCreateParams::data_source_config, + testing_criteria: ::Array[OpenAI::Models::EvalCreateParams::testing_criterion], + ?metadata: OpenAI::Models::metadata?, + ?name: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + data_source_config: OpenAI::Models::EvalCreateParams::data_source_config, + testing_criteria: ::Array[OpenAI::Models::EvalCreateParams::testing_criterion], + metadata: OpenAI::Models::metadata?, + name: String, + request_options: OpenAI::RequestOptions + } + + type data_source_config = + OpenAI::EvalCreateParams::DataSourceConfig::Custom + | OpenAI::EvalCreateParams::DataSourceConfig::Logs + | OpenAI::EvalCreateParams::DataSourceConfig::StoredCompletions + + module DataSourceConfig + extend OpenAI::Internal::Type::Union + + type custom = + { + item_schema: ::Hash[Symbol, top], + type: :custom, + include_sample_schema: bool + } + + class Custom < OpenAI::Internal::Type::BaseModel + attr_accessor item_schema: ::Hash[Symbol, top] + + attr_accessor type: :custom + + attr_reader include_sample_schema: bool? + + def include_sample_schema=: (bool) -> bool + + def initialize: ( + item_schema: ::Hash[Symbol, top], + ?include_sample_schema: bool, + ?type: :custom + ) -> void + + def to_hash: -> { + item_schema: ::Hash[Symbol, top], + type: :custom, + include_sample_schema: bool + } + end + + type logs = { type: :logs, metadata: ::Hash[Symbol, top] } + + class Logs < OpenAI::Internal::Type::BaseModel + attr_accessor type: :logs + + attr_reader metadata: ::Hash[Symbol, top]? 
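+          # Convention throughout these signatures: required (and nullable)
+          # fields use `attr_accessor`, while optional fields pair a nullable
+          # `attr_reader` with a writer accepting only a concrete value, since
+          # they may be omitted but never explicitly set to nil.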
+ + def metadata=: (::Hash[Symbol, top]) -> ::Hash[Symbol, top] + + def initialize: (?metadata: ::Hash[Symbol, top], ?type: :logs) -> void + + def to_hash: -> { type: :logs, metadata: ::Hash[Symbol, top] } + end + + type stored_completions = + { type: :stored_completions, metadata: ::Hash[Symbol, top] } + + class StoredCompletions < OpenAI::Internal::Type::BaseModel + attr_accessor type: :stored_completions + + attr_reader metadata: ::Hash[Symbol, top]? + + def metadata=: (::Hash[Symbol, top]) -> ::Hash[Symbol, top] + + def initialize: ( + ?metadata: ::Hash[Symbol, top], + ?type: :stored_completions + ) -> void + + def to_hash: -> { + type: :stored_completions, + metadata: ::Hash[Symbol, top] + } + end + + def self?.variants: -> ::Array[OpenAI::Models::EvalCreateParams::data_source_config] + end + + type testing_criterion = + OpenAI::EvalCreateParams::TestingCriterion::LabelModel + | OpenAI::Graders::StringCheckGrader + | OpenAI::EvalCreateParams::TestingCriterion::TextSimilarity + | OpenAI::EvalCreateParams::TestingCriterion::Python + | OpenAI::EvalCreateParams::TestingCriterion::ScoreModel + + module TestingCriterion + extend OpenAI::Internal::Type::Union + + type label_model = + { + input: ::Array[OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::input], + labels: ::Array[String], + model: String, + name: String, + passing_labels: ::Array[String], + type: :label_model + } + + class LabelModel < OpenAI::Internal::Type::BaseModel + attr_accessor input: ::Array[OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::input] + + attr_accessor labels: ::Array[String] + + attr_accessor model: String + + attr_accessor name: String + + attr_accessor passing_labels: ::Array[String] + + attr_accessor type: :label_model + + def initialize: ( + input: ::Array[OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::input], + labels: ::Array[String], + model: String, + name: String, + passing_labels: ::Array[String], + ?type: :label_model + ) -> void + + def to_hash: -> { + input: ::Array[OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::input], + labels: ::Array[String], + model: String, + name: String, + passing_labels: ::Array[String], + type: :label_model + } + + type input = + OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::SimpleInputMessage + | OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem + + module Input + extend OpenAI::Internal::Type::Union + + type simple_input_message = { content: String, role: String } + + class SimpleInputMessage < OpenAI::Internal::Type::BaseModel + attr_accessor content: String + + attr_accessor role: String + + def initialize: (content: String, role: String) -> void + + def to_hash: -> { content: String, role: String } + end + + type eval_item = + { + content: OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::content, + role: OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::role, + type: OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::type_ + } + + class EvalItem < OpenAI::Internal::Type::BaseModel + attr_accessor content: OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::content + + attr_accessor role: OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::role + + attr_reader type: OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::type_? 
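+            # The alias is spelled `type_` rather than `type`, presumably
+            # because `type` is the RBS keyword for alias declarations; the
+            # runtime attribute itself is still named `type`.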
+ + def type=: ( + OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::type_ + ) -> OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::type_ + + def initialize: ( + content: OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::content, + role: OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::role, + ?type: OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::type_ + ) -> void + + def to_hash: -> { + content: OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::content, + role: OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::role, + type: OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::type_ + } + + type content = + String + | OpenAI::Responses::ResponseInputText + | OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText + | OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage + | ::Array[top] + + module Content + extend OpenAI::Internal::Type::Union + + type output_text = { text: String, type: :output_text } + + class OutputText < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :output_text + + def initialize: (text: String, ?type: :output_text) -> void + + def to_hash: -> { text: String, type: :output_text } + end + + type input_image = + { image_url: String, type: :input_image, detail: String } + + class InputImage < OpenAI::Internal::Type::BaseModel + attr_accessor image_url: String + + attr_accessor type: :input_image + + attr_reader detail: String? + + def detail=: (String) -> String + + def initialize: ( + image_url: String, + ?detail: String, + ?type: :input_image + ) -> void + + def to_hash: -> { + image_url: String, + type: :input_image, + detail: String + } + end + + def self?.variants: -> ::Array[OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::content] + + AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter + end + + type role = :user | :assistant | :system | :developer + + module Role + extend OpenAI::Internal::Type::Enum + + USER: :user + ASSISTANT: :assistant + SYSTEM: :system + DEVELOPER: :developer + + def self?.values: -> ::Array[OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::role] + end + + type type_ = :message + + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE: :message + + def self?.values: -> ::Array[OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::type_] + end + end + + def self?.variants: -> ::Array[OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::input] + end + end + + type text_similarity = { pass_threshold: Float } + + class TextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader + def pass_threshold: -> Float + + def pass_threshold=: (Float _) -> Float + + def initialize: (pass_threshold: Float) -> void + + def to_hash: -> { pass_threshold: Float } + end + + type python = { pass_threshold: Float } + + class Python < OpenAI::Models::Graders::PythonGrader + def pass_threshold: -> Float? 
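+        # Unlike TextSimilarity above, the Python and ScoreModel criteria
+        # declare `pass_threshold` as nullable and optional in `initialize`.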
+ + def pass_threshold=: (Float _) -> Float + + def initialize: (?pass_threshold: Float) -> void + + def to_hash: -> { pass_threshold: Float } + end + + type score_model = { pass_threshold: Float } + + class ScoreModel < OpenAI::Models::Graders::ScoreModelGrader + def pass_threshold: -> Float? + + def pass_threshold=: (Float _) -> Float + + def initialize: (?pass_threshold: Float) -> void + + def to_hash: -> { pass_threshold: Float } + end + + def self?.variants: -> ::Array[OpenAI::Models::EvalCreateParams::testing_criterion] + end + end + end +end diff --git a/sig/openai/models/eval_create_response.rbs b/sig/openai/models/eval_create_response.rbs new file mode 100644 index 00000000..0d4799af --- /dev/null +++ b/sig/openai/models/eval_create_response.rbs @@ -0,0 +1,137 @@ +module OpenAI + module Models + type eval_create_response = + { + id: String, + created_at: Integer, + data_source_config: OpenAI::Models::EvalCreateResponse::data_source_config, + metadata: OpenAI::Models::metadata?, + name: String, + object: :eval, + testing_criteria: ::Array[OpenAI::Models::EvalCreateResponse::testing_criterion] + } + + class EvalCreateResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data_source_config: OpenAI::Models::EvalCreateResponse::data_source_config + + attr_accessor metadata: OpenAI::Models::metadata? + + attr_accessor name: String + + attr_accessor object: :eval + + attr_accessor testing_criteria: ::Array[OpenAI::Models::EvalCreateResponse::testing_criterion] + + def initialize: ( + id: String, + created_at: Integer, + data_source_config: OpenAI::Models::EvalCreateResponse::data_source_config, + metadata: OpenAI::Models::metadata?, + name: String, + testing_criteria: ::Array[OpenAI::Models::EvalCreateResponse::testing_criterion], + ?object: :eval + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data_source_config: OpenAI::Models::EvalCreateResponse::data_source_config, + metadata: OpenAI::Models::metadata?, + name: String, + object: :eval, + testing_criteria: ::Array[OpenAI::Models::EvalCreateResponse::testing_criterion] + } + + type data_source_config = + OpenAI::EvalCustomDataSourceConfig + | OpenAI::Models::EvalCreateResponse::DataSourceConfig::Logs + | OpenAI::EvalStoredCompletionsDataSourceConfig + + module DataSourceConfig + extend OpenAI::Internal::Type::Union + + type logs = + { + schema: ::Hash[Symbol, top], + type: :logs, + metadata: OpenAI::Models::metadata? + } + + class Logs < OpenAI::Internal::Type::BaseModel + attr_accessor schema: ::Hash[Symbol, top] + + attr_accessor type: :logs + + attr_accessor metadata: OpenAI::Models::metadata? + + def initialize: ( + schema: ::Hash[Symbol, top], + ?metadata: OpenAI::Models::metadata?, + ?type: :logs + ) -> void + + def to_hash: -> { + schema: ::Hash[Symbol, top], + type: :logs, + metadata: OpenAI::Models::metadata? 
+ } + end + + def self?.variants: -> ::Array[OpenAI::Models::EvalCreateResponse::data_source_config] + end + + type testing_criterion = + OpenAI::Models::Graders::LabelModelGrader + | OpenAI::Models::Graders::StringCheckGrader + | OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderTextSimilarity + | OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderPython + | OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderScoreModel + + module TestingCriterion + extend OpenAI::Internal::Type::Union + + type eval_grader_text_similarity = { pass_threshold: Float } + + class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader + def pass_threshold: -> Float + + def pass_threshold=: (Float _) -> Float + + def initialize: (pass_threshold: Float) -> void + + def to_hash: -> { pass_threshold: Float } + end + + type eval_grader_python = { pass_threshold: Float } + + class EvalGraderPython < OpenAI::Models::Graders::PythonGrader + def pass_threshold: -> Float? + + def pass_threshold=: (Float _) -> Float + + def initialize: (?pass_threshold: Float) -> void + + def to_hash: -> { pass_threshold: Float } + end + + type eval_grader_score_model = { pass_threshold: Float } + + class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader + def pass_threshold: -> Float? + + def pass_threshold=: (Float _) -> Float + + def initialize: (?pass_threshold: Float) -> void + + def to_hash: -> { pass_threshold: Float } + end + + def self?.variants: -> ::Array[OpenAI::Models::EvalCreateResponse::testing_criterion] + end + end + end +end diff --git a/sig/openai/models/eval_custom_data_source_config.rbs b/sig/openai/models/eval_custom_data_source_config.rbs new file mode 100644 index 00000000..6d412cef --- /dev/null +++ b/sig/openai/models/eval_custom_data_source_config.rbs @@ -0,0 +1,16 @@ +module OpenAI + module Models + type eval_custom_data_source_config = + { schema: ::Hash[Symbol, top], type: :custom } + + class EvalCustomDataSourceConfig < OpenAI::Internal::Type::BaseModel + attr_accessor schema: ::Hash[Symbol, top] + + attr_accessor type: :custom + + def initialize: (schema: ::Hash[Symbol, top], ?type: :custom) -> void + + def to_hash: -> { schema: ::Hash[Symbol, top], type: :custom } + end + end +end diff --git a/sig/openai/models/eval_delete_params.rbs b/sig/openai/models/eval_delete_params.rbs new file mode 100644 index 00000000..f5a6eb71 --- /dev/null +++ b/sig/openai/models/eval_delete_params.rbs @@ -0,0 +1,14 @@ +module OpenAI + module Models + type eval_delete_params = { } & OpenAI::Internal::Type::request_parameters + + class EvalDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + def initialize: (?request_options: OpenAI::request_opts) -> void + + def to_hash: -> { request_options: OpenAI::RequestOptions } + end + end +end diff --git a/sig/openai/models/eval_delete_response.rbs b/sig/openai/models/eval_delete_response.rbs new file mode 100644 index 00000000..95ec2c86 --- /dev/null +++ b/sig/openai/models/eval_delete_response.rbs @@ -0,0 +1,18 @@ +module OpenAI + module Models + type eval_delete_response = + { deleted: bool, eval_id: String, object: String } + + class EvalDeleteResponse < OpenAI::Internal::Type::BaseModel + attr_accessor deleted: bool + + attr_accessor eval_id: String + + attr_accessor object: String + + def initialize: (deleted: bool, eval_id: String, object: String) -> void + + def to_hash: -> { deleted: bool, 
eval_id: String, object: String } + end + end +end diff --git a/sig/openai/models/eval_list_params.rbs b/sig/openai/models/eval_list_params.rbs new file mode 100644 index 00000000..ff25dbd4 --- /dev/null +++ b/sig/openai/models/eval_list_params.rbs @@ -0,0 +1,75 @@ +module OpenAI + module Models + type eval_list_params = + { + after: String, + limit: Integer, + order: OpenAI::Models::EvalListParams::order, + order_by: OpenAI::Models::EvalListParams::order_by + } + & OpenAI::Internal::Type::request_parameters + + class EvalListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_reader after: String? + + def after=: (String) -> String + + attr_reader limit: Integer? + + def limit=: (Integer) -> Integer + + attr_reader order: OpenAI::Models::EvalListParams::order? + + def order=: ( + OpenAI::Models::EvalListParams::order + ) -> OpenAI::Models::EvalListParams::order + + attr_reader order_by: OpenAI::Models::EvalListParams::order_by? + + def order_by=: ( + OpenAI::Models::EvalListParams::order_by + ) -> OpenAI::Models::EvalListParams::order_by + + def initialize: ( + ?after: String, + ?limit: Integer, + ?order: OpenAI::Models::EvalListParams::order, + ?order_by: OpenAI::Models::EvalListParams::order_by, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + after: String, + limit: Integer, + order: OpenAI::Models::EvalListParams::order, + order_by: OpenAI::Models::EvalListParams::order_by, + request_options: OpenAI::RequestOptions + } + + type order = :asc | :desc + + module Order + extend OpenAI::Internal::Type::Enum + + ASC: :asc + DESC: :desc + + def self?.values: -> ::Array[OpenAI::Models::EvalListParams::order] + end + + type order_by = :created_at | :updated_at + + module OrderBy + extend OpenAI::Internal::Type::Enum + + CREATED_AT: :created_at + UPDATED_AT: :updated_at + + def self?.values: -> ::Array[OpenAI::Models::EvalListParams::order_by] + end + end + end +end diff --git a/sig/openai/models/eval_list_response.rbs b/sig/openai/models/eval_list_response.rbs new file mode 100644 index 00000000..ef01d7c9 --- /dev/null +++ b/sig/openai/models/eval_list_response.rbs @@ -0,0 +1,137 @@ +module OpenAI + module Models + type eval_list_response = + { + id: String, + created_at: Integer, + data_source_config: OpenAI::Models::EvalListResponse::data_source_config, + metadata: OpenAI::Models::metadata?, + name: String, + object: :eval, + testing_criteria: ::Array[OpenAI::Models::EvalListResponse::testing_criterion] + } + + class EvalListResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data_source_config: OpenAI::Models::EvalListResponse::data_source_config + + attr_accessor metadata: OpenAI::Models::metadata? 
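+      # `OpenAI::Models::metadata` is the shared alias (declared elsewhere in
+      # sig/) for the API's string-to-string metadata map, limited to 16
+      # key/value pairs per the API docs; it is nullable on responses.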
+ + attr_accessor name: String + + attr_accessor object: :eval + + attr_accessor testing_criteria: ::Array[OpenAI::Models::EvalListResponse::testing_criterion] + + def initialize: ( + id: String, + created_at: Integer, + data_source_config: OpenAI::Models::EvalListResponse::data_source_config, + metadata: OpenAI::Models::metadata?, + name: String, + testing_criteria: ::Array[OpenAI::Models::EvalListResponse::testing_criterion], + ?object: :eval + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data_source_config: OpenAI::Models::EvalListResponse::data_source_config, + metadata: OpenAI::Models::metadata?, + name: String, + object: :eval, + testing_criteria: ::Array[OpenAI::Models::EvalListResponse::testing_criterion] + } + + type data_source_config = + OpenAI::EvalCustomDataSourceConfig + | OpenAI::Models::EvalListResponse::DataSourceConfig::Logs + | OpenAI::EvalStoredCompletionsDataSourceConfig + + module DataSourceConfig + extend OpenAI::Internal::Type::Union + + type logs = + { + schema: ::Hash[Symbol, top], + type: :logs, + metadata: OpenAI::Models::metadata? + } + + class Logs < OpenAI::Internal::Type::BaseModel + attr_accessor schema: ::Hash[Symbol, top] + + attr_accessor type: :logs + + attr_accessor metadata: OpenAI::Models::metadata? + + def initialize: ( + schema: ::Hash[Symbol, top], + ?metadata: OpenAI::Models::metadata?, + ?type: :logs + ) -> void + + def to_hash: -> { + schema: ::Hash[Symbol, top], + type: :logs, + metadata: OpenAI::Models::metadata? + } + end + + def self?.variants: -> ::Array[OpenAI::Models::EvalListResponse::data_source_config] + end + + type testing_criterion = + OpenAI::Models::Graders::LabelModelGrader + | OpenAI::Models::Graders::StringCheckGrader + | OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity + | OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython + | OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel + + module TestingCriterion + extend OpenAI::Internal::Type::Union + + type eval_grader_text_similarity = { pass_threshold: Float } + + class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader + def pass_threshold: -> Float + + def pass_threshold=: (Float _) -> Float + + def initialize: (pass_threshold: Float) -> void + + def to_hash: -> { pass_threshold: Float } + end + + type eval_grader_python = { pass_threshold: Float } + + class EvalGraderPython < OpenAI::Models::Graders::PythonGrader + def pass_threshold: -> Float? + + def pass_threshold=: (Float _) -> Float + + def initialize: (?pass_threshold: Float) -> void + + def to_hash: -> { pass_threshold: Float } + end + + type eval_grader_score_model = { pass_threshold: Float } + + class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader + def pass_threshold: -> Float? 
+ + def pass_threshold=: (Float _) -> Float + + def initialize: (?pass_threshold: Float) -> void + + def to_hash: -> { pass_threshold: Float } + end + + def self?.variants: -> ::Array[OpenAI::Models::EvalListResponse::testing_criterion] + end + end + end +end diff --git a/sig/openai/models/eval_retrieve_params.rbs b/sig/openai/models/eval_retrieve_params.rbs new file mode 100644 index 00000000..167a0920 --- /dev/null +++ b/sig/openai/models/eval_retrieve_params.rbs @@ -0,0 +1,15 @@ +module OpenAI + module Models + type eval_retrieve_params = + { } & OpenAI::Internal::Type::request_parameters + + class EvalRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + def initialize: (?request_options: OpenAI::request_opts) -> void + + def to_hash: -> { request_options: OpenAI::RequestOptions } + end + end +end diff --git a/sig/openai/models/eval_retrieve_response.rbs b/sig/openai/models/eval_retrieve_response.rbs new file mode 100644 index 00000000..10f46696 --- /dev/null +++ b/sig/openai/models/eval_retrieve_response.rbs @@ -0,0 +1,137 @@ +module OpenAI + module Models + type eval_retrieve_response = + { + id: String, + created_at: Integer, + data_source_config: OpenAI::Models::EvalRetrieveResponse::data_source_config, + metadata: OpenAI::Models::metadata?, + name: String, + object: :eval, + testing_criteria: ::Array[OpenAI::Models::EvalRetrieveResponse::testing_criterion] + } + + class EvalRetrieveResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data_source_config: OpenAI::Models::EvalRetrieveResponse::data_source_config + + attr_accessor metadata: OpenAI::Models::metadata? + + attr_accessor name: String + + attr_accessor object: :eval + + attr_accessor testing_criteria: ::Array[OpenAI::Models::EvalRetrieveResponse::testing_criterion] + + def initialize: ( + id: String, + created_at: Integer, + data_source_config: OpenAI::Models::EvalRetrieveResponse::data_source_config, + metadata: OpenAI::Models::metadata?, + name: String, + testing_criteria: ::Array[OpenAI::Models::EvalRetrieveResponse::testing_criterion], + ?object: :eval + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data_source_config: OpenAI::Models::EvalRetrieveResponse::data_source_config, + metadata: OpenAI::Models::metadata?, + name: String, + object: :eval, + testing_criteria: ::Array[OpenAI::Models::EvalRetrieveResponse::testing_criterion] + } + + type data_source_config = + OpenAI::EvalCustomDataSourceConfig + | OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Logs + | OpenAI::EvalStoredCompletionsDataSourceConfig + + module DataSourceConfig + extend OpenAI::Internal::Type::Union + + type logs = + { + schema: ::Hash[Symbol, top], + type: :logs, + metadata: OpenAI::Models::metadata? + } + + class Logs < OpenAI::Internal::Type::BaseModel + attr_accessor schema: ::Hash[Symbol, top] + + attr_accessor type: :logs + + attr_accessor metadata: OpenAI::Models::metadata? + + def initialize: ( + schema: ::Hash[Symbol, top], + ?metadata: OpenAI::Models::metadata?, + ?type: :logs + ) -> void + + def to_hash: -> { + schema: ::Hash[Symbol, top], + type: :logs, + metadata: OpenAI::Models::metadata? 
+ } + end + + def self?.variants: -> ::Array[OpenAI::Models::EvalRetrieveResponse::data_source_config] + end + + type testing_criterion = + OpenAI::Models::Graders::LabelModelGrader + | OpenAI::Models::Graders::StringCheckGrader + | OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity + | OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython + | OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel + + module TestingCriterion + extend OpenAI::Internal::Type::Union + + type eval_grader_text_similarity = { pass_threshold: Float } + + class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader + def pass_threshold: -> Float + + def pass_threshold=: (Float _) -> Float + + def initialize: (pass_threshold: Float) -> void + + def to_hash: -> { pass_threshold: Float } + end + + type eval_grader_python = { pass_threshold: Float } + + class EvalGraderPython < OpenAI::Models::Graders::PythonGrader + def pass_threshold: -> Float? + + def pass_threshold=: (Float _) -> Float + + def initialize: (?pass_threshold: Float) -> void + + def to_hash: -> { pass_threshold: Float } + end + + type eval_grader_score_model = { pass_threshold: Float } + + class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader + def pass_threshold: -> Float? + + def pass_threshold=: (Float _) -> Float + + def initialize: (?pass_threshold: Float) -> void + + def to_hash: -> { pass_threshold: Float } + end + + def self?.variants: -> ::Array[OpenAI::Models::EvalRetrieveResponse::testing_criterion] + end + end + end +end diff --git a/sig/openai/models/eval_stored_completions_data_source_config.rbs b/sig/openai/models/eval_stored_completions_data_source_config.rbs new file mode 100644 index 00000000..1237e523 --- /dev/null +++ b/sig/openai/models/eval_stored_completions_data_source_config.rbs @@ -0,0 +1,30 @@ +module OpenAI + module Models + type eval_stored_completions_data_source_config = + { + schema: ::Hash[Symbol, top], + type: :stored_completions, + metadata: OpenAI::Models::metadata? + } + + class EvalStoredCompletionsDataSourceConfig < OpenAI::Internal::Type::BaseModel + attr_accessor schema: ::Hash[Symbol, top] + + attr_accessor type: :stored_completions + + attr_accessor metadata: OpenAI::Models::metadata? + + def initialize: ( + schema: ::Hash[Symbol, top], + ?metadata: OpenAI::Models::metadata?, + ?type: :stored_completions + ) -> void + + def to_hash: -> { + schema: ::Hash[Symbol, top], + type: :stored_completions, + metadata: OpenAI::Models::metadata? + } + end + end +end diff --git a/sig/openai/models/eval_update_params.rbs b/sig/openai/models/eval_update_params.rbs new file mode 100644 index 00000000..fc6c2540 --- /dev/null +++ b/sig/openai/models/eval_update_params.rbs @@ -0,0 +1,30 @@ +module OpenAI + module Models + type eval_update_params = + { metadata: OpenAI::Models::metadata?, name: String } + & OpenAI::Internal::Type::request_parameters + + class EvalUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor metadata: OpenAI::Models::metadata? + + attr_reader name: String? 
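+      # Illustrative call shape for these params, assuming the companion
+      # `client.evals.update` resource method; the id and values below are
+      # placeholders only:
+      #
+      #   client.evals.update("eval_...", name: "renamed", metadata: {team: "qa"})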
+ + def name=: (String) -> String + + def initialize: ( + ?metadata: OpenAI::Models::metadata?, + ?name: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + metadata: OpenAI::Models::metadata?, + name: String, + request_options: OpenAI::RequestOptions + } + end + end +end diff --git a/sig/openai/models/eval_update_response.rbs b/sig/openai/models/eval_update_response.rbs new file mode 100644 index 00000000..532a5435 --- /dev/null +++ b/sig/openai/models/eval_update_response.rbs @@ -0,0 +1,137 @@ +module OpenAI + module Models + type eval_update_response = + { + id: String, + created_at: Integer, + data_source_config: OpenAI::Models::EvalUpdateResponse::data_source_config, + metadata: OpenAI::Models::metadata?, + name: String, + object: :eval, + testing_criteria: ::Array[OpenAI::Models::EvalUpdateResponse::testing_criterion] + } + + class EvalUpdateResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data_source_config: OpenAI::Models::EvalUpdateResponse::data_source_config + + attr_accessor metadata: OpenAI::Models::metadata? + + attr_accessor name: String + + attr_accessor object: :eval + + attr_accessor testing_criteria: ::Array[OpenAI::Models::EvalUpdateResponse::testing_criterion] + + def initialize: ( + id: String, + created_at: Integer, + data_source_config: OpenAI::Models::EvalUpdateResponse::data_source_config, + metadata: OpenAI::Models::metadata?, + name: String, + testing_criteria: ::Array[OpenAI::Models::EvalUpdateResponse::testing_criterion], + ?object: :eval + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data_source_config: OpenAI::Models::EvalUpdateResponse::data_source_config, + metadata: OpenAI::Models::metadata?, + name: String, + object: :eval, + testing_criteria: ::Array[OpenAI::Models::EvalUpdateResponse::testing_criterion] + } + + type data_source_config = + OpenAI::EvalCustomDataSourceConfig + | OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Logs + | OpenAI::EvalStoredCompletionsDataSourceConfig + + module DataSourceConfig + extend OpenAI::Internal::Type::Union + + type logs = + { + schema: ::Hash[Symbol, top], + type: :logs, + metadata: OpenAI::Models::metadata? + } + + class Logs < OpenAI::Internal::Type::BaseModel + attr_accessor schema: ::Hash[Symbol, top] + + attr_accessor type: :logs + + attr_accessor metadata: OpenAI::Models::metadata? + + def initialize: ( + schema: ::Hash[Symbol, top], + ?metadata: OpenAI::Models::metadata?, + ?type: :logs + ) -> void + + def to_hash: -> { + schema: ::Hash[Symbol, top], + type: :logs, + metadata: OpenAI::Models::metadata? 
+ } + end + + def self?.variants: -> ::Array[OpenAI::Models::EvalUpdateResponse::data_source_config] + end + + type testing_criterion = + OpenAI::Models::Graders::LabelModelGrader + | OpenAI::Models::Graders::StringCheckGrader + | OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderTextSimilarity + | OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderPython + | OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderScoreModel + + module TestingCriterion + extend OpenAI::Internal::Type::Union + + type eval_grader_text_similarity = { pass_threshold: Float } + + class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader + def pass_threshold: -> Float + + def pass_threshold=: (Float _) -> Float + + def initialize: (pass_threshold: Float) -> void + + def to_hash: -> { pass_threshold: Float } + end + + type eval_grader_python = { pass_threshold: Float } + + class EvalGraderPython < OpenAI::Models::Graders::PythonGrader + def pass_threshold: -> Float? + + def pass_threshold=: (Float _) -> Float + + def initialize: (?pass_threshold: Float) -> void + + def to_hash: -> { pass_threshold: Float } + end + + type eval_grader_score_model = { pass_threshold: Float } + + class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader + def pass_threshold: -> Float? + + def pass_threshold=: (Float _) -> Float + + def initialize: (?pass_threshold: Float) -> void + + def to_hash: -> { pass_threshold: Float } + end + + def self?.variants: -> ::Array[OpenAI::Models::EvalUpdateResponse::testing_criterion] + end + end + end +end diff --git a/sig/openai/models/evals/create_eval_completions_run_data_source.rbs b/sig/openai/models/evals/create_eval_completions_run_data_source.rbs new file mode 100644 index 00000000..9e72fe05 --- /dev/null +++ b/sig/openai/models/evals/create_eval_completions_run_data_source.rbs @@ -0,0 +1,403 @@ +module OpenAI + module Models + module Evals + type create_eval_completions_run_data_source = + { + source: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::source, + type: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::type_, + input_messages: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::input_messages, + model: String, + sampling_params: OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams + } + + class CreateEvalCompletionsRunDataSource < OpenAI::Internal::Type::BaseModel + attr_accessor source: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::source + + attr_accessor type: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::type_ + + attr_reader input_messages: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::input_messages? + + def input_messages=: ( + OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::input_messages + ) -> OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::input_messages + + attr_reader model: String? + + def model=: (String) -> String + + attr_reader sampling_params: OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams? 
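+        # An illustrative hash satisfying this record type (the file id and
+        # model name are placeholder values, not defaults):
+        #
+        #   {
+        #     type: :completions,
+        #     source: {type: :file_id, id: "file-..."},
+        #     model: "gpt-4o-mini"
+        #   }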
+ + def sampling_params=: ( + OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams + ) -> OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams + + def initialize: ( + source: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::source, + type: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::type_, + ?input_messages: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::input_messages, + ?model: String, + ?sampling_params: OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams + ) -> void + + def to_hash: -> { + source: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::source, + type: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::type_, + input_messages: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::input_messages, + model: String, + sampling_params: OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams + } + + type source = + OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent + | OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileID + | OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions + + module Source + extend OpenAI::Internal::Type::Union + + type file_content = + { + content: ::Array[OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent::Content], + type: :file_content + } + + class FileContent < OpenAI::Internal::Type::BaseModel + attr_accessor content: ::Array[OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent::Content] + + attr_accessor type: :file_content + + def initialize: ( + content: ::Array[OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent::Content], + ?type: :file_content + ) -> void + + def to_hash: -> { + content: ::Array[OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent::Content], + type: :file_content + } + + type content = + { item: ::Hash[Symbol, top], sample: ::Hash[Symbol, top] } + + class Content < OpenAI::Internal::Type::BaseModel + attr_accessor item: ::Hash[Symbol, top] + + attr_reader sample: ::Hash[Symbol, top]? + + def sample=: (::Hash[Symbol, top]) -> ::Hash[Symbol, top] + + def initialize: ( + item: ::Hash[Symbol, top], + ?sample: ::Hash[Symbol, top] + ) -> void + + def to_hash: -> { + item: ::Hash[Symbol, top], + sample: ::Hash[Symbol, top] + } + end + end + + type file_id = { id: String, type: :file_id } + + class FileID < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor type: :file_id + + def initialize: (id: String, ?type: :file_id) -> void + + def to_hash: -> { id: String, type: :file_id } + end + + type stored_completions = + { + type: :stored_completions, + created_after: Integer?, + created_before: Integer?, + limit: Integer?, + metadata: OpenAI::Models::metadata?, + model: String? + } + + class StoredCompletions < OpenAI::Internal::Type::BaseModel + attr_accessor type: :stored_completions + + attr_accessor created_after: Integer? + + attr_accessor created_before: Integer? + + attr_accessor limit: Integer? + + attr_accessor metadata: OpenAI::Models::metadata? + + attr_accessor model: String? + + def initialize: ( + ?created_after: Integer?, + ?created_before: Integer?, + ?limit: Integer?, + ?metadata: OpenAI::Models::metadata?, + ?model: String?, + ?type: :stored_completions + ) -> void + + def to_hash: -> { + type: :stored_completions, + created_after: Integer?, + created_before: Integer?, + limit: Integer?, + metadata: OpenAI::Models::metadata?, + model: String? 
+ } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::source] + end + + type type_ = :completions + + module Type + extend OpenAI::Internal::Type::Enum + + COMPLETIONS: :completions + + def self?.values: -> ::Array[OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::type_] + end + + type input_messages = + OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template + | OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::ItemReference + + module InputMessages + extend OpenAI::Internal::Type::Union + + type template = + { + template: ::Array[OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::template], + type: :template + } + + class Template < OpenAI::Internal::Type::BaseModel + attr_accessor template: ::Array[OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::template] + + attr_accessor type: :template + + def initialize: ( + template: ::Array[OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::template], + ?type: :template + ) -> void + + def to_hash: -> { + template: ::Array[OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::template], + type: :template + } + + type template = + OpenAI::Responses::EasyInputMessage + | OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem + + module Template + extend OpenAI::Internal::Type::Union + + type eval_item = + { + content: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::role, + type: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::type_ + } + + class EvalItem < OpenAI::Internal::Type::BaseModel + attr_accessor content: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::content + + attr_accessor role: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::role + + attr_reader type: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::type_? 
+ + def type=: ( + OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::type_ + ) -> OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::type_ + + def initialize: ( + content: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::role, + ?type: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::type_ + ) -> void + + def to_hash: -> { + content: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::role, + type: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::type_ + } + + type content = + String + | OpenAI::Responses::ResponseInputText + | OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText + | OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage + | ::Array[top] + + module Content + extend OpenAI::Internal::Type::Union + + type output_text = { text: String, type: :output_text } + + class OutputText < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :output_text + + def initialize: (text: String, ?type: :output_text) -> void + + def to_hash: -> { text: String, type: :output_text } + end + + type input_image = + { image_url: String, type: :input_image, detail: String } + + class InputImage < OpenAI::Internal::Type::BaseModel + attr_accessor image_url: String + + attr_accessor type: :input_image + + attr_reader detail: String? 
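+                  # `detail` is typed as a bare String here; the image detail
+                  # levels the API documents ("low", "high", "auto") are not
+                  # enforced at the type level.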
+ + def detail=: (String) -> String + + def initialize: ( + image_url: String, + ?detail: String, + ?type: :input_image + ) -> void + + def to_hash: -> { + image_url: String, + type: :input_image, + detail: String + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::content] + + AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter + end + + type role = :user | :assistant | :system | :developer + + module Role + extend OpenAI::Internal::Type::Enum + + USER: :user + ASSISTANT: :assistant + SYSTEM: :system + DEVELOPER: :developer + + def self?.values: -> ::Array[OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::role] + end + + type type_ = :message + + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE: :message + + def self?.values: -> ::Array[OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::EvalItem::type_] + end + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::template] + end + end + + type item_reference = + { item_reference: String, type: :item_reference } + + class ItemReference < OpenAI::Internal::Type::BaseModel + attr_accessor item_reference: String + + attr_accessor type: :item_reference + + def initialize: ( + item_reference: String, + ?type: :item_reference + ) -> void + + def to_hash: -> { item_reference: String, type: :item_reference } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::input_messages] + end + + type sampling_params = + { + max_completion_tokens: Integer, + response_format: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format, + seed: Integer, + temperature: Float, + tools: ::Array[OpenAI::Chat::ChatCompletionFunctionTool], + top_p: Float + } + + class SamplingParams < OpenAI::Internal::Type::BaseModel + attr_reader max_completion_tokens: Integer? + + def max_completion_tokens=: (Integer) -> Integer + + attr_reader response_format: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format? + + def response_format=: ( + OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format + ) -> OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format + + attr_reader seed: Integer? + + def seed=: (Integer) -> Integer + + attr_reader temperature: Float? + + def temperature=: (Float) -> Float + + attr_reader tools: ::Array[OpenAI::Chat::ChatCompletionFunctionTool]? + + def tools=: ( + ::Array[OpenAI::Chat::ChatCompletionFunctionTool] + ) -> ::Array[OpenAI::Chat::ChatCompletionFunctionTool] + + attr_reader top_p: Float? 
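+          # These mirror the usual Chat Completions sampling knobs; every
+          # field is optional, so an empty SamplingParams is itself valid.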
+ + def top_p=: (Float) -> Float + + def initialize: ( + ?max_completion_tokens: Integer, + ?response_format: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format, + ?seed: Integer, + ?temperature: Float, + ?tools: ::Array[OpenAI::Chat::ChatCompletionFunctionTool], + ?top_p: Float + ) -> void + + def to_hash: -> { + max_completion_tokens: Integer, + response_format: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format, + seed: Integer, + temperature: Float, + tools: ::Array[OpenAI::Chat::ChatCompletionFunctionTool], + top_p: Float + } + + type response_format = + OpenAI::ResponseFormatText + | OpenAI::ResponseFormatJSONSchema + | OpenAI::ResponseFormatJSONObject + + module ResponseFormat + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format] + end + end + end + end + end +end diff --git a/sig/openai/models/evals/create_eval_jsonl_run_data_source.rbs b/sig/openai/models/evals/create_eval_jsonl_run_data_source.rbs new file mode 100644 index 00000000..ff6c8b6a --- /dev/null +++ b/sig/openai/models/evals/create_eval_jsonl_run_data_source.rbs @@ -0,0 +1,92 @@ +module OpenAI + module Models + module Evals + type create_eval_jsonl_run_data_source = + { + source: OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::source, + type: :jsonl + } + + class CreateEvalJSONLRunDataSource < OpenAI::Internal::Type::BaseModel + attr_accessor source: OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::source + + attr_accessor type: :jsonl + + def initialize: ( + source: OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::source, + ?type: :jsonl + ) -> void + + def to_hash: -> { + source: OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::source, + type: :jsonl + } + + type source = + OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent + | OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileID + + module Source + extend OpenAI::Internal::Type::Union + + type file_content = + { + content: ::Array[OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent::Content], + type: :file_content + } + + class FileContent < OpenAI::Internal::Type::BaseModel + attr_accessor content: ::Array[OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent::Content] + + attr_accessor type: :file_content + + def initialize: ( + content: ::Array[OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent::Content], + ?type: :file_content + ) -> void + + def to_hash: -> { + content: ::Array[OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent::Content], + type: :file_content + } + + type content = + { item: ::Hash[Symbol, top], sample: ::Hash[Symbol, top] } + + class Content < OpenAI::Internal::Type::BaseModel + attr_accessor item: ::Hash[Symbol, top] + + attr_reader sample: ::Hash[Symbol, top]? 
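+            # Per the eval-run docs, `item` holds one datum row and `sample`
+            # optionally holds a previously generated model output for it.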
+ + def sample=: (::Hash[Symbol, top]) -> ::Hash[Symbol, top] + + def initialize: ( + item: ::Hash[Symbol, top], + ?sample: ::Hash[Symbol, top] + ) -> void + + def to_hash: -> { + item: ::Hash[Symbol, top], + sample: ::Hash[Symbol, top] + } + end + end + + type file_id = { id: String, type: :file_id } + + class FileID < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor type: :file_id + + def initialize: (id: String, ?type: :file_id) -> void + + def to_hash: -> { id: String, type: :file_id } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::source] + end + end + end + end +end diff --git a/sig/openai/models/evals/eval_api_error.rbs b/sig/openai/models/evals/eval_api_error.rbs new file mode 100644 index 00000000..9504768f --- /dev/null +++ b/sig/openai/models/evals/eval_api_error.rbs @@ -0,0 +1,19 @@ +module OpenAI + module Models + class EvalAPIError = Evals::EvalAPIError + + module Evals + type eval_api_error = { code: String, message: String } + + class EvalAPIError < OpenAI::Internal::Type::BaseModel + attr_accessor code: String + + attr_accessor message: String + + def initialize: (code: String, message: String) -> void + + def to_hash: -> { code: String, message: String } + end + end + end +end diff --git a/sig/openai/models/evals/run_cancel_params.rbs b/sig/openai/models/evals/run_cancel_params.rbs new file mode 100644 index 00000000..19118357 --- /dev/null +++ b/sig/openai/models/evals/run_cancel_params.rbs @@ -0,0 +1,25 @@ +module OpenAI + module Models + module Evals + type run_cancel_params = + { eval_id: String } & OpenAI::Internal::Type::request_parameters + + class RunCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor eval_id: String + + def initialize: ( + eval_id: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + eval_id: String, + request_options: OpenAI::RequestOptions + } + end + end + end +end diff --git a/sig/openai/models/evals/run_cancel_response.rbs b/sig/openai/models/evals/run_cancel_response.rbs new file mode 100644 index 00000000..3590bb4d --- /dev/null +++ b/sig/openai/models/evals/run_cancel_response.rbs @@ -0,0 +1,635 @@ +module OpenAI + module Models + module Evals + type run_cancel_response = + { + id: String, + created_at: Integer, + data_source: OpenAI::Models::Evals::RunCancelResponse::data_source, + error: OpenAI::Evals::EvalAPIError, + eval_id: String, + metadata: OpenAI::Models::metadata?, + model: String, + name: String, + object: :"eval.run", + per_model_usage: ::Array[OpenAI::Models::Evals::RunCancelResponse::PerModelUsage], + per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunCancelResponse::PerTestingCriteriaResult], + report_url: String, + result_counts: OpenAI::Models::Evals::RunCancelResponse::ResultCounts, + status: String + } + + class RunCancelResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data_source: OpenAI::Models::Evals::RunCancelResponse::data_source + + attr_accessor error: OpenAI::Evals::EvalAPIError + + attr_accessor eval_id: String + + attr_accessor metadata: OpenAI::Models::metadata? 
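+        # The accessors that follow mirror the public eval-run resource; note
+        # that `status` is left as a plain String (values such as "queued" or
+        # "completed" per the API docs, unvalidated at the type level).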
+ + attr_accessor model: String + + attr_accessor name: String + + attr_accessor object: :"eval.run" + + attr_accessor per_model_usage: ::Array[OpenAI::Models::Evals::RunCancelResponse::PerModelUsage] + + attr_accessor per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunCancelResponse::PerTestingCriteriaResult] + + attr_accessor report_url: String + + attr_accessor result_counts: OpenAI::Models::Evals::RunCancelResponse::ResultCounts + + attr_accessor status: String + + def initialize: ( + id: String, + created_at: Integer, + data_source: OpenAI::Models::Evals::RunCancelResponse::data_source, + error: OpenAI::Evals::EvalAPIError, + eval_id: String, + metadata: OpenAI::Models::metadata?, + model: String, + name: String, + per_model_usage: ::Array[OpenAI::Models::Evals::RunCancelResponse::PerModelUsage], + per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunCancelResponse::PerTestingCriteriaResult], + report_url: String, + result_counts: OpenAI::Models::Evals::RunCancelResponse::ResultCounts, + status: String, + ?object: :"eval.run" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data_source: OpenAI::Models::Evals::RunCancelResponse::data_source, + error: OpenAI::Evals::EvalAPIError, + eval_id: String, + metadata: OpenAI::Models::metadata?, + model: String, + name: String, + object: :"eval.run", + per_model_usage: ::Array[OpenAI::Models::Evals::RunCancelResponse::PerModelUsage], + per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunCancelResponse::PerTestingCriteriaResult], + report_url: String, + result_counts: OpenAI::Models::Evals::RunCancelResponse::ResultCounts, + status: String + } + + type data_source = + OpenAI::Evals::CreateEvalJSONLRunDataSource + | OpenAI::Evals::CreateEvalCompletionsRunDataSource + | OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses + + module DataSource + extend OpenAI::Internal::Type::Union + + type responses = + { + source: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::source, + type: :responses, + input_messages: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::input_messages, + model: String, + sampling_params: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams + } + + class Responses < OpenAI::Internal::Type::BaseModel + attr_accessor source: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::source + + attr_accessor type: :responses + + attr_reader input_messages: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::input_messages? + + def input_messages=: ( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::input_messages + ) -> OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::input_messages + + attr_reader model: String? + + def model=: (String) -> String + + attr_reader sampling_params: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams? 
+ + def sampling_params=: ( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams + ) -> OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams + + def initialize: ( + source: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::source, + ?input_messages: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::input_messages, + ?model: String, + ?sampling_params: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams, + ?type: :responses + ) -> void + + def to_hash: -> { + source: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::source, + type: :responses, + input_messages: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::input_messages, + model: String, + sampling_params: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams + } + + type source = + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent + | OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileID + | OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Responses + + module Source + extend OpenAI::Internal::Type::Union + + type file_content = + { + content: ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent::Content], + type: :file_content + } + + class FileContent < OpenAI::Internal::Type::BaseModel + attr_accessor content: ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent::Content] + + attr_accessor type: :file_content + + def initialize: ( + content: ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent::Content], + ?type: :file_content + ) -> void + + def to_hash: -> { + content: ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent::Content], + type: :file_content + } + + type content = + { item: ::Hash[Symbol, top], sample: ::Hash[Symbol, top] } + + class Content < OpenAI::Internal::Type::BaseModel + attr_accessor item: ::Hash[Symbol, top] + + attr_reader sample: ::Hash[Symbol, top]? + + def sample=: (::Hash[Symbol, top]) -> ::Hash[Symbol, top] + + def initialize: ( + item: ::Hash[Symbol, top], + ?sample: ::Hash[Symbol, top] + ) -> void + + def to_hash: -> { + item: ::Hash[Symbol, top], + sample: ::Hash[Symbol, top] + } + end + end + + type file_id = { id: String, type: :file_id } + + class FileID < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor type: :file_id + + def initialize: (id: String, ?type: :file_id) -> void + + def to_hash: -> { id: String, type: :file_id } + end + + type responses = + { + type: :responses, + created_after: Integer?, + created_before: Integer?, + instructions_search: String?, + metadata: top?, + model: String?, + reasoning_effort: OpenAI::Models::reasoning_effort?, + temperature: Float?, + tools: ::Array[String]?, + top_p: Float?, + users: ::Array[String]? + } + + class Responses < OpenAI::Internal::Type::BaseModel + attr_accessor type: :responses + + attr_accessor created_after: Integer? + + attr_accessor created_before: Integer? + + attr_accessor instructions_search: String? + + attr_accessor metadata: top? + + attr_accessor model: String? + + attr_accessor reasoning_effort: OpenAI::Models::reasoning_effort? + + attr_accessor temperature: Float? + + attr_accessor tools: ::Array[String]? + + attr_accessor top_p: Float? + + attr_accessor users: ::Array[String]? 
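+            # Each nullable field above evidently acts as a filter over which
+            # stored responses feed the run (creation window, metadata, model,
+            # sampling settings, tools, users); nil appears to mean "no filter".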
+ + def initialize: ( + ?created_after: Integer?, + ?created_before: Integer?, + ?instructions_search: String?, + ?metadata: top?, + ?model: String?, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, + ?temperature: Float?, + ?tools: ::Array[String]?, + ?top_p: Float?, + ?users: ::Array[String]?, + ?type: :responses + ) -> void + + def to_hash: -> { + type: :responses, + created_after: Integer?, + created_before: Integer?, + instructions_search: String?, + metadata: top?, + model: String?, + reasoning_effort: OpenAI::Models::reasoning_effort?, + temperature: Float?, + tools: ::Array[String]?, + top_p: Float?, + users: ::Array[String]? + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::source] + end + + type input_messages = + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template + | OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::ItemReference + + module InputMessages + extend OpenAI::Internal::Type::Union + + type template = + { + template: ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::template], + type: :template + } + + class Template < OpenAI::Internal::Type::BaseModel + attr_accessor template: ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::template] + + attr_accessor type: :template + + def initialize: ( + template: ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::template], + ?type: :template + ) -> void + + def to_hash: -> { + template: ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::template], + type: :template + } + + type template = + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage + | OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem + + module Template + extend OpenAI::Internal::Type::Union + + type chat_message = { content: String, role: String } + + class ChatMessage < OpenAI::Internal::Type::BaseModel + attr_accessor content: String + + attr_accessor role: String + + def initialize: (content: String, role: String) -> void + + def to_hash: -> { content: String, role: String } + end + + type eval_item = + { + content: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role, + type: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + } + + class EvalItem < OpenAI::Internal::Type::BaseModel + attr_accessor content: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content + + attr_accessor role: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role + + attr_reader type: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_? 
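+
+                      # NOTE (editorial): the alias for this field is spelled
+                      # `type_` because `type` is itself the RBS keyword that
+                      # introduces alias declarations; the Ruby-level accessor
+                      # keeps the plain `type` name, as in the writer below.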
+ + def type=: ( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + ) -> OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + + def initialize: ( + content: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role, + ?type: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + ) -> void + + def to_hash: -> { + content: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role, + type: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + } + + type content = + String + | OpenAI::Responses::ResponseInputText + | OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText + | OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage + | ::Array[top] + + module Content + extend OpenAI::Internal::Type::Union + + type output_text = { text: String, type: :output_text } + + class OutputText < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :output_text + + def initialize: ( + text: String, + ?type: :output_text + ) -> void + + def to_hash: -> { text: String, type: :output_text } + end + + type input_image = + { + image_url: String, + type: :input_image, + detail: String + } + + class InputImage < OpenAI::Internal::Type::BaseModel + attr_accessor image_url: String + + attr_accessor type: :input_image + + attr_reader detail: String? 
+ + def detail=: (String) -> String + + def initialize: ( + image_url: String, + ?detail: String, + ?type: :input_image + ) -> void + + def to_hash: -> { + image_url: String, + type: :input_image, + detail: String + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content] + + AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter + end + + type role = :user | :assistant | :system | :developer + + module Role + extend OpenAI::Internal::Type::Enum + + USER: :user + ASSISTANT: :assistant + SYSTEM: :system + DEVELOPER: :developer + + def self?.values: -> ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role] + end + + type type_ = :message + + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE: :message + + def self?.values: -> ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_] + end + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::template] + end + end + + type item_reference = + { item_reference: String, type: :item_reference } + + class ItemReference < OpenAI::Internal::Type::BaseModel + attr_accessor item_reference: String + + attr_accessor type: :item_reference + + def initialize: ( + item_reference: String, + ?type: :item_reference + ) -> void + + def to_hash: -> { + item_reference: String, + type: :item_reference + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::input_messages] + end + + type sampling_params = + { + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text, + tools: ::Array[OpenAI::Models::Responses::tool], + top_p: Float + } + + class SamplingParams < OpenAI::Internal::Type::BaseModel + attr_reader max_completion_tokens: Integer? + + def max_completion_tokens=: (Integer) -> Integer + + attr_reader seed: Integer? + + def seed=: (Integer) -> Integer + + attr_reader temperature: Float? + + def temperature=: (Float) -> Float + + attr_reader text: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text? + + def text=: ( + OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text + ) -> OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text + + attr_reader tools: ::Array[OpenAI::Models::Responses::tool]? + + def tools=: ( + ::Array[OpenAI::Models::Responses::tool] + ) -> ::Array[OpenAI::Models::Responses::tool] + + attr_reader top_p: Float? 
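+
+            # NOTE (editorial): these fields mirror the familiar sampling
+            # controls (`temperature`, `top_p`, `seed`, `max_completion_tokens`).
+            # In the nested `Text` model below, the response-format field is
+            # spelled `format_`, presumably so the generated accessor does not
+            # shadow Ruby's built-in `Kernel#format`.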
+ + def top_p=: (Float) -> Float + + def initialize: ( + ?max_completion_tokens: Integer, + ?seed: Integer, + ?temperature: Float, + ?text: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text, + ?tools: ::Array[OpenAI::Models::Responses::tool], + ?top_p: Float + ) -> void + + def to_hash: -> { + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text, + tools: ::Array[OpenAI::Models::Responses::tool], + top_p: Float + } + + type text = + { + format_: OpenAI::Models::Responses::response_format_text_config + } + + class Text < OpenAI::Internal::Type::BaseModel + attr_reader format_: OpenAI::Models::Responses::response_format_text_config? + + def format_=: ( + OpenAI::Models::Responses::response_format_text_config + ) -> OpenAI::Models::Responses::response_format_text_config + + def initialize: ( + ?format_: OpenAI::Models::Responses::response_format_text_config + ) -> void + + def to_hash: -> { + format_: OpenAI::Models::Responses::response_format_text_config + } + end + end + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCancelResponse::data_source] + end + + type per_model_usage = + { + cached_tokens: Integer, + completion_tokens: Integer, + invocation_count: Integer, + model_name: String, + prompt_tokens: Integer, + total_tokens: Integer + } + + class PerModelUsage < OpenAI::Internal::Type::BaseModel + attr_accessor cached_tokens: Integer + + attr_accessor completion_tokens: Integer + + attr_accessor invocation_count: Integer + + attr_accessor model_name: String + + attr_accessor prompt_tokens: Integer + + attr_accessor total_tokens: Integer + + def initialize: ( + cached_tokens: Integer, + completion_tokens: Integer, + invocation_count: Integer, + model_name: String, + prompt_tokens: Integer, + total_tokens: Integer + ) -> void + + def to_hash: -> { + cached_tokens: Integer, + completion_tokens: Integer, + invocation_count: Integer, + model_name: String, + prompt_tokens: Integer, + total_tokens: Integer + } + end + + type per_testing_criteria_result = + { failed: Integer, passed: Integer, testing_criteria: String } + + class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel + attr_accessor failed: Integer + + attr_accessor passed: Integer + + attr_accessor testing_criteria: String + + def initialize: ( + failed: Integer, + passed: Integer, + testing_criteria: String + ) -> void + + def to_hash: -> { + failed: Integer, + passed: Integer, + testing_criteria: String + } + end + + type result_counts = + { errored: Integer, failed: Integer, passed: Integer, total: Integer } + + class ResultCounts < OpenAI::Internal::Type::BaseModel + attr_accessor errored: Integer + + attr_accessor failed: Integer + + attr_accessor passed: Integer + + attr_accessor total: Integer + + def initialize: ( + errored: Integer, + failed: Integer, + passed: Integer, + total: Integer + ) -> void + + def to_hash: -> { + errored: Integer, + failed: Integer, + passed: Integer, + total: Integer + } + end + end + end + end +end diff --git a/sig/openai/models/evals/run_create_params.rbs b/sig/openai/models/evals/run_create_params.rbs new file mode 100644 index 00000000..c16120a9 --- /dev/null +++ b/sig/openai/models/evals/run_create_params.rbs @@ -0,0 +1,506 @@ +module OpenAI + module Models + module Evals + type run_create_params = + { + data_source: OpenAI::Models::Evals::RunCreateParams::data_source, + metadata: OpenAI::Models::metadata?, + name: 
String + } + & OpenAI::Internal::Type::request_parameters + + class RunCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor data_source: OpenAI::Models::Evals::RunCreateParams::data_source + + attr_accessor metadata: OpenAI::Models::metadata? + + attr_reader name: String? + + def name=: (String) -> String + + def initialize: ( + data_source: OpenAI::Models::Evals::RunCreateParams::data_source, + ?metadata: OpenAI::Models::metadata?, + ?name: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + data_source: OpenAI::Models::Evals::RunCreateParams::data_source, + metadata: OpenAI::Models::metadata?, + name: String, + request_options: OpenAI::RequestOptions + } + + type data_source = + OpenAI::Evals::CreateEvalJSONLRunDataSource + | OpenAI::Evals::CreateEvalCompletionsRunDataSource + | OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource + + module DataSource + extend OpenAI::Internal::Type::Union + + type create_eval_responses_run_data_source = + { + source: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::source, + type: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::type_, + input_messages: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::input_messages, + model: String, + sampling_params: OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams + } + + class CreateEvalResponsesRunDataSource < OpenAI::Internal::Type::BaseModel + attr_accessor source: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::source + + attr_accessor type: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::type_ + + attr_reader input_messages: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::input_messages? + + def input_messages=: ( + OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::input_messages + ) -> OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::input_messages + + attr_reader model: String? + + def model=: (String) -> String + + attr_reader sampling_params: OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams? 
+ + def sampling_params=: ( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams + ) -> OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams + + def initialize: ( + source: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::source, + type: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::type_, + ?input_messages: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::input_messages, + ?model: String, + ?sampling_params: OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams + ) -> void + + def to_hash: -> { + source: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::source, + type: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::type_, + input_messages: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::input_messages, + model: String, + sampling_params: OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams + } + + type source = + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent + | OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileID + | OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses + + module Source + extend OpenAI::Internal::Type::Union + + type file_content = + { + content: ::Array[OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent::Content], + type: :file_content + } + + class FileContent < OpenAI::Internal::Type::BaseModel + attr_accessor content: ::Array[OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent::Content] + + attr_accessor type: :file_content + + def initialize: ( + content: ::Array[OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent::Content], + ?type: :file_content + ) -> void + + def to_hash: -> { + content: ::Array[OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent::Content], + type: :file_content + } + + type content = + { item: ::Hash[Symbol, top], sample: ::Hash[Symbol, top] } + + class Content < OpenAI::Internal::Type::BaseModel + attr_accessor item: ::Hash[Symbol, top] + + attr_reader sample: ::Hash[Symbol, top]? + + def sample=: (::Hash[Symbol, top]) -> ::Hash[Symbol, top] + + def initialize: ( + item: ::Hash[Symbol, top], + ?sample: ::Hash[Symbol, top] + ) -> void + + def to_hash: -> { + item: ::Hash[Symbol, top], + sample: ::Hash[Symbol, top] + } + end + end + + type file_id = { id: String, type: :file_id } + + class FileID < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor type: :file_id + + def initialize: (id: String, ?type: :file_id) -> void + + def to_hash: -> { id: String, type: :file_id } + end + + type responses = + { + type: :responses, + created_after: Integer?, + created_before: Integer?, + instructions_search: String?, + metadata: top?, + model: String?, + reasoning_effort: OpenAI::Models::reasoning_effort?, + temperature: Float?, + tools: ::Array[String]?, + top_p: Float?, + users: ::Array[String]? + } + + class Responses < OpenAI::Internal::Type::BaseModel + attr_accessor type: :responses + + attr_accessor created_after: Integer? 
+ + attr_accessor created_before: Integer? + + attr_accessor instructions_search: String? + + attr_accessor metadata: top? + + attr_accessor model: String? + + attr_accessor reasoning_effort: OpenAI::Models::reasoning_effort? + + attr_accessor temperature: Float? + + attr_accessor tools: ::Array[String]? + + attr_accessor top_p: Float? + + attr_accessor users: ::Array[String]? + + def initialize: ( + ?created_after: Integer?, + ?created_before: Integer?, + ?instructions_search: String?, + ?metadata: top?, + ?model: String?, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, + ?temperature: Float?, + ?tools: ::Array[String]?, + ?top_p: Float?, + ?users: ::Array[String]?, + ?type: :responses + ) -> void + + def to_hash: -> { + type: :responses, + created_after: Integer?, + created_before: Integer?, + instructions_search: String?, + metadata: top?, + model: String?, + reasoning_effort: OpenAI::Models::reasoning_effort?, + temperature: Float?, + tools: ::Array[String]?, + top_p: Float?, + users: ::Array[String]? + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::source] + end + + type type_ = :responses + + module Type + extend OpenAI::Internal::Type::Enum + + RESPONSES: :responses + + def self?.values: -> ::Array[OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::type_] + end + + type input_messages = + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template + | OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::ItemReference + + module InputMessages + extend OpenAI::Internal::Type::Union + + type template = + { + template: ::Array[OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::template], + type: :template + } + + class Template < OpenAI::Internal::Type::BaseModel + attr_accessor template: ::Array[OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::template] + + attr_accessor type: :template + + def initialize: ( + template: ::Array[OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::template], + ?type: :template + ) -> void + + def to_hash: -> { + template: ::Array[OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::template], + type: :template + } + + type template = + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::ChatMessage + | OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem + + module Template + extend OpenAI::Internal::Type::Union + + type chat_message = { content: String, role: String } + + class ChatMessage < OpenAI::Internal::Type::BaseModel + attr_accessor content: String + + attr_accessor role: String + + def initialize: (content: String, role: String) -> void + + def to_hash: -> { content: String, role: String } + end + + type eval_item = + { + content: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::role, + type: 
OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::type_ + } + + class EvalItem < OpenAI::Internal::Type::BaseModel + attr_accessor content: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::content + + attr_accessor role: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::role + + attr_reader type: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::type_? + + def type=: ( + OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::type_ + ) -> OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::type_ + + def initialize: ( + content: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::role, + ?type: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::type_ + ) -> void + + def to_hash: -> { + content: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::role, + type: OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::type_ + } + + type content = + String + | OpenAI::Responses::ResponseInputText + | OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText + | OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage + | ::Array[top] + + module Content + extend OpenAI::Internal::Type::Union + + type output_text = { text: String, type: :output_text } + + class OutputText < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :output_text + + def initialize: ( + text: String, + ?type: :output_text + ) -> void + + def to_hash: -> { text: String, type: :output_text } + end + + type input_image = + { + image_url: String, + type: :input_image, + detail: String + } + + class InputImage < OpenAI::Internal::Type::BaseModel + attr_accessor image_url: String + + attr_accessor type: :input_image + + attr_reader detail: String? 
+ + def detail=: (String) -> String + + def initialize: ( + image_url: String, + ?detail: String, + ?type: :input_image + ) -> void + + def to_hash: -> { + image_url: String, + type: :input_image, + detail: String + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::content] + + AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter + end + + type role = :user | :assistant | :system | :developer + + module Role + extend OpenAI::Internal::Type::Enum + + USER: :user + ASSISTANT: :assistant + SYSTEM: :system + DEVELOPER: :developer + + def self?.values: -> ::Array[OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::role] + end + + type type_ = :message + + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE: :message + + def self?.values: -> ::Array[OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::type_] + end + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::template] + end + end + + type item_reference = + { item_reference: String, type: :item_reference } + + class ItemReference < OpenAI::Internal::Type::BaseModel + attr_accessor item_reference: String + + attr_accessor type: :item_reference + + def initialize: ( + item_reference: String, + ?type: :item_reference + ) -> void + + def to_hash: -> { + item_reference: String, + type: :item_reference + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::input_messages] + end + + type sampling_params = + { + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text, + tools: ::Array[OpenAI::Models::Responses::tool], + top_p: Float + } + + class SamplingParams < OpenAI::Internal::Type::BaseModel + attr_reader max_completion_tokens: Integer? + + def max_completion_tokens=: (Integer) -> Integer + + attr_reader seed: Integer? + + def seed=: (Integer) -> Integer + + attr_reader temperature: Float? + + def temperature=: (Float) -> Float + + attr_reader text: OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text? + + def text=: ( + OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text + ) -> OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text + + attr_reader tools: ::Array[OpenAI::Models::Responses::tool]? + + def tools=: ( + ::Array[OpenAI::Models::Responses::tool] + ) -> ::Array[OpenAI::Models::Responses::tool] + + attr_reader top_p: Float? 
+ + def top_p=: (Float) -> Float + + def initialize: ( + ?max_completion_tokens: Integer, + ?seed: Integer, + ?temperature: Float, + ?text: OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text, + ?tools: ::Array[OpenAI::Models::Responses::tool], + ?top_p: Float + ) -> void + + def to_hash: -> { + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text, + tools: ::Array[OpenAI::Models::Responses::tool], + top_p: Float + } + + type text = + { + format_: OpenAI::Models::Responses::response_format_text_config + } + + class Text < OpenAI::Internal::Type::BaseModel + attr_reader format_: OpenAI::Models::Responses::response_format_text_config? + + def format_=: ( + OpenAI::Models::Responses::response_format_text_config + ) -> OpenAI::Models::Responses::response_format_text_config + + def initialize: ( + ?format_: OpenAI::Models::Responses::response_format_text_config + ) -> void + + def to_hash: -> { + format_: OpenAI::Models::Responses::response_format_text_config + } + end + end + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCreateParams::data_source] + end + end + end + end +end diff --git a/sig/openai/models/evals/run_create_response.rbs b/sig/openai/models/evals/run_create_response.rbs new file mode 100644 index 00000000..d73a072b --- /dev/null +++ b/sig/openai/models/evals/run_create_response.rbs @@ -0,0 +1,635 @@ +module OpenAI + module Models + module Evals + type run_create_response = + { + id: String, + created_at: Integer, + data_source: OpenAI::Models::Evals::RunCreateResponse::data_source, + error: OpenAI::Evals::EvalAPIError, + eval_id: String, + metadata: OpenAI::Models::metadata?, + model: String, + name: String, + object: :"eval.run", + per_model_usage: ::Array[OpenAI::Models::Evals::RunCreateResponse::PerModelUsage], + per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunCreateResponse::PerTestingCriteriaResult], + report_url: String, + result_counts: OpenAI::Models::Evals::RunCreateResponse::ResultCounts, + status: String + } + + class RunCreateResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data_source: OpenAI::Models::Evals::RunCreateResponse::data_source + + attr_accessor error: OpenAI::Evals::EvalAPIError + + attr_accessor eval_id: String + + attr_accessor metadata: OpenAI::Models::metadata? 
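+
+        # NOTE (editorial): RunCreateResponse is structurally identical to the
+        # RunCancelResponse and RunListResponse signatures elsewhere in this
+        # diff; the three differ only in their namespaces, one per endpoint.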
+ + attr_accessor model: String + + attr_accessor name: String + + attr_accessor object: :"eval.run" + + attr_accessor per_model_usage: ::Array[OpenAI::Models::Evals::RunCreateResponse::PerModelUsage] + + attr_accessor per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunCreateResponse::PerTestingCriteriaResult] + + attr_accessor report_url: String + + attr_accessor result_counts: OpenAI::Models::Evals::RunCreateResponse::ResultCounts + + attr_accessor status: String + + def initialize: ( + id: String, + created_at: Integer, + data_source: OpenAI::Models::Evals::RunCreateResponse::data_source, + error: OpenAI::Evals::EvalAPIError, + eval_id: String, + metadata: OpenAI::Models::metadata?, + model: String, + name: String, + per_model_usage: ::Array[OpenAI::Models::Evals::RunCreateResponse::PerModelUsage], + per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunCreateResponse::PerTestingCriteriaResult], + report_url: String, + result_counts: OpenAI::Models::Evals::RunCreateResponse::ResultCounts, + status: String, + ?object: :"eval.run" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data_source: OpenAI::Models::Evals::RunCreateResponse::data_source, + error: OpenAI::Evals::EvalAPIError, + eval_id: String, + metadata: OpenAI::Models::metadata?, + model: String, + name: String, + object: :"eval.run", + per_model_usage: ::Array[OpenAI::Models::Evals::RunCreateResponse::PerModelUsage], + per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunCreateResponse::PerTestingCriteriaResult], + report_url: String, + result_counts: OpenAI::Models::Evals::RunCreateResponse::ResultCounts, + status: String + } + + type data_source = + OpenAI::Evals::CreateEvalJSONLRunDataSource + | OpenAI::Evals::CreateEvalCompletionsRunDataSource + | OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses + + module DataSource + extend OpenAI::Internal::Type::Union + + type responses = + { + source: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::source, + type: :responses, + input_messages: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::input_messages, + model: String, + sampling_params: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams + } + + class Responses < OpenAI::Internal::Type::BaseModel + attr_accessor source: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::source + + attr_accessor type: :responses + + attr_reader input_messages: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::input_messages? + + def input_messages=: ( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::input_messages + ) -> OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::input_messages + + attr_reader model: String? + + def model=: (String) -> String + + attr_reader sampling_params: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams? 
+ + def sampling_params=: ( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams + ) -> OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams + + def initialize: ( + source: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::source, + ?input_messages: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::input_messages, + ?model: String, + ?sampling_params: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams, + ?type: :responses + ) -> void + + def to_hash: -> { + source: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::source, + type: :responses, + input_messages: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::input_messages, + model: String, + sampling_params: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams + } + + type source = + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent + | OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileID + | OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Responses + + module Source + extend OpenAI::Internal::Type::Union + + type file_content = + { + content: ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent::Content], + type: :file_content + } + + class FileContent < OpenAI::Internal::Type::BaseModel + attr_accessor content: ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent::Content] + + attr_accessor type: :file_content + + def initialize: ( + content: ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent::Content], + ?type: :file_content + ) -> void + + def to_hash: -> { + content: ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent::Content], + type: :file_content + } + + type content = + { item: ::Hash[Symbol, top], sample: ::Hash[Symbol, top] } + + class Content < OpenAI::Internal::Type::BaseModel + attr_accessor item: ::Hash[Symbol, top] + + attr_reader sample: ::Hash[Symbol, top]? + + def sample=: (::Hash[Symbol, top]) -> ::Hash[Symbol, top] + + def initialize: ( + item: ::Hash[Symbol, top], + ?sample: ::Hash[Symbol, top] + ) -> void + + def to_hash: -> { + item: ::Hash[Symbol, top], + sample: ::Hash[Symbol, top] + } + end + end + + type file_id = { id: String, type: :file_id } + + class FileID < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor type: :file_id + + def initialize: (id: String, ?type: :file_id) -> void + + def to_hash: -> { id: String, type: :file_id } + end + + type responses = + { + type: :responses, + created_after: Integer?, + created_before: Integer?, + instructions_search: String?, + metadata: top?, + model: String?, + reasoning_effort: OpenAI::Models::reasoning_effort?, + temperature: Float?, + tools: ::Array[String]?, + top_p: Float?, + users: ::Array[String]? + } + + class Responses < OpenAI::Internal::Type::BaseModel + attr_accessor type: :responses + + attr_accessor created_after: Integer? + + attr_accessor created_before: Integer? + + attr_accessor instructions_search: String? + + attr_accessor metadata: top? + + attr_accessor model: String? + + attr_accessor reasoning_effort: OpenAI::Models::reasoning_effort? + + attr_accessor temperature: Float? + + attr_accessor tools: ::Array[String]? + + attr_accessor top_p: Float? + + attr_accessor users: ::Array[String]? 
+ + def initialize: ( + ?created_after: Integer?, + ?created_before: Integer?, + ?instructions_search: String?, + ?metadata: top?, + ?model: String?, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, + ?temperature: Float?, + ?tools: ::Array[String]?, + ?top_p: Float?, + ?users: ::Array[String]?, + ?type: :responses + ) -> void + + def to_hash: -> { + type: :responses, + created_after: Integer?, + created_before: Integer?, + instructions_search: String?, + metadata: top?, + model: String?, + reasoning_effort: OpenAI::Models::reasoning_effort?, + temperature: Float?, + tools: ::Array[String]?, + top_p: Float?, + users: ::Array[String]? + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::source] + end + + type input_messages = + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template + | OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::ItemReference + + module InputMessages + extend OpenAI::Internal::Type::Union + + type template = + { + template: ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::template], + type: :template + } + + class Template < OpenAI::Internal::Type::BaseModel + attr_accessor template: ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::template] + + attr_accessor type: :template + + def initialize: ( + template: ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::template], + ?type: :template + ) -> void + + def to_hash: -> { + template: ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::template], + type: :template + } + + type template = + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage + | OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem + + module Template + extend OpenAI::Internal::Type::Union + + type chat_message = { content: String, role: String } + + class ChatMessage < OpenAI::Internal::Type::BaseModel + attr_accessor content: String + + attr_accessor role: String + + def initialize: (content: String, role: String) -> void + + def to_hash: -> { content: String, role: String } + end + + type eval_item = + { + content: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role, + type: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + } + + class EvalItem < OpenAI::Internal::Type::BaseModel + attr_accessor content: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content + + attr_accessor role: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role + + attr_reader type: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_? 
+ + def type=: ( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + ) -> OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + + def initialize: ( + content: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role, + ?type: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + ) -> void + + def to_hash: -> { + content: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role, + type: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + } + + type content = + String + | OpenAI::Responses::ResponseInputText + | OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText + | OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage + | ::Array[top] + + module Content + extend OpenAI::Internal::Type::Union + + type output_text = { text: String, type: :output_text } + + class OutputText < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :output_text + + def initialize: ( + text: String, + ?type: :output_text + ) -> void + + def to_hash: -> { text: String, type: :output_text } + end + + type input_image = + { + image_url: String, + type: :input_image, + detail: String + } + + class InputImage < OpenAI::Internal::Type::BaseModel + attr_accessor image_url: String + + attr_accessor type: :input_image + + attr_reader detail: String? 
+ + def detail=: (String) -> String + + def initialize: ( + image_url: String, + ?detail: String, + ?type: :input_image + ) -> void + + def to_hash: -> { + image_url: String, + type: :input_image, + detail: String + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content] + + AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter + end + + type role = :user | :assistant | :system | :developer + + module Role + extend OpenAI::Internal::Type::Enum + + USER: :user + ASSISTANT: :assistant + SYSTEM: :system + DEVELOPER: :developer + + def self?.values: -> ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role] + end + + type type_ = :message + + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE: :message + + def self?.values: -> ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_] + end + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::template] + end + end + + type item_reference = + { item_reference: String, type: :item_reference } + + class ItemReference < OpenAI::Internal::Type::BaseModel + attr_accessor item_reference: String + + attr_accessor type: :item_reference + + def initialize: ( + item_reference: String, + ?type: :item_reference + ) -> void + + def to_hash: -> { + item_reference: String, + type: :item_reference + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::input_messages] + end + + type sampling_params = + { + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text, + tools: ::Array[OpenAI::Models::Responses::tool], + top_p: Float + } + + class SamplingParams < OpenAI::Internal::Type::BaseModel + attr_reader max_completion_tokens: Integer? + + def max_completion_tokens=: (Integer) -> Integer + + attr_reader seed: Integer? + + def seed=: (Integer) -> Integer + + attr_reader temperature: Float? + + def temperature=: (Float) -> Float + + attr_reader text: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text? + + def text=: ( + OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text + ) -> OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text + + attr_reader tools: ::Array[OpenAI::Models::Responses::tool]? + + def tools=: ( + ::Array[OpenAI::Models::Responses::tool] + ) -> ::Array[OpenAI::Models::Responses::tool] + + attr_reader top_p: Float? 
+ + def top_p=: (Float) -> Float + + def initialize: ( + ?max_completion_tokens: Integer, + ?seed: Integer, + ?temperature: Float, + ?text: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text, + ?tools: ::Array[OpenAI::Models::Responses::tool], + ?top_p: Float + ) -> void + + def to_hash: -> { + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text, + tools: ::Array[OpenAI::Models::Responses::tool], + top_p: Float + } + + type text = + { + format_: OpenAI::Models::Responses::response_format_text_config + } + + class Text < OpenAI::Internal::Type::BaseModel + attr_reader format_: OpenAI::Models::Responses::response_format_text_config? + + def format_=: ( + OpenAI::Models::Responses::response_format_text_config + ) -> OpenAI::Models::Responses::response_format_text_config + + def initialize: ( + ?format_: OpenAI::Models::Responses::response_format_text_config + ) -> void + + def to_hash: -> { + format_: OpenAI::Models::Responses::response_format_text_config + } + end + end + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunCreateResponse::data_source] + end + + type per_model_usage = + { + cached_tokens: Integer, + completion_tokens: Integer, + invocation_count: Integer, + model_name: String, + prompt_tokens: Integer, + total_tokens: Integer + } + + class PerModelUsage < OpenAI::Internal::Type::BaseModel + attr_accessor cached_tokens: Integer + + attr_accessor completion_tokens: Integer + + attr_accessor invocation_count: Integer + + attr_accessor model_name: String + + attr_accessor prompt_tokens: Integer + + attr_accessor total_tokens: Integer + + def initialize: ( + cached_tokens: Integer, + completion_tokens: Integer, + invocation_count: Integer, + model_name: String, + prompt_tokens: Integer, + total_tokens: Integer + ) -> void + + def to_hash: -> { + cached_tokens: Integer, + completion_tokens: Integer, + invocation_count: Integer, + model_name: String, + prompt_tokens: Integer, + total_tokens: Integer + } + end + + type per_testing_criteria_result = + { failed: Integer, passed: Integer, testing_criteria: String } + + class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel + attr_accessor failed: Integer + + attr_accessor passed: Integer + + attr_accessor testing_criteria: String + + def initialize: ( + failed: Integer, + passed: Integer, + testing_criteria: String + ) -> void + + def to_hash: -> { + failed: Integer, + passed: Integer, + testing_criteria: String + } + end + + type result_counts = + { errored: Integer, failed: Integer, passed: Integer, total: Integer } + + class ResultCounts < OpenAI::Internal::Type::BaseModel + attr_accessor errored: Integer + + attr_accessor failed: Integer + + attr_accessor passed: Integer + + attr_accessor total: Integer + + def initialize: ( + errored: Integer, + failed: Integer, + passed: Integer, + total: Integer + ) -> void + + def to_hash: -> { + errored: Integer, + failed: Integer, + passed: Integer, + total: Integer + } + end + end + end + end +end diff --git a/sig/openai/models/evals/run_delete_params.rbs b/sig/openai/models/evals/run_delete_params.rbs new file mode 100644 index 00000000..097144da --- /dev/null +++ b/sig/openai/models/evals/run_delete_params.rbs @@ -0,0 +1,25 @@ +module OpenAI + module Models + module Evals + type run_delete_params = + { eval_id: String } & OpenAI::Internal::Type::request_parameters + + class RunDeleteParams < 
OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor eval_id: String + + def initialize: ( + eval_id: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + eval_id: String, + request_options: OpenAI::RequestOptions + } + end + end + end +end diff --git a/sig/openai/models/evals/run_delete_response.rbs b/sig/openai/models/evals/run_delete_response.rbs new file mode 100644 index 00000000..37a02bec --- /dev/null +++ b/sig/openai/models/evals/run_delete_response.rbs @@ -0,0 +1,30 @@ +module OpenAI + module Models + module Evals + type run_delete_response = + { deleted: bool, object: String, run_id: String } + + class RunDeleteResponse < OpenAI::Internal::Type::BaseModel + attr_reader deleted: bool? + + def deleted=: (bool) -> bool + + attr_reader object: String? + + def object=: (String) -> String + + attr_reader run_id: String? + + def run_id=: (String) -> String + + def initialize: ( + ?deleted: bool, + ?object: String, + ?run_id: String + ) -> void + + def to_hash: -> { deleted: bool, object: String, run_id: String } + end + end + end +end diff --git a/sig/openai/models/evals/run_list_params.rbs b/sig/openai/models/evals/run_list_params.rbs new file mode 100644 index 00000000..95f65ca4 --- /dev/null +++ b/sig/openai/models/evals/run_list_params.rbs @@ -0,0 +1,80 @@ +module OpenAI + module Models + module Evals + type run_list_params = + { + after: String, + limit: Integer, + order: OpenAI::Models::Evals::RunListParams::order, + status: OpenAI::Models::Evals::RunListParams::status + } + & OpenAI::Internal::Type::request_parameters + + class RunListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_reader after: String? + + def after=: (String) -> String + + attr_reader limit: Integer? + + def limit=: (Integer) -> Integer + + attr_reader order: OpenAI::Models::Evals::RunListParams::order? + + def order=: ( + OpenAI::Models::Evals::RunListParams::order + ) -> OpenAI::Models::Evals::RunListParams::order + + attr_reader status: OpenAI::Models::Evals::RunListParams::status? 
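+
+        # Hypothetical usage sketch (invented identifiers), assuming the runs
+        # resource accepts these keywords as typed below:
+        #
+        #   client.evals.runs.list(
+        #     "eval_abc123",
+        #     status: :completed,
+        #     order: :desc,
+        #     limit: 20
+        #   )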
+ + def status=: ( + OpenAI::Models::Evals::RunListParams::status + ) -> OpenAI::Models::Evals::RunListParams::status + + def initialize: ( + ?after: String, + ?limit: Integer, + ?order: OpenAI::Models::Evals::RunListParams::order, + ?status: OpenAI::Models::Evals::RunListParams::status, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + after: String, + limit: Integer, + order: OpenAI::Models::Evals::RunListParams::order, + status: OpenAI::Models::Evals::RunListParams::status, + request_options: OpenAI::RequestOptions + } + + type order = :asc | :desc + + module Order + extend OpenAI::Internal::Type::Enum + + ASC: :asc + DESC: :desc + + def self?.values: -> ::Array[OpenAI::Models::Evals::RunListParams::order] + end + + type status = :queued | :in_progress | :completed | :canceled | :failed + + module Status + extend OpenAI::Internal::Type::Enum + + QUEUED: :queued + IN_PROGRESS: :in_progress + COMPLETED: :completed + CANCELED: :canceled + FAILED: :failed + + def self?.values: -> ::Array[OpenAI::Models::Evals::RunListParams::status] + end + end + end + end +end diff --git a/sig/openai/models/evals/run_list_response.rbs b/sig/openai/models/evals/run_list_response.rbs new file mode 100644 index 00000000..5e91e0f5 --- /dev/null +++ b/sig/openai/models/evals/run_list_response.rbs @@ -0,0 +1,635 @@ +module OpenAI + module Models + module Evals + type run_list_response = + { + id: String, + created_at: Integer, + data_source: OpenAI::Models::Evals::RunListResponse::data_source, + error: OpenAI::Evals::EvalAPIError, + eval_id: String, + metadata: OpenAI::Models::metadata?, + model: String, + name: String, + object: :"eval.run", + per_model_usage: ::Array[OpenAI::Models::Evals::RunListResponse::PerModelUsage], + per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunListResponse::PerTestingCriteriaResult], + report_url: String, + result_counts: OpenAI::Models::Evals::RunListResponse::ResultCounts, + status: String + } + + class RunListResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data_source: OpenAI::Models::Evals::RunListResponse::data_source + + attr_accessor error: OpenAI::Evals::EvalAPIError + + attr_accessor eval_id: String + + attr_accessor metadata: OpenAI::Models::metadata? 
+ + attr_accessor model: String + + attr_accessor name: String + + attr_accessor object: :"eval.run" + + attr_accessor per_model_usage: ::Array[OpenAI::Models::Evals::RunListResponse::PerModelUsage] + + attr_accessor per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunListResponse::PerTestingCriteriaResult] + + attr_accessor report_url: String + + attr_accessor result_counts: OpenAI::Models::Evals::RunListResponse::ResultCounts + + attr_accessor status: String + + def initialize: ( + id: String, + created_at: Integer, + data_source: OpenAI::Models::Evals::RunListResponse::data_source, + error: OpenAI::Evals::EvalAPIError, + eval_id: String, + metadata: OpenAI::Models::metadata?, + model: String, + name: String, + per_model_usage: ::Array[OpenAI::Models::Evals::RunListResponse::PerModelUsage], + per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunListResponse::PerTestingCriteriaResult], + report_url: String, + result_counts: OpenAI::Models::Evals::RunListResponse::ResultCounts, + status: String, + ?object: :"eval.run" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data_source: OpenAI::Models::Evals::RunListResponse::data_source, + error: OpenAI::Evals::EvalAPIError, + eval_id: String, + metadata: OpenAI::Models::metadata?, + model: String, + name: String, + object: :"eval.run", + per_model_usage: ::Array[OpenAI::Models::Evals::RunListResponse::PerModelUsage], + per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunListResponse::PerTestingCriteriaResult], + report_url: String, + result_counts: OpenAI::Models::Evals::RunListResponse::ResultCounts, + status: String + } + + type data_source = + OpenAI::Evals::CreateEvalJSONLRunDataSource + | OpenAI::Evals::CreateEvalCompletionsRunDataSource + | OpenAI::Models::Evals::RunListResponse::DataSource::Responses + + module DataSource + extend OpenAI::Internal::Type::Union + + type responses = + { + source: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::source, + type: :responses, + input_messages: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::input_messages, + model: String, + sampling_params: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams + } + + class Responses < OpenAI::Internal::Type::BaseModel + attr_accessor source: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::source + + attr_accessor type: :responses + + attr_reader input_messages: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::input_messages? + + def input_messages=: ( + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::input_messages + ) -> OpenAI::Models::Evals::RunListResponse::DataSource::Responses::input_messages + + attr_reader model: String? + + def model=: (String) -> String + + attr_reader sampling_params: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams? 
+ + def sampling_params=: ( + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams + ) -> OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams + + def initialize: ( + source: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::source, + ?input_messages: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::input_messages, + ?model: String, + ?sampling_params: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams, + ?type: :responses + ) -> void + + def to_hash: -> { + source: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::source, + type: :responses, + input_messages: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::input_messages, + model: String, + sampling_params: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams + } + + type source = + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent + | OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileID + | OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Responses + + module Source + extend OpenAI::Internal::Type::Union + + type file_content = + { + content: ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent::Content], + type: :file_content + } + + class FileContent < OpenAI::Internal::Type::BaseModel + attr_accessor content: ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent::Content] + + attr_accessor type: :file_content + + def initialize: ( + content: ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent::Content], + ?type: :file_content + ) -> void + + def to_hash: -> { + content: ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent::Content], + type: :file_content + } + + type content = + { item: ::Hash[Symbol, top], sample: ::Hash[Symbol, top] } + + class Content < OpenAI::Internal::Type::BaseModel + attr_accessor item: ::Hash[Symbol, top] + + attr_reader sample: ::Hash[Symbol, top]? + + def sample=: (::Hash[Symbol, top]) -> ::Hash[Symbol, top] + + def initialize: ( + item: ::Hash[Symbol, top], + ?sample: ::Hash[Symbol, top] + ) -> void + + def to_hash: -> { + item: ::Hash[Symbol, top], + sample: ::Hash[Symbol, top] + } + end + end + + type file_id = { id: String, type: :file_id } + + class FileID < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor type: :file_id + + def initialize: (id: String, ?type: :file_id) -> void + + def to_hash: -> { id: String, type: :file_id } + end + + type responses = + { + type: :responses, + created_after: Integer?, + created_before: Integer?, + instructions_search: String?, + metadata: top?, + model: String?, + reasoning_effort: OpenAI::Models::reasoning_effort?, + temperature: Float?, + tools: ::Array[String]?, + top_p: Float?, + users: ::Array[String]? + } + + class Responses < OpenAI::Internal::Type::BaseModel + attr_accessor type: :responses + + attr_accessor created_after: Integer? + + attr_accessor created_before: Integer? + + attr_accessor instructions_search: String? + + attr_accessor metadata: top? + + attr_accessor model: String? + + attr_accessor reasoning_effort: OpenAI::Models::reasoning_effort? + + attr_accessor temperature: Float? + + attr_accessor tools: ::Array[String]? + + attr_accessor top_p: Float? + + attr_accessor users: ::Array[String]? 
+ + def initialize: ( + ?created_after: Integer?, + ?created_before: Integer?, + ?instructions_search: String?, + ?metadata: top?, + ?model: String?, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, + ?temperature: Float?, + ?tools: ::Array[String]?, + ?top_p: Float?, + ?users: ::Array[String]?, + ?type: :responses + ) -> void + + def to_hash: -> { + type: :responses, + created_after: Integer?, + created_before: Integer?, + instructions_search: String?, + metadata: top?, + model: String?, + reasoning_effort: OpenAI::Models::reasoning_effort?, + temperature: Float?, + tools: ::Array[String]?, + top_p: Float?, + users: ::Array[String]? + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::source] + end + + type input_messages = + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template + | OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::ItemReference + + module InputMessages + extend OpenAI::Internal::Type::Union + + type template = + { + template: ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::template], + type: :template + } + + class Template < OpenAI::Internal::Type::BaseModel + attr_accessor template: ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::template] + + attr_accessor type: :template + + def initialize: ( + template: ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::template], + ?type: :template + ) -> void + + def to_hash: -> { + template: ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::template], + type: :template + } + + type template = + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage + | OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem + + module Template + extend OpenAI::Internal::Type::Union + + type chat_message = { content: String, role: String } + + class ChatMessage < OpenAI::Internal::Type::BaseModel + attr_accessor content: String + + attr_accessor role: String + + def initialize: (content: String, role: String) -> void + + def to_hash: -> { content: String, role: String } + end + + type eval_item = + { + content: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role, + type: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + } + + class EvalItem < OpenAI::Internal::Type::BaseModel + attr_accessor content: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content + + attr_accessor role: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role + + attr_reader type: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_? 
+ + def type=: ( + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + ) -> OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + + def initialize: ( + content: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role, + ?type: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + ) -> void + + def to_hash: -> { + content: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role, + type: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + } + + type content = + String + | OpenAI::Responses::ResponseInputText + | OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText + | OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage + | ::Array[top] + + module Content + extend OpenAI::Internal::Type::Union + + type output_text = { text: String, type: :output_text } + + class OutputText < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :output_text + + def initialize: ( + text: String, + ?type: :output_text + ) -> void + + def to_hash: -> { text: String, type: :output_text } + end + + type input_image = + { + image_url: String, + type: :input_image, + detail: String + } + + class InputImage < OpenAI::Internal::Type::BaseModel + attr_accessor image_url: String + + attr_accessor type: :input_image + + attr_reader detail: String? 
+ + def detail=: (String) -> String + + def initialize: ( + image_url: String, + ?detail: String, + ?type: :input_image + ) -> void + + def to_hash: -> { + image_url: String, + type: :input_image, + detail: String + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content] + + AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter + end + + type role = :user | :assistant | :system | :developer + + module Role + extend OpenAI::Internal::Type::Enum + + USER: :user + ASSISTANT: :assistant + SYSTEM: :system + DEVELOPER: :developer + + def self?.values: -> ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role] + end + + type type_ = :message + + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE: :message + + def self?.values: -> ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_] + end + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::template] + end + end + + type item_reference = + { item_reference: String, type: :item_reference } + + class ItemReference < OpenAI::Internal::Type::BaseModel + attr_accessor item_reference: String + + attr_accessor type: :item_reference + + def initialize: ( + item_reference: String, + ?type: :item_reference + ) -> void + + def to_hash: -> { + item_reference: String, + type: :item_reference + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::input_messages] + end + + type sampling_params = + { + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text, + tools: ::Array[OpenAI::Models::Responses::tool], + top_p: Float + } + + class SamplingParams < OpenAI::Internal::Type::BaseModel + attr_reader max_completion_tokens: Integer? + + def max_completion_tokens=: (Integer) -> Integer + + attr_reader seed: Integer? + + def seed=: (Integer) -> Integer + + attr_reader temperature: Float? + + def temperature=: (Float) -> Float + + attr_reader text: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text? + + def text=: ( + OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text + ) -> OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text + + attr_reader tools: ::Array[OpenAI::Models::Responses::tool]? + + def tools=: ( + ::Array[OpenAI::Models::Responses::tool] + ) -> ::Array[OpenAI::Models::Responses::tool] + + attr_reader top_p: Float? 
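+            # Illustrative note (hedged, not generated code): every reader on
+            # SamplingParams is optional (`Integer?`, `Float?`, ...), so unset
+            # fields read back as nil. With a hypothetical `ds`
+            # (a DataSource::Responses value):
+            #
+            #   sp = ds.sampling_params
+            #   temperature = sp&.temperature || 1.0
+            #   max_tokens  = sp&.max_completion_tokens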
+ + def top_p=: (Float) -> Float + + def initialize: ( + ?max_completion_tokens: Integer, + ?seed: Integer, + ?temperature: Float, + ?text: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text, + ?tools: ::Array[OpenAI::Models::Responses::tool], + ?top_p: Float + ) -> void + + def to_hash: -> { + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text, + tools: ::Array[OpenAI::Models::Responses::tool], + top_p: Float + } + + type text = + { + format_: OpenAI::Models::Responses::response_format_text_config + } + + class Text < OpenAI::Internal::Type::BaseModel + attr_reader format_: OpenAI::Models::Responses::response_format_text_config? + + def format_=: ( + OpenAI::Models::Responses::response_format_text_config + ) -> OpenAI::Models::Responses::response_format_text_config + + def initialize: ( + ?format_: OpenAI::Models::Responses::response_format_text_config + ) -> void + + def to_hash: -> { + format_: OpenAI::Models::Responses::response_format_text_config + } + end + end + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunListResponse::data_source] + end + + type per_model_usage = + { + cached_tokens: Integer, + completion_tokens: Integer, + invocation_count: Integer, + model_name: String, + prompt_tokens: Integer, + total_tokens: Integer + } + + class PerModelUsage < OpenAI::Internal::Type::BaseModel + attr_accessor cached_tokens: Integer + + attr_accessor completion_tokens: Integer + + attr_accessor invocation_count: Integer + + attr_accessor model_name: String + + attr_accessor prompt_tokens: Integer + + attr_accessor total_tokens: Integer + + def initialize: ( + cached_tokens: Integer, + completion_tokens: Integer, + invocation_count: Integer, + model_name: String, + prompt_tokens: Integer, + total_tokens: Integer + ) -> void + + def to_hash: -> { + cached_tokens: Integer, + completion_tokens: Integer, + invocation_count: Integer, + model_name: String, + prompt_tokens: Integer, + total_tokens: Integer + } + end + + type per_testing_criteria_result = + { failed: Integer, passed: Integer, testing_criteria: String } + + class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel + attr_accessor failed: Integer + + attr_accessor passed: Integer + + attr_accessor testing_criteria: String + + def initialize: ( + failed: Integer, + passed: Integer, + testing_criteria: String + ) -> void + + def to_hash: -> { + failed: Integer, + passed: Integer, + testing_criteria: String + } + end + + type result_counts = + { errored: Integer, failed: Integer, passed: Integer, total: Integer } + + class ResultCounts < OpenAI::Internal::Type::BaseModel + attr_accessor errored: Integer + + attr_accessor failed: Integer + + attr_accessor passed: Integer + + attr_accessor total: Integer + + def initialize: ( + errored: Integer, + failed: Integer, + passed: Integer, + total: Integer + ) -> void + + def to_hash: -> { + errored: Integer, + failed: Integer, + passed: Integer, + total: Integer + } + end + end + end + end +end diff --git a/sig/openai/models/evals/run_retrieve_params.rbs b/sig/openai/models/evals/run_retrieve_params.rbs new file mode 100644 index 00000000..bc35ad38 --- /dev/null +++ b/sig/openai/models/evals/run_retrieve_params.rbs @@ -0,0 +1,25 @@ +module OpenAI + module Models + module Evals + type run_retrieve_params = + { eval_id: String } & OpenAI::Internal::Type::request_parameters + + class RunRetrieveParams < 
OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor eval_id: String + + def initialize: ( + eval_id: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + eval_id: String, + request_options: OpenAI::RequestOptions + } + end + end + end +end diff --git a/sig/openai/models/evals/run_retrieve_response.rbs b/sig/openai/models/evals/run_retrieve_response.rbs new file mode 100644 index 00000000..874cf7d0 --- /dev/null +++ b/sig/openai/models/evals/run_retrieve_response.rbs @@ -0,0 +1,635 @@ +module OpenAI + module Models + module Evals + type run_retrieve_response = + { + id: String, + created_at: Integer, + data_source: OpenAI::Models::Evals::RunRetrieveResponse::data_source, + error: OpenAI::Evals::EvalAPIError, + eval_id: String, + metadata: OpenAI::Models::metadata?, + model: String, + name: String, + object: :"eval.run", + per_model_usage: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::PerModelUsage], + per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::PerTestingCriteriaResult], + report_url: String, + result_counts: OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts, + status: String + } + + class RunRetrieveResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data_source: OpenAI::Models::Evals::RunRetrieveResponse::data_source + + attr_accessor error: OpenAI::Evals::EvalAPIError + + attr_accessor eval_id: String + + attr_accessor metadata: OpenAI::Models::metadata? + + attr_accessor model: String + + attr_accessor name: String + + attr_accessor object: :"eval.run" + + attr_accessor per_model_usage: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::PerModelUsage] + + attr_accessor per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::PerTestingCriteriaResult] + + attr_accessor report_url: String + + attr_accessor result_counts: OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts + + attr_accessor status: String + + def initialize: ( + id: String, + created_at: Integer, + data_source: OpenAI::Models::Evals::RunRetrieveResponse::data_source, + error: OpenAI::Evals::EvalAPIError, + eval_id: String, + metadata: OpenAI::Models::metadata?, + model: String, + name: String, + per_model_usage: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::PerModelUsage], + per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::PerTestingCriteriaResult], + report_url: String, + result_counts: OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts, + status: String, + ?object: :"eval.run" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data_source: OpenAI::Models::Evals::RunRetrieveResponse::data_source, + error: OpenAI::Evals::EvalAPIError, + eval_id: String, + metadata: OpenAI::Models::metadata?, + model: String, + name: String, + object: :"eval.run", + per_model_usage: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::PerModelUsage], + per_testing_criteria_results: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::PerTestingCriteriaResult], + report_url: String, + result_counts: OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts, + status: String + } + + type data_source = + OpenAI::Evals::CreateEvalJSONLRunDataSource + | OpenAI::Evals::CreateEvalCompletionsRunDataSource + | OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses + + module 
DataSource + extend OpenAI::Internal::Type::Union + + type responses = + { + source: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::source, + type: :responses, + input_messages: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::input_messages, + model: String, + sampling_params: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams + } + + class Responses < OpenAI::Internal::Type::BaseModel + attr_accessor source: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::source + + attr_accessor type: :responses + + attr_reader input_messages: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::input_messages? + + def input_messages=: ( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::input_messages + ) -> OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::input_messages + + attr_reader model: String? + + def model=: (String) -> String + + attr_reader sampling_params: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams? + + def sampling_params=: ( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams + ) -> OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams + + def initialize: ( + source: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::source, + ?input_messages: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::input_messages, + ?model: String, + ?sampling_params: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams, + ?type: :responses + ) -> void + + def to_hash: -> { + source: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::source, + type: :responses, + input_messages: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::input_messages, + model: String, + sampling_params: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams + } + + type source = + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent + | OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileID + | OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Responses + + module Source + extend OpenAI::Internal::Type::Union + + type file_content = + { + content: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent::Content], + type: :file_content + } + + class FileContent < OpenAI::Internal::Type::BaseModel + attr_accessor content: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent::Content] + + attr_accessor type: :file_content + + def initialize: ( + content: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent::Content], + ?type: :file_content + ) -> void + + def to_hash: -> { + content: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent::Content], + type: :file_content + } + + type content = + { item: ::Hash[Symbol, top], sample: ::Hash[Symbol, top] } + + class Content < OpenAI::Internal::Type::BaseModel + attr_accessor item: ::Hash[Symbol, top] + + attr_reader sample: ::Hash[Symbol, top]? 
+ + def sample=: (::Hash[Symbol, top]) -> ::Hash[Symbol, top] + + def initialize: ( + item: ::Hash[Symbol, top], + ?sample: ::Hash[Symbol, top] + ) -> void + + def to_hash: -> { + item: ::Hash[Symbol, top], + sample: ::Hash[Symbol, top] + } + end + end + + type file_id = { id: String, type: :file_id } + + class FileID < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor type: :file_id + + def initialize: (id: String, ?type: :file_id) -> void + + def to_hash: -> { id: String, type: :file_id } + end + + type responses = + { + type: :responses, + created_after: Integer?, + created_before: Integer?, + instructions_search: String?, + metadata: top?, + model: String?, + reasoning_effort: OpenAI::Models::reasoning_effort?, + temperature: Float?, + tools: ::Array[String]?, + top_p: Float?, + users: ::Array[String]? + } + + class Responses < OpenAI::Internal::Type::BaseModel + attr_accessor type: :responses + + attr_accessor created_after: Integer? + + attr_accessor created_before: Integer? + + attr_accessor instructions_search: String? + + attr_accessor metadata: top? + + attr_accessor model: String? + + attr_accessor reasoning_effort: OpenAI::Models::reasoning_effort? + + attr_accessor temperature: Float? + + attr_accessor tools: ::Array[String]? + + attr_accessor top_p: Float? + + attr_accessor users: ::Array[String]? + + def initialize: ( + ?created_after: Integer?, + ?created_before: Integer?, + ?instructions_search: String?, + ?metadata: top?, + ?model: String?, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, + ?temperature: Float?, + ?tools: ::Array[String]?, + ?top_p: Float?, + ?users: ::Array[String]?, + ?type: :responses + ) -> void + + def to_hash: -> { + type: :responses, + created_after: Integer?, + created_before: Integer?, + instructions_search: String?, + metadata: top?, + model: String?, + reasoning_effort: OpenAI::Models::reasoning_effort?, + temperature: Float?, + tools: ::Array[String]?, + top_p: Float?, + users: ::Array[String]? 
+ } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::source] + end + + type input_messages = + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template + | OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::ItemReference + + module InputMessages + extend OpenAI::Internal::Type::Union + + type template = + { + template: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::template], + type: :template + } + + class Template < OpenAI::Internal::Type::BaseModel + attr_accessor template: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::template] + + attr_accessor type: :template + + def initialize: ( + template: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::template], + ?type: :template + ) -> void + + def to_hash: -> { + template: ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::template], + type: :template + } + + type template = + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage + | OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem + + module Template + extend OpenAI::Internal::Type::Union + + type chat_message = { content: String, role: String } + + class ChatMessage < OpenAI::Internal::Type::BaseModel + attr_accessor content: String + + attr_accessor role: String + + def initialize: (content: String, role: String) -> void + + def to_hash: -> { content: String, role: String } + end + + type eval_item = + { + content: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role, + type: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + } + + class EvalItem < OpenAI::Internal::Type::BaseModel + attr_accessor content: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content + + attr_accessor role: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role + + attr_reader type: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_? 
+ + def type=: ( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + ) -> OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + + def initialize: ( + content: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role, + ?type: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + ) -> void + + def to_hash: -> { + content: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content, + role: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role, + type: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_ + } + + type content = + String + | OpenAI::Responses::ResponseInputText + | OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText + | OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage + | ::Array[top] + + module Content + extend OpenAI::Internal::Type::Union + + type output_text = { text: String, type: :output_text } + + class OutputText < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :output_text + + def initialize: ( + text: String, + ?type: :output_text + ) -> void + + def to_hash: -> { text: String, type: :output_text } + end + + type input_image = + { + image_url: String, + type: :input_image, + detail: String + } + + class InputImage < OpenAI::Internal::Type::BaseModel + attr_accessor image_url: String + + attr_accessor type: :input_image + + attr_reader detail: String? 
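+                      # Illustrative sketch (hypothetical `item` EvalItem value,
+                      # not generated code): the enclosing `content` union spans
+                      # plain strings, typed text parts, images, and raw arrays,
+                      # so consumers branch on the variant:
+                      #
+                      #   case item.content
+                      #   in String => text
+                      #     puts text
+                      #   in OpenAI::Responses::ResponseInputText => t
+                      #     puts t.text
+                      #   in Array => parts
+                      #     parts.each { |part| p part }
+                      #   else
+                      #     p item.content.to_hash # OutputText or InputImage
+                      #   end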
+ + def detail=: (String) -> String + + def initialize: ( + image_url: String, + ?detail: String, + ?type: :input_image + ) -> void + + def to_hash: -> { + image_url: String, + type: :input_image, + detail: String + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::content] + + AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter + end + + type role = :user | :assistant | :system | :developer + + module Role + extend OpenAI::Internal::Type::Enum + + USER: :user + ASSISTANT: :assistant + SYSTEM: :system + DEVELOPER: :developer + + def self?.values: -> ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::role] + end + + type type_ = :message + + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE: :message + + def self?.values: -> ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::type_] + end + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::template] + end + end + + type item_reference = + { item_reference: String, type: :item_reference } + + class ItemReference < OpenAI::Internal::Type::BaseModel + attr_accessor item_reference: String + + attr_accessor type: :item_reference + + def initialize: ( + item_reference: String, + ?type: :item_reference + ) -> void + + def to_hash: -> { + item_reference: String, + type: :item_reference + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::input_messages] + end + + type sampling_params = + { + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text, + tools: ::Array[OpenAI::Models::Responses::tool], + top_p: Float + } + + class SamplingParams < OpenAI::Internal::Type::BaseModel + attr_reader max_completion_tokens: Integer? + + def max_completion_tokens=: (Integer) -> Integer + + attr_reader seed: Integer? + + def seed=: (Integer) -> Integer + + attr_reader temperature: Float? + + def temperature=: (Float) -> Float + + attr_reader text: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text? + + def text=: ( + OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text + ) -> OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text + + attr_reader tools: ::Array[OpenAI::Models::Responses::tool]? + + def tools=: ( + ::Array[OpenAI::Models::Responses::tool] + ) -> ::Array[OpenAI::Models::Responses::tool] + + attr_reader top_p: Float? 
+ + def top_p=: (Float) -> Float + + def initialize: ( + ?max_completion_tokens: Integer, + ?seed: Integer, + ?temperature: Float, + ?text: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text, + ?tools: ::Array[OpenAI::Models::Responses::tool], + ?top_p: Float + ) -> void + + def to_hash: -> { + max_completion_tokens: Integer, + seed: Integer, + temperature: Float, + text: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text, + tools: ::Array[OpenAI::Models::Responses::tool], + top_p: Float + } + + type text = + { + format_: OpenAI::Models::Responses::response_format_text_config + } + + class Text < OpenAI::Internal::Type::BaseModel + attr_reader format_: OpenAI::Models::Responses::response_format_text_config? + + def format_=: ( + OpenAI::Models::Responses::response_format_text_config + ) -> OpenAI::Models::Responses::response_format_text_config + + def initialize: ( + ?format_: OpenAI::Models::Responses::response_format_text_config + ) -> void + + def to_hash: -> { + format_: OpenAI::Models::Responses::response_format_text_config + } + end + end + end + + def self?.variants: -> ::Array[OpenAI::Models::Evals::RunRetrieveResponse::data_source] + end + + type per_model_usage = + { + cached_tokens: Integer, + completion_tokens: Integer, + invocation_count: Integer, + model_name: String, + prompt_tokens: Integer, + total_tokens: Integer + } + + class PerModelUsage < OpenAI::Internal::Type::BaseModel + attr_accessor cached_tokens: Integer + + attr_accessor completion_tokens: Integer + + attr_accessor invocation_count: Integer + + attr_accessor model_name: String + + attr_accessor prompt_tokens: Integer + + attr_accessor total_tokens: Integer + + def initialize: ( + cached_tokens: Integer, + completion_tokens: Integer, + invocation_count: Integer, + model_name: String, + prompt_tokens: Integer, + total_tokens: Integer + ) -> void + + def to_hash: -> { + cached_tokens: Integer, + completion_tokens: Integer, + invocation_count: Integer, + model_name: String, + prompt_tokens: Integer, + total_tokens: Integer + } + end + + type per_testing_criteria_result = + { failed: Integer, passed: Integer, testing_criteria: String } + + class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel + attr_accessor failed: Integer + + attr_accessor passed: Integer + + attr_accessor testing_criteria: String + + def initialize: ( + failed: Integer, + passed: Integer, + testing_criteria: String + ) -> void + + def to_hash: -> { + failed: Integer, + passed: Integer, + testing_criteria: String + } + end + + type result_counts = + { errored: Integer, failed: Integer, passed: Integer, total: Integer } + + class ResultCounts < OpenAI::Internal::Type::BaseModel + attr_accessor errored: Integer + + attr_accessor failed: Integer + + attr_accessor passed: Integer + + attr_accessor total: Integer + + def initialize: ( + errored: Integer, + failed: Integer, + passed: Integer, + total: Integer + ) -> void + + def to_hash: -> { + errored: Integer, + failed: Integer, + passed: Integer, + total: Integer + } + end + end + end + end +end diff --git a/sig/openai/models/evals/runs/output_item_list_params.rbs b/sig/openai/models/evals/runs/output_item_list_params.rbs new file mode 100644 index 00000000..650dddf9 --- /dev/null +++ b/sig/openai/models/evals/runs/output_item_list_params.rbs @@ -0,0 +1,84 @@ +module OpenAI + module Models + module Evals + module Runs + type output_item_list_params = + { + eval_id: String, + after: String, + limit: Integer, + 
order: OpenAI::Models::Evals::Runs::OutputItemListParams::order, + status: OpenAI::Models::Evals::Runs::OutputItemListParams::status + } + & OpenAI::Internal::Type::request_parameters + + class OutputItemListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor eval_id: String + + attr_reader after: String? + + def after=: (String) -> String + + attr_reader limit: Integer? + + def limit=: (Integer) -> Integer + + attr_reader order: OpenAI::Models::Evals::Runs::OutputItemListParams::order? + + def order=: ( + OpenAI::Models::Evals::Runs::OutputItemListParams::order + ) -> OpenAI::Models::Evals::Runs::OutputItemListParams::order + + attr_reader status: OpenAI::Models::Evals::Runs::OutputItemListParams::status? + + def status=: ( + OpenAI::Models::Evals::Runs::OutputItemListParams::status + ) -> OpenAI::Models::Evals::Runs::OutputItemListParams::status + + def initialize: ( + eval_id: String, + ?after: String, + ?limit: Integer, + ?order: OpenAI::Models::Evals::Runs::OutputItemListParams::order, + ?status: OpenAI::Models::Evals::Runs::OutputItemListParams::status, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + eval_id: String, + after: String, + limit: Integer, + order: OpenAI::Models::Evals::Runs::OutputItemListParams::order, + status: OpenAI::Models::Evals::Runs::OutputItemListParams::status, + request_options: OpenAI::RequestOptions + } + + type order = :asc | :desc + + module Order + extend OpenAI::Internal::Type::Enum + + ASC: :asc + DESC: :desc + + def self?.values: -> ::Array[OpenAI::Models::Evals::Runs::OutputItemListParams::order] + end + + type status = :fail | :pass + + module Status + extend OpenAI::Internal::Type::Enum + + FAIL: :fail + PASS: :pass + + def self?.values: -> ::Array[OpenAI::Models::Evals::Runs::OutputItemListParams::status] + end + end + end + end + end +end diff --git a/sig/openai/models/evals/runs/output_item_list_response.rbs b/sig/openai/models/evals/runs/output_item_list_response.rbs new file mode 100644 index 00000000..53dfbc98 --- /dev/null +++ b/sig/openai/models/evals/runs/output_item_list_response.rbs @@ -0,0 +1,191 @@ +module OpenAI + module Models + module Evals + module Runs + type output_item_list_response = + { + id: String, + created_at: Integer, + datasource_item: ::Hash[Symbol, top], + datasource_item_id: Integer, + eval_id: String, + object: :"eval.run.output_item", + results: ::Array[::Hash[Symbol, top]], + run_id: String, + sample: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample, + status: String + } + + class OutputItemListResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor datasource_item: ::Hash[Symbol, top] + + attr_accessor datasource_item_id: Integer + + attr_accessor eval_id: String + + attr_accessor object: :"eval.run.output_item" + + attr_accessor results: ::Array[::Hash[Symbol, top]] + + attr_accessor run_id: String + + attr_accessor sample: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample + + attr_accessor status: String + + def initialize: ( + id: String, + created_at: Integer, + datasource_item: ::Hash[Symbol, top], + datasource_item_id: Integer, + eval_id: String, + results: ::Array[::Hash[Symbol, top]], + run_id: String, + sample: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample, + status: String, + ?object: :"eval.run.output_item" + ) -> void + + def to_hash: -> { + id: 
String, + created_at: Integer, + datasource_item: ::Hash[Symbol, top], + datasource_item_id: Integer, + eval_id: String, + object: :"eval.run.output_item", + results: ::Array[::Hash[Symbol, top]], + run_id: String, + sample: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample, + status: String + } + + type sample = + { + error: OpenAI::Evals::EvalAPIError, + finish_reason: String, + input: ::Array[OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Input], + max_completion_tokens: Integer, + model: String, + output: ::Array[OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Output], + seed: Integer, + temperature: Float, + top_p: Float, + usage: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage + } + + class Sample < OpenAI::Internal::Type::BaseModel + attr_accessor error: OpenAI::Evals::EvalAPIError + + attr_accessor finish_reason: String + + attr_accessor input: ::Array[OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Input] + + attr_accessor max_completion_tokens: Integer + + attr_accessor model: String + + attr_accessor output: ::Array[OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Output] + + attr_accessor seed: Integer + + attr_accessor temperature: Float + + attr_accessor top_p: Float + + attr_accessor usage: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage + + def initialize: ( + error: OpenAI::Evals::EvalAPIError, + finish_reason: String, + input: ::Array[OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Input], + max_completion_tokens: Integer, + model: String, + output: ::Array[OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Output], + seed: Integer, + temperature: Float, + top_p: Float, + usage: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage + ) -> void + + def to_hash: -> { + error: OpenAI::Evals::EvalAPIError, + finish_reason: String, + input: ::Array[OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Input], + max_completion_tokens: Integer, + model: String, + output: ::Array[OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Output], + seed: Integer, + temperature: Float, + top_p: Float, + usage: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage + } + + type input = { content: String, role: String } + + class Input < OpenAI::Internal::Type::BaseModel + attr_accessor content: String + + attr_accessor role: String + + def initialize: (content: String, role: String) -> void + + def to_hash: -> { content: String, role: String } + end + + type output = { content: String, role: String } + + class Output < OpenAI::Internal::Type::BaseModel + attr_reader content: String? + + def content=: (String) -> String + + attr_reader role: String? 
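+            # Illustrative sketch; the resource call shape below is an
+            # assumption (hypothetical `client`, `eval_id`, `run_id` locals),
+            # not confirmed by this signature file:
+            #
+            #   items = client.evals.runs.output_items.list(run_id, eval_id: eval_id, status: :fail)
+            #   items.each do |item|
+            #     item.sample.output.each { |msg| puts "#{msg.role}: #{msg.content}" }
+            #   end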
+ + def role=: (String) -> String + + def initialize: (?content: String, ?role: String) -> void + + def to_hash: -> { content: String, role: String } + end + + type usage = + { + cached_tokens: Integer, + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + } + + class Usage < OpenAI::Internal::Type::BaseModel + attr_accessor cached_tokens: Integer + + attr_accessor completion_tokens: Integer + + attr_accessor prompt_tokens: Integer + + attr_accessor total_tokens: Integer + + def initialize: ( + cached_tokens: Integer, + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + ) -> void + + def to_hash: -> { + cached_tokens: Integer, + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + } + end + end + end + end + end + end +end diff --git a/sig/openai/models/evals/runs/output_item_retrieve_params.rbs b/sig/openai/models/evals/runs/output_item_retrieve_params.rbs new file mode 100644 index 00000000..65456919 --- /dev/null +++ b/sig/openai/models/evals/runs/output_item_retrieve_params.rbs @@ -0,0 +1,32 @@ +module OpenAI + module Models + module Evals + module Runs + type output_item_retrieve_params = + { eval_id: String, run_id: String } + & OpenAI::Internal::Type::request_parameters + + class OutputItemRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor eval_id: String + + attr_accessor run_id: String + + def initialize: ( + eval_id: String, + run_id: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + eval_id: String, + run_id: String, + request_options: OpenAI::RequestOptions + } + end + end + end + end +end diff --git a/sig/openai/models/evals/runs/output_item_retrieve_response.rbs b/sig/openai/models/evals/runs/output_item_retrieve_response.rbs new file mode 100644 index 00000000..70c37909 --- /dev/null +++ b/sig/openai/models/evals/runs/output_item_retrieve_response.rbs @@ -0,0 +1,191 @@ +module OpenAI + module Models + module Evals + module Runs + type output_item_retrieve_response = + { + id: String, + created_at: Integer, + datasource_item: ::Hash[Symbol, top], + datasource_item_id: Integer, + eval_id: String, + object: :"eval.run.output_item", + results: ::Array[::Hash[Symbol, top]], + run_id: String, + sample: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample, + status: String + } + + class OutputItemRetrieveResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor datasource_item: ::Hash[Symbol, top] + + attr_accessor datasource_item_id: Integer + + attr_accessor eval_id: String + + attr_accessor object: :"eval.run.output_item" + + attr_accessor results: ::Array[::Hash[Symbol, top]] + + attr_accessor run_id: String + + attr_accessor sample: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample + + attr_accessor status: String + + def initialize: ( + id: String, + created_at: Integer, + datasource_item: ::Hash[Symbol, top], + datasource_item_id: Integer, + eval_id: String, + results: ::Array[::Hash[Symbol, top]], + run_id: String, + sample: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample, + status: String, + ?object: :"eval.run.output_item" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + datasource_item: ::Hash[Symbol, top], + datasource_item_id: Integer, + eval_id: String, + object: :"eval.run.output_item", + 
results: ::Array[::Hash[Symbol, top]], + run_id: String, + sample: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample, + status: String + } + + type sample = + { + error: OpenAI::Evals::EvalAPIError, + finish_reason: String, + input: ::Array[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input], + max_completion_tokens: Integer, + model: String, + output: ::Array[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output], + seed: Integer, + temperature: Float, + top_p: Float, + usage: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage + } + + class Sample < OpenAI::Internal::Type::BaseModel + attr_accessor error: OpenAI::Evals::EvalAPIError + + attr_accessor finish_reason: String + + attr_accessor input: ::Array[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input] + + attr_accessor max_completion_tokens: Integer + + attr_accessor model: String + + attr_accessor output: ::Array[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output] + + attr_accessor seed: Integer + + attr_accessor temperature: Float + + attr_accessor top_p: Float + + attr_accessor usage: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage + + def initialize: ( + error: OpenAI::Evals::EvalAPIError, + finish_reason: String, + input: ::Array[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input], + max_completion_tokens: Integer, + model: String, + output: ::Array[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output], + seed: Integer, + temperature: Float, + top_p: Float, + usage: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage + ) -> void + + def to_hash: -> { + error: OpenAI::Evals::EvalAPIError, + finish_reason: String, + input: ::Array[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input], + max_completion_tokens: Integer, + model: String, + output: ::Array[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output], + seed: Integer, + temperature: Float, + top_p: Float, + usage: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage + } + + type input = { content: String, role: String } + + class Input < OpenAI::Internal::Type::BaseModel + attr_accessor content: String + + attr_accessor role: String + + def initialize: (content: String, role: String) -> void + + def to_hash: -> { content: String, role: String } + end + + type output = { content: String, role: String } + + class Output < OpenAI::Internal::Type::BaseModel + attr_reader content: String? + + def content=: (String) -> String + + attr_reader role: String? 
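+            # Illustrative only: a quick sanity pass over the usage block of a
+            # hypothetical `sample` value. The token identity checked here is
+            # an assumption, not an API guarantee:
+            #
+            #   u = sample.usage
+            #   puts "prompt=#{u.prompt_tokens} completion=#{u.completion_tokens}"
+            #   warn "token mismatch" unless u.total_tokens == u.prompt_tokens + u.completion_tokens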
+ + def role=: (String) -> String + + def initialize: (?content: String, ?role: String) -> void + + def to_hash: -> { content: String, role: String } + end + + type usage = + { + cached_tokens: Integer, + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + } + + class Usage < OpenAI::Internal::Type::BaseModel + attr_accessor cached_tokens: Integer + + attr_accessor completion_tokens: Integer + + attr_accessor prompt_tokens: Integer + + attr_accessor total_tokens: Integer + + def initialize: ( + cached_tokens: Integer, + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + ) -> void + + def to_hash: -> { + cached_tokens: Integer, + completion_tokens: Integer, + prompt_tokens: Integer, + total_tokens: Integer + } + end + end + end + end + end + end +end diff --git a/sig/openai/models/file_chunking_strategy.rbs b/sig/openai/models/file_chunking_strategy.rbs index 48e1a062..5f6d7277 100644 --- a/sig/openai/models/file_chunking_strategy.rbs +++ b/sig/openai/models/file_chunking_strategy.rbs @@ -1,11 +1,13 @@ module OpenAI module Models type file_chunking_strategy = - OpenAI::Models::StaticFileChunkingStrategyObject - | OpenAI::Models::OtherFileChunkingStrategyObject + OpenAI::StaticFileChunkingStrategyObject + | OpenAI::OtherFileChunkingStrategyObject - class FileChunkingStrategy < OpenAI::Union - private def self.variants: -> [[:static, OpenAI::Models::StaticFileChunkingStrategyObject], [:other, OpenAI::Models::OtherFileChunkingStrategyObject]] + module FileChunkingStrategy + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::file_chunking_strategy] end end end diff --git a/sig/openai/models/file_chunking_strategy_param.rbs b/sig/openai/models/file_chunking_strategy_param.rbs index 85961a3f..434e01b4 100644 --- a/sig/openai/models/file_chunking_strategy_param.rbs +++ b/sig/openai/models/file_chunking_strategy_param.rbs @@ -1,11 +1,13 @@ module OpenAI module Models type file_chunking_strategy_param = - OpenAI::Models::AutoFileChunkingStrategyParam - | OpenAI::Models::StaticFileChunkingStrategyObjectParam + OpenAI::AutoFileChunkingStrategyParam + | OpenAI::StaticFileChunkingStrategyObjectParam - class FileChunkingStrategyParam < OpenAI::Union - private def self.variants: -> [[:auto, OpenAI::Models::AutoFileChunkingStrategyParam], [:static, OpenAI::Models::StaticFileChunkingStrategyObjectParam]] + module FileChunkingStrategyParam + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::file_chunking_strategy_param] end end end diff --git a/sig/openai/models/file_content.rbs b/sig/openai/models/file_content.rbs new file mode 100644 index 00000000..947667c0 --- /dev/null +++ b/sig/openai/models/file_content.rbs @@ -0,0 +1,5 @@ +module OpenAI + module Models + class FileContent = String + end +end diff --git a/sig/openai/models/file_content_params.rbs b/sig/openai/models/file_content_params.rbs index 0ecb693f..c36aedab 100644 --- a/sig/openai/models/file_content_params.rbs +++ b/sig/openai/models/file_content_params.rbs @@ -1,18 +1,14 @@ module OpenAI module Models - type file_content_params = { } & OpenAI::request_parameters + type file_content_params = { } & OpenAI::Internal::Type::request_parameters - class FileContentParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class FileContentParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include 
OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::file_content_params | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::file_content_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/file_create_params.rbs b/sig/openai/models/file_create_params.rbs index 70531f53..90ff00c6 100644 --- a/sig/openai/models/file_create_params.rbs +++ b/sig/openai/models/file_create_params.rbs @@ -1,26 +1,52 @@ module OpenAI module Models type file_create_params = - { file: (IO | StringIO), purpose: OpenAI::Models::file_purpose } - & OpenAI::request_parameters + { + file: OpenAI::Internal::file_input, + purpose: OpenAI::Models::file_purpose, + expires_after: OpenAI::FileCreateParams::ExpiresAfter + } + & OpenAI::Internal::Type::request_parameters - class FileCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class FileCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - attr_accessor file: IO | StringIO + attr_accessor file: OpenAI::Internal::file_input attr_accessor purpose: OpenAI::Models::file_purpose - def initialize: - ( - file: IO | StringIO, - purpose: OpenAI::Models::file_purpose, - request_options: OpenAI::request_opts - ) -> void - | (?OpenAI::Models::file_create_params | OpenAI::BaseModel data) -> void + attr_reader expires_after: OpenAI::FileCreateParams::ExpiresAfter? - def to_hash: -> OpenAI::Models::file_create_params + def expires_after=: ( + OpenAI::FileCreateParams::ExpiresAfter + ) -> OpenAI::FileCreateParams::ExpiresAfter + + def initialize: ( + file: OpenAI::Internal::file_input, + purpose: OpenAI::Models::file_purpose, + ?expires_after: OpenAI::FileCreateParams::ExpiresAfter, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + file: OpenAI::Internal::file_input, + purpose: OpenAI::Models::file_purpose, + expires_after: OpenAI::FileCreateParams::ExpiresAfter, + request_options: OpenAI::RequestOptions + } + + type expires_after = { anchor: :created_at, seconds: Integer } + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + attr_accessor anchor: :created_at + + attr_accessor seconds: Integer + + def initialize: (seconds: Integer, ?anchor: :created_at) -> void + + def to_hash: -> { anchor: :created_at, seconds: Integer } + end end end end diff --git a/sig/openai/models/file_delete_params.rbs b/sig/openai/models/file_delete_params.rbs index fa6918f3..3c3ea094 100644 --- a/sig/openai/models/file_delete_params.rbs +++ b/sig/openai/models/file_delete_params.rbs @@ -1,16 +1,14 @@ module OpenAI module Models - type file_delete_params = { } & OpenAI::request_parameters + type file_delete_params = { } & OpenAI::Internal::Type::request_parameters - class FileDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class FileDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | (?OpenAI::Models::file_delete_params | OpenAI::BaseModel data) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: 
-> OpenAI::Models::file_delete_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/file_deleted.rbs b/sig/openai/models/file_deleted.rbs index a691b96b..c091a995 100644 --- a/sig/openai/models/file_deleted.rbs +++ b/sig/openai/models/file_deleted.rbs @@ -2,18 +2,16 @@ module OpenAI module Models type file_deleted = { id: String, deleted: bool, object: :file } - class FileDeleted < OpenAI::BaseModel + class FileDeleted < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor deleted: bool attr_accessor object: :file - def initialize: - (id: String, deleted: bool, object: :file) -> void - | (?OpenAI::Models::file_deleted | OpenAI::BaseModel data) -> void + def initialize: (id: String, deleted: bool, ?object: :file) -> void - def to_hash: -> OpenAI::Models::file_deleted + def to_hash: -> { id: String, deleted: bool, object: :file } end end end diff --git a/sig/openai/models/file_list_params.rbs b/sig/openai/models/file_list_params.rbs index 4b92a0e8..2d459f2d 100644 --- a/sig/openai/models/file_list_params.rbs +++ b/sig/openai/models/file_list_params.rbs @@ -7,11 +7,11 @@ module OpenAI order: OpenAI::Models::FileListParams::order, purpose: String } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class FileListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class FileListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_reader after: String? @@ -31,25 +31,31 @@ module OpenAI def purpose=: (String) -> String - def initialize: - ( - after: String, - limit: Integer, - order: OpenAI::Models::FileListParams::order, - purpose: String, - request_options: OpenAI::request_opts - ) -> void - | (?OpenAI::Models::file_list_params | OpenAI::BaseModel data) -> void + def initialize: ( + ?after: String, + ?limit: Integer, + ?order: OpenAI::Models::FileListParams::order, + ?purpose: String, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::file_list_params + def to_hash: -> { + after: String, + limit: Integer, + order: OpenAI::Models::FileListParams::order, + purpose: String, + request_options: OpenAI::RequestOptions + } type order = :asc | :desc - class Order < OpenAI::Enum + module Order + extend OpenAI::Internal::Type::Enum + ASC: :asc DESC: :desc - def self.values: -> ::Array[OpenAI::Models::FileListParams::order] + def self?.values: -> ::Array[OpenAI::Models::FileListParams::order] end end end diff --git a/sig/openai/models/file_object.rbs b/sig/openai/models/file_object.rbs index 1b0a4939..cfb4f32c 100644 --- a/sig/openai/models/file_object.rbs +++ b/sig/openai/models/file_object.rbs @@ -13,7 +13,7 @@ module OpenAI status_details: String } - class FileObject < OpenAI::BaseModel + class FileObject < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor bytes: Integer @@ -36,21 +36,29 @@ module OpenAI def status_details=: (String) -> String - def initialize: - ( - id: String, - bytes: Integer, - created_at: Integer, - filename: String, - purpose: OpenAI::Models::FileObject::purpose, - status: OpenAI::Models::FileObject::status, - expires_at: Integer, - status_details: String, - object: :file - ) -> void - | (?OpenAI::Models::file_object | OpenAI::BaseModel data) -> void - - def to_hash: -> OpenAI::Models::file_object + def initialize: ( + id: String, + bytes: Integer, + 
created_at: Integer, + filename: String, + purpose: OpenAI::Models::FileObject::purpose, + status: OpenAI::Models::FileObject::status, + ?expires_at: Integer, + ?status_details: String, + ?object: :file + ) -> void + + def to_hash: -> { + id: String, + bytes: Integer, + created_at: Integer, + filename: String, + object: :file, + purpose: OpenAI::Models::FileObject::purpose, + status: OpenAI::Models::FileObject::status, + expires_at: Integer, + status_details: String + } type purpose = :assistants @@ -60,8 +68,11 @@ module OpenAI | :"fine-tune" | :"fine-tune-results" | :vision + | :user_data + + module Purpose + extend OpenAI::Internal::Type::Enum - class Purpose < OpenAI::Enum ASSISTANTS: :assistants ASSISTANTS_OUTPUT: :assistants_output BATCH: :batch @@ -69,18 +80,21 @@ module OpenAI FINE_TUNE: :"fine-tune" FINE_TUNE_RESULTS: :"fine-tune-results" VISION: :vision + USER_DATA: :user_data - def self.values: -> ::Array[OpenAI::Models::FileObject::purpose] + def self?.values: -> ::Array[OpenAI::Models::FileObject::purpose] end type status = :uploaded | :processed | :error - class Status < OpenAI::Enum + module Status + extend OpenAI::Internal::Type::Enum + UPLOADED: :uploaded PROCESSED: :processed ERROR: :error - def self.values: -> ::Array[OpenAI::Models::FileObject::status] + def self?.values: -> ::Array[OpenAI::Models::FileObject::status] end end end diff --git a/sig/openai/models/file_purpose.rbs b/sig/openai/models/file_purpose.rbs index cf532f5b..9b17621d 100644 --- a/sig/openai/models/file_purpose.rbs +++ b/sig/openai/models/file_purpose.rbs @@ -3,7 +3,9 @@ module OpenAI type file_purpose = :assistants | :batch | :"fine-tune" | :vision | :user_data | :evals - class FilePurpose < OpenAI::Enum + module FilePurpose + extend OpenAI::Internal::Type::Enum + ASSISTANTS: :assistants BATCH: :batch FINE_TUNE: :"fine-tune" @@ -11,7 +13,7 @@ module OpenAI USER_DATA: :user_data EVALS: :evals - def self.values: -> ::Array[OpenAI::Models::file_purpose] + def self?.values: -> ::Array[OpenAI::Models::file_purpose] end end end diff --git a/sig/openai/models/file_retrieve_params.rbs b/sig/openai/models/file_retrieve_params.rbs index dc986a6b..01eca336 100644 --- a/sig/openai/models/file_retrieve_params.rbs +++ b/sig/openai/models/file_retrieve_params.rbs @@ -1,18 +1,15 @@ module OpenAI module Models - type file_retrieve_params = { } & OpenAI::request_parameters + type file_retrieve_params = + { } & OpenAI::Internal::Type::request_parameters - class FileRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class FileRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::file_retrieve_params | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::file_retrieve_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/fine_tuning/alpha/grader_run_params.rbs b/sig/openai/models/fine_tuning/alpha/grader_run_params.rbs new file mode 100644 index 00000000..64b909d5 --- /dev/null +++ b/sig/openai/models/fine_tuning/alpha/grader_run_params.rbs @@ -0,0 +1,55 @@ +module OpenAI + module Models + module FineTuning + module Alpha + type grader_run_params = + { + grader: 
OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader, + model_sample: String, + item: top + } + & OpenAI::Internal::Type::request_parameters + + class GraderRunParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor grader: OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader + + attr_accessor model_sample: String + + attr_reader item: top? + + def item=: (top) -> top + + def initialize: ( + grader: OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader, + model_sample: String, + ?item: top, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + grader: OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader, + model_sample: String, + item: top, + request_options: OpenAI::RequestOptions + } + + type grader = + OpenAI::Graders::StringCheckGrader + | OpenAI::Graders::TextSimilarityGrader + | OpenAI::Graders::PythonGrader + | OpenAI::Graders::ScoreModelGrader + | OpenAI::Graders::MultiGrader + + module Grader + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader] + end + end + end + end + end +end diff --git a/sig/openai/models/fine_tuning/alpha/grader_run_response.rbs b/sig/openai/models/fine_tuning/alpha/grader_run_response.rbs new file mode 100644 index 00000000..e54eb087 --- /dev/null +++ b/sig/openai/models/fine_tuning/alpha/grader_run_response.rbs @@ -0,0 +1,168 @@ +module OpenAI + module Models + module FineTuning + module Alpha + type grader_run_response = + { + metadata: OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata, + model_grader_token_usage_per_model: ::Hash[Symbol, top], + reward: Float, + sub_rewards: ::Hash[Symbol, top] + } + + class GraderRunResponse < OpenAI::Internal::Type::BaseModel + attr_accessor metadata: OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata + + attr_accessor model_grader_token_usage_per_model: ::Hash[Symbol, top] + + attr_accessor reward: Float + + attr_accessor sub_rewards: ::Hash[Symbol, top] + + def initialize: ( + metadata: OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata, + model_grader_token_usage_per_model: ::Hash[Symbol, top], + reward: Float, + sub_rewards: ::Hash[Symbol, top] + ) -> void + + def to_hash: -> { + metadata: OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata, + model_grader_token_usage_per_model: ::Hash[Symbol, top], + reward: Float, + sub_rewards: ::Hash[Symbol, top] + } + + type metadata = + { + errors: OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::Errors, + execution_time: Float, + name: String, + sampled_model_name: String?, + scores: ::Hash[Symbol, top], + token_usage: Integer?, + type: String + } + + class Metadata < OpenAI::Internal::Type::BaseModel + attr_accessor errors: OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::Errors + + attr_accessor execution_time: Float + + attr_accessor name: String + + attr_accessor sampled_model_name: String? + + attr_accessor scores: ::Hash[Symbol, top] + + attr_accessor token_usage: Integer? 
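+ # NOTE: unlike the literal-symbol `type` fields elsewhere in these signatures (e.g. `:wandb`), `type` below is a plain String, presumably the grader's type tag.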
+ + attr_accessor type: String + + def initialize: ( + errors: OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::Errors, + execution_time: Float, + name: String, + sampled_model_name: String?, + scores: ::Hash[Symbol, top], + token_usage: Integer?, + type: String + ) -> void + + def to_hash: -> { + errors: OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::Errors, + execution_time: Float, + name: String, + sampled_model_name: String?, + scores: ::Hash[Symbol, top], + token_usage: Integer?, + type: String + } + + type errors = + { + formula_parse_error: bool, + invalid_variable_error: bool, + model_grader_parse_error: bool, + model_grader_refusal_error: bool, + model_grader_server_error: bool, + model_grader_server_error_details: String?, + other_error: bool, + python_grader_runtime_error: bool, + python_grader_runtime_error_details: String?, + python_grader_server_error: bool, + python_grader_server_error_type: String?, + sample_parse_error: bool, + truncated_observation_error: bool, + unresponsive_reward_error: bool + } + + class Errors < OpenAI::Internal::Type::BaseModel + attr_accessor formula_parse_error: bool + + attr_accessor invalid_variable_error: bool + + attr_accessor model_grader_parse_error: bool + + attr_accessor model_grader_refusal_error: bool + + attr_accessor model_grader_server_error: bool + + attr_accessor model_grader_server_error_details: String? + + attr_accessor other_error: bool + + attr_accessor python_grader_runtime_error: bool + + attr_accessor python_grader_runtime_error_details: String? + + attr_accessor python_grader_server_error: bool + + attr_accessor python_grader_server_error_type: String? + + attr_accessor sample_parse_error: bool + + attr_accessor truncated_observation_error: bool + + attr_accessor unresponsive_reward_error: bool + + def initialize: ( + formula_parse_error: bool, + invalid_variable_error: bool, + model_grader_parse_error: bool, + model_grader_refusal_error: bool, + model_grader_server_error: bool, + model_grader_server_error_details: String?, + other_error: bool, + python_grader_runtime_error: bool, + python_grader_runtime_error_details: String?, + python_grader_server_error: bool, + python_grader_server_error_type: String?, + sample_parse_error: bool, + truncated_observation_error: bool, + unresponsive_reward_error: bool + ) -> void + + def to_hash: -> { + formula_parse_error: bool, + invalid_variable_error: bool, + model_grader_parse_error: bool, + model_grader_refusal_error: bool, + model_grader_server_error: bool, + model_grader_server_error_details: String?, + other_error: bool, + python_grader_runtime_error: bool, + python_grader_runtime_error_details: String?, + python_grader_server_error: bool, + python_grader_server_error_type: String?, + sample_parse_error: bool, + truncated_observation_error: bool, + unresponsive_reward_error: bool + } + end + end + end + end + end + end +end diff --git a/sig/openai/models/fine_tuning/alpha/grader_validate_params.rbs b/sig/openai/models/fine_tuning/alpha/grader_validate_params.rbs new file mode 100644 index 00000000..c7e0f385 --- /dev/null +++ b/sig/openai/models/fine_tuning/alpha/grader_validate_params.rbs @@ -0,0 +1,43 @@ +module OpenAI + module Models + module FineTuning + module Alpha + type grader_validate_params = + { + grader: OpenAI::Models::FineTuning::Alpha::GraderValidateParams::grader + } + & OpenAI::Internal::Type::request_parameters + + class GraderValidateParams < OpenAI::Internal::Type::BaseModel + extend 
OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor grader: OpenAI::Models::FineTuning::Alpha::GraderValidateParams::grader + + def initialize: ( + grader: OpenAI::Models::FineTuning::Alpha::GraderValidateParams::grader, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + grader: OpenAI::Models::FineTuning::Alpha::GraderValidateParams::grader, + request_options: OpenAI::RequestOptions + } + + type grader = + OpenAI::Graders::StringCheckGrader + | OpenAI::Graders::TextSimilarityGrader + | OpenAI::Graders::PythonGrader + | OpenAI::Graders::ScoreModelGrader + | OpenAI::Graders::MultiGrader + + module Grader + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::Alpha::GraderValidateParams::grader] + end + end + end + end + end +end diff --git a/sig/openai/models/fine_tuning/alpha/grader_validate_response.rbs b/sig/openai/models/fine_tuning/alpha/grader_validate_response.rbs new file mode 100644 index 00000000..939e54e2 --- /dev/null +++ b/sig/openai/models/fine_tuning/alpha/grader_validate_response.rbs @@ -0,0 +1,41 @@ +module OpenAI + module Models + module FineTuning + module Alpha + type grader_validate_response = + { + grader: OpenAI::Models::FineTuning::Alpha::GraderValidateResponse::grader + } + + class GraderValidateResponse < OpenAI::Internal::Type::BaseModel + attr_reader grader: OpenAI::Models::FineTuning::Alpha::GraderValidateResponse::grader? + + def grader=: ( + OpenAI::Models::FineTuning::Alpha::GraderValidateResponse::grader + ) -> OpenAI::Models::FineTuning::Alpha::GraderValidateResponse::grader + + def initialize: ( + ?grader: OpenAI::Models::FineTuning::Alpha::GraderValidateResponse::grader + ) -> void + + def to_hash: -> { + grader: OpenAI::Models::FineTuning::Alpha::GraderValidateResponse::grader + } + + type grader = + OpenAI::Graders::StringCheckGrader + | OpenAI::Graders::TextSimilarityGrader + | OpenAI::Graders::PythonGrader + | OpenAI::Graders::ScoreModelGrader + | OpenAI::Graders::MultiGrader + + module Grader + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::Alpha::GraderValidateResponse::grader] + end + end + end + end + end +end diff --git a/sig/openai/models/fine_tuning/checkpoints/permission_create_params.rbs b/sig/openai/models/fine_tuning/checkpoints/permission_create_params.rbs new file mode 100644 index 00000000..b74273b6 --- /dev/null +++ b/sig/openai/models/fine_tuning/checkpoints/permission_create_params.rbs @@ -0,0 +1,28 @@ +module OpenAI + module Models + module FineTuning + module Checkpoints + type permission_create_params = + { project_ids: ::Array[String] } + & OpenAI::Internal::Type::request_parameters + + class PermissionCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor project_ids: ::Array[String] + + def initialize: ( + project_ids: ::Array[String], + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + project_ids: ::Array[String], + request_options: OpenAI::RequestOptions + } + end + end + end + end +end diff --git a/sig/openai/models/fine_tuning/checkpoints/permission_create_response.rbs b/sig/openai/models/fine_tuning/checkpoints/permission_create_response.rbs new file mode 100644 index 00000000..9e54b6cd --- /dev/null +++ b/sig/openai/models/fine_tuning/checkpoints/permission_create_response.rbs @@ 
-0,0 +1,39 @@ +module OpenAI + module Models + module FineTuning + module Checkpoints + type permission_create_response = + { + id: String, + created_at: Integer, + object: :"checkpoint.permission", + project_id: String + } + + class PermissionCreateResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor object: :"checkpoint.permission" + + attr_accessor project_id: String + + def initialize: ( + id: String, + created_at: Integer, + project_id: String, + ?object: :"checkpoint.permission" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + object: :"checkpoint.permission", + project_id: String + } + end + end + end + end +end diff --git a/sig/openai/models/fine_tuning/checkpoints/permission_delete_params.rbs b/sig/openai/models/fine_tuning/checkpoints/permission_delete_params.rbs new file mode 100644 index 00000000..2d265756 --- /dev/null +++ b/sig/openai/models/fine_tuning/checkpoints/permission_delete_params.rbs @@ -0,0 +1,28 @@ +module OpenAI + module Models + module FineTuning + module Checkpoints + type permission_delete_params = + { fine_tuned_model_checkpoint: String } + & OpenAI::Internal::Type::request_parameters + + class PermissionDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_accessor fine_tuned_model_checkpoint: String + + def initialize: ( + fine_tuned_model_checkpoint: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + fine_tuned_model_checkpoint: String, + request_options: OpenAI::RequestOptions + } + end + end + end + end +end diff --git a/sig/openai/models/fine_tuning/checkpoints/permission_delete_response.rbs b/sig/openai/models/fine_tuning/checkpoints/permission_delete_response.rbs new file mode 100644 index 00000000..c2d55981 --- /dev/null +++ b/sig/openai/models/fine_tuning/checkpoints/permission_delete_response.rbs @@ -0,0 +1,30 @@ +module OpenAI + module Models + module FineTuning + module Checkpoints + type permission_delete_response = + { id: String, deleted: bool, object: :"checkpoint.permission" } + + class PermissionDeleteResponse < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor deleted: bool + + attr_accessor object: :"checkpoint.permission" + + def initialize: ( + id: String, + deleted: bool, + ?object: :"checkpoint.permission" + ) -> void + + def to_hash: -> { + id: String, + deleted: bool, + object: :"checkpoint.permission" + } + end + end + end + end +end diff --git a/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_params.rbs b/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_params.rbs new file mode 100644 index 00000000..a76caaa1 --- /dev/null +++ b/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_params.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module FineTuning + module Checkpoints + type permission_retrieve_params = + { + after: String, + limit: Integer, + order: OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams::order, + project_id: String + } + & OpenAI::Internal::Type::request_parameters + + class PermissionRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + attr_reader after: String? + + def after=: (String) -> String + + attr_reader limit: Integer? 
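+ # Standard cursor-style listing knobs (`after`/`limit`/`order`) plus an optional `project_id` filter; all are optional, per the `?`-prefixed keywords in `initialize` below.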
+ + def limit=: (Integer) -> Integer + + attr_reader order: OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams::order? + + def order=: ( + OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams::order + ) -> OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams::order + + attr_reader project_id: String? + + def project_id=: (String) -> String + + def initialize: ( + ?after: String, + ?limit: Integer, + ?order: OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams::order, + ?project_id: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + after: String, + limit: Integer, + order: OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams::order, + project_id: String, + request_options: OpenAI::RequestOptions + } + + type order = :ascending | :descending + + module Order + extend OpenAI::Internal::Type::Enum + + ASCENDING: :ascending + DESCENDING: :descending + + def self?.values: -> ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams::order] + end + end + end + end + end +end diff --git a/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs b/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs new file mode 100644 index 00000000..54f9630a --- /dev/null +++ b/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs @@ -0,0 +1,76 @@ +module OpenAI + module Models + module FineTuning + module Checkpoints + type permission_retrieve_response = + { + data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data], + has_more: bool, + object: :list, + first_id: String?, + last_id: String? + } + + class PermissionRetrieveResponse < OpenAI::Internal::Type::BaseModel + attr_accessor data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data] + + attr_accessor has_more: bool + + attr_accessor object: :list + + attr_accessor first_id: String? + + attr_accessor last_id: String? + + def initialize: ( + data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data], + has_more: bool, + ?first_id: String?, + ?last_id: String?, + ?object: :list + ) -> void + + def to_hash: -> { + data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data], + has_more: bool, + object: :list, + first_id: String?, + last_id: String? 
+ } + + type data = + { + id: String, + created_at: Integer, + object: :"checkpoint.permission", + project_id: String + } + + class Data < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor object: :"checkpoint.permission" + + attr_accessor project_id: String + + def initialize: ( + id: String, + created_at: Integer, + project_id: String, + ?object: :"checkpoint.permission" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + object: :"checkpoint.permission", + project_id: String + } + end + end + end + end + end +end diff --git a/sig/openai/models/fine_tuning/dpo_hyperparameters.rbs b/sig/openai/models/fine_tuning/dpo_hyperparameters.rbs new file mode 100644 index 00000000..0abf70d0 --- /dev/null +++ b/sig/openai/models/fine_tuning/dpo_hyperparameters.rbs @@ -0,0 +1,85 @@ +module OpenAI + module Models + module FineTuning + type dpo_hyperparameters = + { + batch_size: OpenAI::Models::FineTuning::DpoHyperparameters::batch_size, + beta: OpenAI::Models::FineTuning::DpoHyperparameters::beta, + learning_rate_multiplier: OpenAI::Models::FineTuning::DpoHyperparameters::learning_rate_multiplier, + n_epochs: OpenAI::Models::FineTuning::DpoHyperparameters::n_epochs + } + + class DpoHyperparameters < OpenAI::Internal::Type::BaseModel + attr_reader batch_size: OpenAI::Models::FineTuning::DpoHyperparameters::batch_size? + + def batch_size=: ( + OpenAI::Models::FineTuning::DpoHyperparameters::batch_size + ) -> OpenAI::Models::FineTuning::DpoHyperparameters::batch_size + + attr_reader beta: OpenAI::Models::FineTuning::DpoHyperparameters::beta? + + def beta=: ( + OpenAI::Models::FineTuning::DpoHyperparameters::beta + ) -> OpenAI::Models::FineTuning::DpoHyperparameters::beta + + attr_reader learning_rate_multiplier: OpenAI::Models::FineTuning::DpoHyperparameters::learning_rate_multiplier? + + def learning_rate_multiplier=: ( + OpenAI::Models::FineTuning::DpoHyperparameters::learning_rate_multiplier + ) -> OpenAI::Models::FineTuning::DpoHyperparameters::learning_rate_multiplier + + attr_reader n_epochs: OpenAI::Models::FineTuning::DpoHyperparameters::n_epochs? 
+ + def n_epochs=: ( + OpenAI::Models::FineTuning::DpoHyperparameters::n_epochs + ) -> OpenAI::Models::FineTuning::DpoHyperparameters::n_epochs + + def initialize: ( + ?batch_size: OpenAI::Models::FineTuning::DpoHyperparameters::batch_size, + ?beta: OpenAI::Models::FineTuning::DpoHyperparameters::beta, + ?learning_rate_multiplier: OpenAI::Models::FineTuning::DpoHyperparameters::learning_rate_multiplier, + ?n_epochs: OpenAI::Models::FineTuning::DpoHyperparameters::n_epochs + ) -> void + + def to_hash: -> { + batch_size: OpenAI::Models::FineTuning::DpoHyperparameters::batch_size, + beta: OpenAI::Models::FineTuning::DpoHyperparameters::beta, + learning_rate_multiplier: OpenAI::Models::FineTuning::DpoHyperparameters::learning_rate_multiplier, + n_epochs: OpenAI::Models::FineTuning::DpoHyperparameters::n_epochs + } + + type batch_size = :auto | Integer + + module BatchSize + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::DpoHyperparameters::batch_size] + end + + type beta = :auto | Float + + module Beta + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::DpoHyperparameters::beta] + end + + type learning_rate_multiplier = :auto | Float + + module LearningRateMultiplier + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::DpoHyperparameters::learning_rate_multiplier] + end + + type n_epochs = :auto | Integer + + module NEpochs + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::DpoHyperparameters::n_epochs] + end + end + end + end +end diff --git a/sig/openai/models/fine_tuning/dpo_method.rbs b/sig/openai/models/fine_tuning/dpo_method.rbs new file mode 100644 index 00000000..094cebbf --- /dev/null +++ b/sig/openai/models/fine_tuning/dpo_method.rbs @@ -0,0 +1,24 @@ +module OpenAI + module Models + module FineTuning + type dpo_method = + { hyperparameters: OpenAI::FineTuning::DpoHyperparameters } + + class DpoMethod < OpenAI::Internal::Type::BaseModel + attr_reader hyperparameters: OpenAI::FineTuning::DpoHyperparameters? 
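+ # Optional field in the generator's reader/writer pattern: the `?`-typed attr_reader returns nil when unset, and `initialize` takes `?hyperparameters:`.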
+ + def hyperparameters=: ( + OpenAI::FineTuning::DpoHyperparameters + ) -> OpenAI::FineTuning::DpoHyperparameters + + def initialize: ( + ?hyperparameters: OpenAI::FineTuning::DpoHyperparameters + ) -> void + + def to_hash: -> { + hyperparameters: OpenAI::FineTuning::DpoHyperparameters + } + end + end + end +end diff --git a/sig/openai/models/fine_tuning/fine_tuning_job.rbs b/sig/openai/models/fine_tuning/fine_tuning_job.rbs index 44ab2010..f2c2812d 100644 --- a/sig/openai/models/fine_tuning/fine_tuning_job.rbs +++ b/sig/openai/models/fine_tuning/fine_tuning_job.rbs @@ -1,6 +1,5 @@ module OpenAI module Models - class FineTuningJob = FineTuning::FineTuningJob module FineTuning @@ -8,10 +7,10 @@ module OpenAI { id: String, created_at: Integer, - error: OpenAI::Models::FineTuning::FineTuningJob::Error?, + error: OpenAI::FineTuning::FineTuningJob::Error?, fine_tuned_model: String?, finished_at: Integer?, - hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters, + hyperparameters: OpenAI::FineTuning::FineTuningJob::Hyperparameters, model: String, object: :"fine_tuning.job", organization_id: String, @@ -22,23 +21,23 @@ module OpenAI training_file: String, validation_file: String?, estimated_finish: Integer?, - integrations: ::Array[OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject]?, + integrations: ::Array[OpenAI::FineTuning::FineTuningJobWandbIntegrationObject]?, metadata: OpenAI::Models::metadata?, - method_: OpenAI::Models::FineTuning::FineTuningJob::Method + method_: OpenAI::FineTuning::FineTuningJob::Method } - class FineTuningJob < OpenAI::BaseModel + class FineTuningJob < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor created_at: Integer - attr_accessor error: OpenAI::Models::FineTuning::FineTuningJob::Error? + attr_accessor error: OpenAI::FineTuning::FineTuningJob::Error? attr_accessor fine_tuned_model: String? attr_accessor finished_at: Integer? - attr_accessor hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters + attr_accessor hyperparameters: OpenAI::FineTuning::FineTuningJob::Hyperparameters attr_accessor model: String @@ -60,77 +59,87 @@ module OpenAI attr_accessor estimated_finish: Integer? - attr_accessor integrations: ::Array[OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject]? + attr_accessor integrations: ::Array[OpenAI::FineTuning::FineTuningJobWandbIntegrationObject]? attr_accessor metadata: OpenAI::Models::metadata? - attr_reader method_: OpenAI::Models::FineTuning::FineTuningJob::Method? + attr_reader method_: OpenAI::FineTuning::FineTuningJob::Method? 
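+ # The trailing underscore in `method_` avoids shadowing Ruby's built-in Object#method.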
def method_=: ( - OpenAI::Models::FineTuning::FineTuningJob::Method - ) -> OpenAI::Models::FineTuning::FineTuningJob::Method - - def initialize: - ( - id: String, - created_at: Integer, - error: OpenAI::Models::FineTuning::FineTuningJob::Error?, - fine_tuned_model: String?, - finished_at: Integer?, - hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters, - model: String, - organization_id: String, - result_files: ::Array[String], - seed: Integer, - status: OpenAI::Models::FineTuning::FineTuningJob::status, - trained_tokens: Integer?, - training_file: String, - validation_file: String?, - estimated_finish: Integer?, - integrations: ::Array[OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject]?, - metadata: OpenAI::Models::metadata?, - method_: OpenAI::Models::FineTuning::FineTuningJob::Method, - object: :"fine_tuning.job" - ) -> void - | ( - ?OpenAI::Models::FineTuning::fine_tuning_job - | OpenAI::BaseModel data - ) -> void + OpenAI::FineTuning::FineTuningJob::Method + ) -> OpenAI::FineTuning::FineTuningJob::Method - def to_hash: -> OpenAI::Models::FineTuning::fine_tuning_job + def initialize: ( + id: String, + created_at: Integer, + error: OpenAI::FineTuning::FineTuningJob::Error?, + fine_tuned_model: String?, + finished_at: Integer?, + hyperparameters: OpenAI::FineTuning::FineTuningJob::Hyperparameters, + model: String, + organization_id: String, + result_files: ::Array[String], + seed: Integer, + status: OpenAI::Models::FineTuning::FineTuningJob::status, + trained_tokens: Integer?, + training_file: String, + validation_file: String?, + ?estimated_finish: Integer?, + ?integrations: ::Array[OpenAI::FineTuning::FineTuningJobWandbIntegrationObject]?, + ?metadata: OpenAI::Models::metadata?, + ?method_: OpenAI::FineTuning::FineTuningJob::Method, + ?object: :"fine_tuning.job" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + error: OpenAI::FineTuning::FineTuningJob::Error?, + fine_tuned_model: String?, + finished_at: Integer?, + hyperparameters: OpenAI::FineTuning::FineTuningJob::Hyperparameters, + model: String, + object: :"fine_tuning.job", + organization_id: String, + result_files: ::Array[String], + seed: Integer, + status: OpenAI::Models::FineTuning::FineTuningJob::status, + trained_tokens: Integer?, + training_file: String, + validation_file: String?, + estimated_finish: Integer?, + integrations: ::Array[OpenAI::FineTuning::FineTuningJobWandbIntegrationObject]?, + metadata: OpenAI::Models::metadata?, + method_: OpenAI::FineTuning::FineTuningJob::Method + } type error = { code: String, message: String, param: String? } - class Error < OpenAI::BaseModel + class Error < OpenAI::Internal::Type::BaseModel attr_accessor code: String attr_accessor message: String attr_accessor param: String? - def initialize: - (code: String, message: String, param: String?) -> void - | ( - ?OpenAI::Models::FineTuning::FineTuningJob::error - | OpenAI::BaseModel data - ) -> void + def initialize: ( + code: String, + message: String, + param: String? + ) -> void - def to_hash: -> OpenAI::Models::FineTuning::FineTuningJob::error + def to_hash: -> { code: String, message: String, param: String? 
} end type hyperparameters = { - batch_size: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::batch_size, + batch_size: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::batch_size?, learning_rate_multiplier: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::learning_rate_multiplier, n_epochs: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::n_epochs } - class Hyperparameters < OpenAI::BaseModel - attr_reader batch_size: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::batch_size? - - def batch_size=: ( - OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::batch_size - ) -> OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::batch_size + class Hyperparameters < OpenAI::Internal::Type::BaseModel + attr_accessor batch_size: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::batch_size? attr_reader learning_rate_multiplier: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::learning_rate_multiplier? @@ -144,35 +153,40 @@ module OpenAI OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::n_epochs ) -> OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::n_epochs - def initialize: - ( - batch_size: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::batch_size, - learning_rate_multiplier: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::learning_rate_multiplier, - n_epochs: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::n_epochs - ) -> void - | ( - ?OpenAI::Models::FineTuning::FineTuningJob::hyperparameters - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?batch_size: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::batch_size?, + ?learning_rate_multiplier: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::learning_rate_multiplier, + ?n_epochs: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::n_epochs + ) -> void - def to_hash: -> OpenAI::Models::FineTuning::FineTuningJob::hyperparameters + def to_hash: -> { + batch_size: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::batch_size?, + learning_rate_multiplier: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::learning_rate_multiplier, + n_epochs: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::n_epochs + } type batch_size = :auto | Integer - class BatchSize < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Integer]] + module BatchSize + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::batch_size] end type learning_rate_multiplier = :auto | Float - class LearningRateMultiplier < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Float]] + module LearningRateMultiplier + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::learning_rate_multiplier] end type n_epochs = :auto | Integer - class NEpochs < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Integer]] + module NEpochs + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters::n_epochs] end end @@ -184,7 +198,9 @@ module OpenAI | :failed | :cancelled - class Status < OpenAI::Enum + module Status + extend OpenAI::Internal::Type::Enum + VALIDATING_FILES: :validating_files QUEUED: :queued RUNNING: :running @@ -192,233 +208,62 @@ module OpenAI FAILED: :failed CANCELLED: :cancelled - def self.values: -> 
::Array[OpenAI::Models::FineTuning::FineTuningJob::status] + def self?.values: -> ::Array[OpenAI::Models::FineTuning::FineTuningJob::status] end type method_ = { - dpo: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo, - supervised: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised, - type: OpenAI::Models::FineTuning::FineTuningJob::Method::type_ + type: OpenAI::Models::FineTuning::FineTuningJob::Method::type_, + dpo: OpenAI::FineTuning::DpoMethod, + reinforcement: OpenAI::FineTuning::ReinforcementMethod, + supervised: OpenAI::FineTuning::SupervisedMethod } - class Method < OpenAI::BaseModel - attr_reader dpo: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo? - - def dpo=: ( - OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo - ) -> OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo - - attr_reader supervised: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised? - - def supervised=: ( - OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised - ) -> OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised - - attr_reader type: OpenAI::Models::FineTuning::FineTuningJob::Method::type_? - - def type=: ( - OpenAI::Models::FineTuning::FineTuningJob::Method::type_ - ) -> OpenAI::Models::FineTuning::FineTuningJob::Method::type_ - - def initialize: - ( - dpo: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo, - supervised: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised, - type: OpenAI::Models::FineTuning::FineTuningJob::Method::type_ - ) -> void - | ( - ?OpenAI::Models::FineTuning::FineTuningJob::method_ - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::FineTuningJob::method_ - - type dpo = - { - hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters - } - - class Dpo < OpenAI::BaseModel - attr_reader hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters? - - def hyperparameters=: ( - OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters - ) -> OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters - - def initialize: - ( - hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters - ) -> void - | ( - ?OpenAI::Models::FineTuning::FineTuningJob::Method::dpo - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::FineTuningJob::Method::dpo - - type hyperparameters = - { - batch_size: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::batch_size, - beta: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::beta, - learning_rate_multiplier: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::learning_rate_multiplier, - n_epochs: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::n_epochs - } - - class Hyperparameters < OpenAI::BaseModel - attr_reader batch_size: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::batch_size? - - def batch_size=: ( - OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::batch_size - ) -> OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::batch_size - - attr_reader beta: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::beta? 
- - def beta=: ( - OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::beta - ) -> OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::beta - - attr_reader learning_rate_multiplier: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::learning_rate_multiplier? - - def learning_rate_multiplier=: ( - OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::learning_rate_multiplier - ) -> OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::learning_rate_multiplier - - attr_reader n_epochs: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::n_epochs? - - def n_epochs=: ( - OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::n_epochs - ) -> OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::n_epochs - - def initialize: - ( - batch_size: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::batch_size, - beta: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::beta, - learning_rate_multiplier: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::learning_rate_multiplier, - n_epochs: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters::n_epochs - ) -> void - | ( - ?OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::hyperparameters - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::hyperparameters - - type batch_size = :auto | Integer - - class BatchSize < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Integer]] - end - - type beta = :auto | Float - - class Beta < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Float]] - end - - type learning_rate_multiplier = :auto | Float - - class LearningRateMultiplier < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Float]] - end - - type n_epochs = :auto | Integer - - class NEpochs < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Integer]] - end - end - end - - type supervised = - { - hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters - } - - class Supervised < OpenAI::BaseModel - attr_reader hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters? + class Method < OpenAI::Internal::Type::BaseModel + attr_accessor type: OpenAI::Models::FineTuning::FineTuningJob::Method::type_ - def hyperparameters=: ( - OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters - ) -> OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters + attr_reader dpo: OpenAI::FineTuning::DpoMethod? 
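+ # Only `type` is required; the dpo/reinforcement/supervised configs are optional readers, and presumably only the one matching `type` is populated.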
- def initialize: - ( - hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters - ) -> void - | ( - ?OpenAI::Models::FineTuning::FineTuningJob::Method::supervised - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::FineTuningJob::Method::supervised - - type hyperparameters = - { - batch_size: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::batch_size, - learning_rate_multiplier: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::learning_rate_multiplier, - n_epochs: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::n_epochs - } - - class Hyperparameters < OpenAI::BaseModel - attr_reader batch_size: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::batch_size? - - def batch_size=: ( - OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::batch_size - ) -> OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::batch_size - - attr_reader learning_rate_multiplier: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::learning_rate_multiplier? - - def learning_rate_multiplier=: ( - OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::learning_rate_multiplier - ) -> OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::learning_rate_multiplier - - attr_reader n_epochs: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::n_epochs? - - def n_epochs=: ( - OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::n_epochs - ) -> OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::n_epochs - - def initialize: - ( - batch_size: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::batch_size, - learning_rate_multiplier: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::learning_rate_multiplier, - n_epochs: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters::n_epochs - ) -> void - | ( - ?OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::hyperparameters - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::hyperparameters + def dpo=: ( + OpenAI::FineTuning::DpoMethod + ) -> OpenAI::FineTuning::DpoMethod - type batch_size = :auto | Integer + attr_reader reinforcement: OpenAI::FineTuning::ReinforcementMethod? - class BatchSize < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Integer]] - end + def reinforcement=: ( + OpenAI::FineTuning::ReinforcementMethod + ) -> OpenAI::FineTuning::ReinforcementMethod - type learning_rate_multiplier = :auto | Float + attr_reader supervised: OpenAI::FineTuning::SupervisedMethod? 
- class LearningRateMultiplier < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Float]] - end + def supervised=: ( + OpenAI::FineTuning::SupervisedMethod + ) -> OpenAI::FineTuning::SupervisedMethod + + def initialize: ( + type: OpenAI::Models::FineTuning::FineTuningJob::Method::type_, + ?dpo: OpenAI::FineTuning::DpoMethod, + ?reinforcement: OpenAI::FineTuning::ReinforcementMethod, + ?supervised: OpenAI::FineTuning::SupervisedMethod + ) -> void - type n_epochs = :auto | Integer + def to_hash: -> { + type: OpenAI::Models::FineTuning::FineTuningJob::Method::type_, + dpo: OpenAI::FineTuning::DpoMethod, + reinforcement: OpenAI::FineTuning::ReinforcementMethod, + supervised: OpenAI::FineTuning::SupervisedMethod + } - class NEpochs < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Integer]] - end - end - end + type type_ = :supervised | :dpo | :reinforcement - type type_ = :supervised | :dpo + module Type + extend OpenAI::Internal::Type::Enum - class Type < OpenAI::Enum SUPERVISED: :supervised DPO: :dpo + REINFORCEMENT: :reinforcement - def self.values: -> ::Array[OpenAI::Models::FineTuning::FineTuningJob::Method::type_] + def self?.values: -> ::Array[OpenAI::Models::FineTuning::FineTuningJob::Method::type_] end end end diff --git a/sig/openai/models/fine_tuning/fine_tuning_job_event.rbs b/sig/openai/models/fine_tuning/fine_tuning_job_event.rbs index 53287f13..b1c4f9c4 100644 --- a/sig/openai/models/fine_tuning/fine_tuning_job_event.rbs +++ b/sig/openai/models/fine_tuning/fine_tuning_job_event.rbs @@ -1,6 +1,5 @@ module OpenAI module Models - class FineTuningJobEvent = FineTuning::FineTuningJobEvent module FineTuning @@ -15,7 +14,7 @@ module OpenAI type: OpenAI::Models::FineTuning::FineTuningJobEvent::type_ } - class FineTuningJobEvent < OpenAI::BaseModel + class FineTuningJobEvent < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor created_at: Integer @@ -36,40 +35,47 @@ module OpenAI OpenAI::Models::FineTuning::FineTuningJobEvent::type_ ) -> OpenAI::Models::FineTuning::FineTuningJobEvent::type_ - def initialize: - ( - id: String, - created_at: Integer, - level: OpenAI::Models::FineTuning::FineTuningJobEvent::level, - message: String, - data: top, - type: OpenAI::Models::FineTuning::FineTuningJobEvent::type_, - object: :"fine_tuning.job.event" - ) -> void - | ( - ?OpenAI::Models::FineTuning::fine_tuning_job_event - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::fine_tuning_job_event + def initialize: ( + id: String, + created_at: Integer, + level: OpenAI::Models::FineTuning::FineTuningJobEvent::level, + message: String, + ?data: top, + ?type: OpenAI::Models::FineTuning::FineTuningJobEvent::type_, + ?object: :"fine_tuning.job.event" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + level: OpenAI::Models::FineTuning::FineTuningJobEvent::level, + message: String, + object: :"fine_tuning.job.event", + data: top, + type: OpenAI::Models::FineTuning::FineTuningJobEvent::type_ + } type level = :info | :warn | :error - class Level < OpenAI::Enum + module Level + extend OpenAI::Internal::Type::Enum + INFO: :info WARN: :warn ERROR: :error - def self.values: -> ::Array[OpenAI::Models::FineTuning::FineTuningJobEvent::level] + def self?.values: -> ::Array[OpenAI::Models::FineTuning::FineTuningJobEvent::level] end type type_ = :message | :metrics - class Type < OpenAI::Enum + module Type + extend OpenAI::Internal::Type::Enum + MESSAGE: :message METRICS: :metrics - def self.values: -> 
::Array[OpenAI::Models::FineTuning::FineTuningJobEvent::type_] + def self?.values: -> ::Array[OpenAI::Models::FineTuning::FineTuningJobEvent::type_] end end end diff --git a/sig/openai/models/fine_tuning/fine_tuning_job_integration.rbs b/sig/openai/models/fine_tuning/fine_tuning_job_integration.rbs index 55ed40a2..ada2b1f0 100644 --- a/sig/openai/models/fine_tuning/fine_tuning_job_integration.rbs +++ b/sig/openai/models/fine_tuning/fine_tuning_job_integration.rbs @@ -1,10 +1,8 @@ module OpenAI module Models - class FineTuningJobIntegration = FineTuning::FineTuningJobIntegration module FineTuning - class FineTuningJobIntegration = OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject end end diff --git a/sig/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbs b/sig/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbs index 47f895a9..1af84b2b 100644 --- a/sig/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbs +++ b/sig/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbs @@ -1,6 +1,5 @@ module OpenAI module Models - class FineTuningJobWandbIntegration = FineTuning::FineTuningJobWandbIntegration module FineTuning @@ -12,7 +11,7 @@ module OpenAI tags: ::Array[String] } - class FineTuningJobWandbIntegration < OpenAI::BaseModel + class FineTuningJobWandbIntegration < OpenAI::Internal::Type::BaseModel attr_accessor project: String attr_accessor entity: String? @@ -23,19 +22,19 @@ module OpenAI def tags=: (::Array[String]) -> ::Array[String] - def initialize: - ( - project: String, - entity: String?, - name: String?, - tags: ::Array[String] - ) -> void - | ( - ?OpenAI::Models::FineTuning::fine_tuning_job_wandb_integration - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::fine_tuning_job_wandb_integration + def initialize: ( + project: String, + ?entity: String?, + ?name: String?, + ?tags: ::Array[String] + ) -> void + + def to_hash: -> { + project: String, + entity: String?, + name: String?, + tags: ::Array[String] + } end end end diff --git a/sig/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbs b/sig/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbs index f9039369..aeb6caad 100644 --- a/sig/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbs +++ b/sig/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbs @@ -1,31 +1,28 @@ module OpenAI module Models - class FineTuningJobWandbIntegrationObject = FineTuning::FineTuningJobWandbIntegrationObject module FineTuning type fine_tuning_job_wandb_integration_object = { type: :wandb, - wandb: OpenAI::Models::FineTuning::FineTuningJobWandbIntegration + wandb: OpenAI::FineTuning::FineTuningJobWandbIntegration } - class FineTuningJobWandbIntegrationObject < OpenAI::BaseModel + class FineTuningJobWandbIntegrationObject < OpenAI::Internal::Type::BaseModel attr_accessor type: :wandb - attr_accessor wandb: OpenAI::Models::FineTuning::FineTuningJobWandbIntegration + attr_accessor wandb: OpenAI::FineTuning::FineTuningJobWandbIntegration - def initialize: - ( - wandb: OpenAI::Models::FineTuning::FineTuningJobWandbIntegration, - type: :wandb - ) -> void - | ( - ?OpenAI::Models::FineTuning::fine_tuning_job_wandb_integration_object - | OpenAI::BaseModel data - ) -> void + def initialize: ( + wandb: OpenAI::FineTuning::FineTuningJobWandbIntegration, + ?type: :wandb + ) -> void - def to_hash: -> OpenAI::Models::FineTuning::fine_tuning_job_wandb_integration_object + def to_hash: -> { + type: :wandb, + 
wandb: OpenAI::FineTuning::FineTuningJobWandbIntegration + } end end end diff --git a/sig/openai/models/fine_tuning/job_cancel_params.rbs b/sig/openai/models/fine_tuning/job_cancel_params.rbs index 922d99dc..2548b37f 100644 --- a/sig/openai/models/fine_tuning/job_cancel_params.rbs +++ b/sig/openai/models/fine_tuning/job_cancel_params.rbs @@ -1,20 +1,15 @@ module OpenAI module Models module FineTuning - type job_cancel_params = { } & OpenAI::request_parameters + type job_cancel_params = { } & OpenAI::Internal::Type::request_parameters - class JobCancelParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class JobCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::FineTuning::job_cancel_params - | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::FineTuning::job_cancel_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/fine_tuning/job_create_params.rbs b/sig/openai/models/fine_tuning/job_create_params.rbs index b8be5ee1..f1f03e19 100644 --- a/sig/openai/models/fine_tuning/job_create_params.rbs +++ b/sig/openai/models/fine_tuning/job_create_params.rbs @@ -5,39 +5,39 @@ module OpenAI { model: OpenAI::Models::FineTuning::JobCreateParams::model, training_file: String, - hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters, - integrations: ::Array[OpenAI::Models::FineTuning::JobCreateParams::Integration]?, + hyperparameters: OpenAI::FineTuning::JobCreateParams::Hyperparameters, + integrations: ::Array[OpenAI::FineTuning::JobCreateParams::Integration]?, metadata: OpenAI::Models::metadata?, - method_: OpenAI::Models::FineTuning::JobCreateParams::Method, + method_: OpenAI::FineTuning::JobCreateParams::Method, seed: Integer?, suffix: String?, validation_file: String? } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class JobCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class JobCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor model: OpenAI::Models::FineTuning::JobCreateParams::model attr_accessor training_file: String - attr_reader hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters? + attr_reader hyperparameters: OpenAI::FineTuning::JobCreateParams::Hyperparameters? def hyperparameters=: ( - OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters - ) -> OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters + OpenAI::FineTuning::JobCreateParams::Hyperparameters + ) -> OpenAI::FineTuning::JobCreateParams::Hyperparameters - attr_accessor integrations: ::Array[OpenAI::Models::FineTuning::JobCreateParams::Integration]? + attr_accessor integrations: ::Array[OpenAI::FineTuning::JobCreateParams::Integration]? attr_accessor metadata: OpenAI::Models::metadata? - attr_reader method_: OpenAI::Models::FineTuning::JobCreateParams::Method? + attr_reader method_: OpenAI::FineTuning::JobCreateParams::Method? 
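+ # Same `method_` naming as on FineTuningJob; optional at job creation (note `?method_:` in `initialize` below).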
def method_=: ( - OpenAI::Models::FineTuning::JobCreateParams::Method - ) -> OpenAI::Models::FineTuning::JobCreateParams::Method + OpenAI::FineTuning::JobCreateParams::Method + ) -> OpenAI::FineTuning::JobCreateParams::Method attr_accessor seed: Integer? @@ -45,43 +45,48 @@ module OpenAI attr_accessor validation_file: String? - def initialize: - ( - model: OpenAI::Models::FineTuning::JobCreateParams::model, - training_file: String, - hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters, - integrations: ::Array[OpenAI::Models::FineTuning::JobCreateParams::Integration]?, - metadata: OpenAI::Models::metadata?, - method_: OpenAI::Models::FineTuning::JobCreateParams::Method, - seed: Integer?, - suffix: String?, - validation_file: String?, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::FineTuning::job_create_params - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::job_create_params + def initialize: ( + model: OpenAI::Models::FineTuning::JobCreateParams::model, + training_file: String, + ?hyperparameters: OpenAI::FineTuning::JobCreateParams::Hyperparameters, + ?integrations: ::Array[OpenAI::FineTuning::JobCreateParams::Integration]?, + ?metadata: OpenAI::Models::metadata?, + ?method_: OpenAI::FineTuning::JobCreateParams::Method, + ?seed: Integer?, + ?suffix: String?, + ?validation_file: String?, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + model: OpenAI::Models::FineTuning::JobCreateParams::model, + training_file: String, + hyperparameters: OpenAI::FineTuning::JobCreateParams::Hyperparameters, + integrations: ::Array[OpenAI::FineTuning::JobCreateParams::Integration]?, + metadata: OpenAI::Models::metadata?, + method_: OpenAI::FineTuning::JobCreateParams::Method, + seed: Integer?, + suffix: String?, + validation_file: String?, + request_options: OpenAI::RequestOptions + } type model = - String | OpenAI::Models::FineTuning::JobCreateParams::Model::preset - - class Model < OpenAI::Union - type preset = - :"babbage-002" | :"davinci-002" | :"gpt-3.5-turbo" | :"gpt-4o-mini" + String + | :"babbage-002" + | :"davinci-002" + | :"gpt-3.5-turbo" + | :"gpt-4o-mini" - class Preset < OpenAI::Enum - BABBAGE_002: :"babbage-002" - DAVINCI_002: :"davinci-002" - GPT_3_5_TURBO: :"gpt-3.5-turbo" - GPT_4O_MINI: :"gpt-4o-mini" + module Model + extend OpenAI::Internal::Type::Union - def self.values: -> ::Array[OpenAI::Models::FineTuning::JobCreateParams::Model::preset] - end + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::JobCreateParams::model] - private def self.variants: -> [[nil, String], [nil, OpenAI::Models::FineTuning::JobCreateParams::Model::preset]] + BABBAGE_002: :"babbage-002" + DAVINCI_002: :"davinci-002" + GPT_3_5_TURBO: :"gpt-3.5-turbo" + GPT_4O_MINI: :"gpt-4o-mini" end type hyperparameters = @@ -91,7 +96,7 @@ module OpenAI n_epochs: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::n_epochs } - class Hyperparameters < OpenAI::BaseModel + class Hyperparameters < OpenAI::Internal::Type::BaseModel attr_reader batch_size: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::batch_size? 
def batch_size=: ( @@ -110,60 +115,63 @@ module OpenAI OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::n_epochs ) -> OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::n_epochs - def initialize: - ( - batch_size: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::batch_size, - learning_rate_multiplier: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::learning_rate_multiplier, - n_epochs: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::n_epochs - ) -> void - | ( - ?OpenAI::Models::FineTuning::JobCreateParams::hyperparameters - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?batch_size: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::batch_size, + ?learning_rate_multiplier: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::learning_rate_multiplier, + ?n_epochs: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::n_epochs + ) -> void - def to_hash: -> OpenAI::Models::FineTuning::JobCreateParams::hyperparameters + def to_hash: -> { + batch_size: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::batch_size, + learning_rate_multiplier: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::learning_rate_multiplier, + n_epochs: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::n_epochs + } type batch_size = :auto | Integer - class BatchSize < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Integer]] + module BatchSize + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::batch_size] end type learning_rate_multiplier = :auto | Float - class LearningRateMultiplier < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Float]] + module LearningRateMultiplier + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::learning_rate_multiplier] end type n_epochs = :auto | Integer - class NEpochs < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Integer]] + module NEpochs + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters::n_epochs] end end type integration = { type: :wandb, - wandb: OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb + wandb: OpenAI::FineTuning::JobCreateParams::Integration::Wandb } - class Integration < OpenAI::BaseModel + class Integration < OpenAI::Internal::Type::BaseModel attr_accessor type: :wandb - attr_accessor wandb: OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb + attr_accessor wandb: OpenAI::FineTuning::JobCreateParams::Integration::Wandb - def initialize: - ( - wandb: OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb, - type: :wandb - ) -> void - | ( - ?OpenAI::Models::FineTuning::JobCreateParams::integration - | OpenAI::BaseModel data - ) -> void + def initialize: ( + wandb: OpenAI::FineTuning::JobCreateParams::Integration::Wandb, + ?type: :wandb + ) -> void - def to_hash: -> OpenAI::Models::FineTuning::JobCreateParams::integration + def to_hash: -> { + type: :wandb, + wandb: OpenAI::FineTuning::JobCreateParams::Integration::Wandb + } type wandb = { @@ -173,7 +181,7 @@ module OpenAI tags: ::Array[String] } - class Wandb < OpenAI::BaseModel + class Wandb < OpenAI::Internal::Type::BaseModel attr_accessor project: String attr_accessor entity: String? 
@@ -184,246 +192,75 @@ module OpenAI def tags=: (::Array[String]) -> ::Array[String] - def initialize: - ( - project: String, - entity: String?, - name: String?, - tags: ::Array[String] - ) -> void - | ( - ?OpenAI::Models::FineTuning::JobCreateParams::Integration::wandb - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::JobCreateParams::Integration::wandb + def initialize: ( + project: String, + ?entity: String?, + ?name: String?, + ?tags: ::Array[String] + ) -> void + + def to_hash: -> { + project: String, + entity: String?, + name: String?, + tags: ::Array[String] + } end end type method_ = { - dpo: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo, - supervised: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised, - type: OpenAI::Models::FineTuning::JobCreateParams::Method::type_ + type: OpenAI::Models::FineTuning::JobCreateParams::Method::type_, + dpo: OpenAI::FineTuning::DpoMethod, + reinforcement: OpenAI::FineTuning::ReinforcementMethod, + supervised: OpenAI::FineTuning::SupervisedMethod } - class Method < OpenAI::BaseModel - attr_reader dpo: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo? + class Method < OpenAI::Internal::Type::BaseModel + attr_accessor type: OpenAI::Models::FineTuning::JobCreateParams::Method::type_ - def dpo=: ( - OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo - ) -> OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo - - attr_reader supervised: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised? - - def supervised=: ( - OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised - ) -> OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised - - attr_reader type: OpenAI::Models::FineTuning::JobCreateParams::Method::type_? - - def type=: ( - OpenAI::Models::FineTuning::JobCreateParams::Method::type_ - ) -> OpenAI::Models::FineTuning::JobCreateParams::Method::type_ + attr_reader dpo: OpenAI::FineTuning::DpoMethod? - def initialize: - ( - dpo: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo, - supervised: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised, - type: OpenAI::Models::FineTuning::JobCreateParams::Method::type_ - ) -> void - | ( - ?OpenAI::Models::FineTuning::JobCreateParams::method_ - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::JobCreateParams::method_ - - type dpo = - { - hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters - } - - class Dpo < OpenAI::BaseModel - attr_reader hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters? 
- - def hyperparameters=: ( - OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters - ) -> OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters - - def initialize: - ( - hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters - ) -> void - | ( - ?OpenAI::Models::FineTuning::JobCreateParams::Method::dpo - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::JobCreateParams::Method::dpo - - type hyperparameters = - { - batch_size: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::batch_size, - beta: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::beta, - learning_rate_multiplier: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::learning_rate_multiplier, - n_epochs: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::n_epochs - } - - class Hyperparameters < OpenAI::BaseModel - attr_reader batch_size: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::batch_size? - - def batch_size=: ( - OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::batch_size - ) -> OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::batch_size - - attr_reader beta: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::beta? - - def beta=: ( - OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::beta - ) -> OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::beta - - attr_reader learning_rate_multiplier: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::learning_rate_multiplier? - - def learning_rate_multiplier=: ( - OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::learning_rate_multiplier - ) -> OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::learning_rate_multiplier - - attr_reader n_epochs: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::n_epochs? 
- - def n_epochs=: ( - OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::n_epochs - ) -> OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::n_epochs - - def initialize: - ( - batch_size: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::batch_size, - beta: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::beta, - learning_rate_multiplier: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::learning_rate_multiplier, - n_epochs: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters::n_epochs - ) -> void - | ( - ?OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::hyperparameters - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::hyperparameters - - type batch_size = :auto | Integer - - class BatchSize < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Integer]] - end - - type beta = :auto | Float - - class Beta < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Float]] - end - - type learning_rate_multiplier = :auto | Float - - class LearningRateMultiplier < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Float]] - end - - type n_epochs = :auto | Integer - - class NEpochs < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Integer]] - end - end - end - - type supervised = - { - hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters - } - - class Supervised < OpenAI::BaseModel - attr_reader hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters? - - def hyperparameters=: ( - OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters - ) -> OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters - - def initialize: - ( - hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters - ) -> void - | ( - ?OpenAI::Models::FineTuning::JobCreateParams::Method::supervised - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::JobCreateParams::Method::supervised - - type hyperparameters = - { - batch_size: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::batch_size, - learning_rate_multiplier: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::learning_rate_multiplier, - n_epochs: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::n_epochs - } - - class Hyperparameters < OpenAI::BaseModel - attr_reader batch_size: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::batch_size? - - def batch_size=: ( - OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::batch_size - ) -> OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::batch_size - - attr_reader learning_rate_multiplier: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::learning_rate_multiplier? 
- - def learning_rate_multiplier=: ( - OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::learning_rate_multiplier - ) -> OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::learning_rate_multiplier - - attr_reader n_epochs: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::n_epochs? - - def n_epochs=: ( - OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::n_epochs - ) -> OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::n_epochs - - def initialize: - ( - batch_size: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::batch_size, - learning_rate_multiplier: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::learning_rate_multiplier, - n_epochs: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters::n_epochs - ) -> void - | ( - ?OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::hyperparameters - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::hyperparameters + def dpo=: ( + OpenAI::FineTuning::DpoMethod + ) -> OpenAI::FineTuning::DpoMethod - type batch_size = :auto | Integer + attr_reader reinforcement: OpenAI::FineTuning::ReinforcementMethod? - class BatchSize < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Integer]] - end + def reinforcement=: ( + OpenAI::FineTuning::ReinforcementMethod + ) -> OpenAI::FineTuning::ReinforcementMethod - type learning_rate_multiplier = :auto | Float + attr_reader supervised: OpenAI::FineTuning::SupervisedMethod? - class LearningRateMultiplier < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Float]] - end + def supervised=: ( + OpenAI::FineTuning::SupervisedMethod + ) -> OpenAI::FineTuning::SupervisedMethod + + def initialize: ( + type: OpenAI::Models::FineTuning::JobCreateParams::Method::type_, + ?dpo: OpenAI::FineTuning::DpoMethod, + ?reinforcement: OpenAI::FineTuning::ReinforcementMethod, + ?supervised: OpenAI::FineTuning::SupervisedMethod + ) -> void - type n_epochs = :auto | Integer + def to_hash: -> { + type: OpenAI::Models::FineTuning::JobCreateParams::Method::type_, + dpo: OpenAI::FineTuning::DpoMethod, + reinforcement: OpenAI::FineTuning::ReinforcementMethod, + supervised: OpenAI::FineTuning::SupervisedMethod + } - class NEpochs < OpenAI::Union - private def self.variants: -> [[nil, :auto], [nil, Integer]] - end - end - end + type type_ = :supervised | :dpo | :reinforcement - type type_ = :supervised | :dpo + module Type + extend OpenAI::Internal::Type::Enum - class Type < OpenAI::Enum SUPERVISED: :supervised DPO: :dpo + REINFORCEMENT: :reinforcement - def self.values: -> ::Array[OpenAI::Models::FineTuning::JobCreateParams::Method::type_] + def self?.values: -> ::Array[OpenAI::Models::FineTuning::JobCreateParams::Method::type_] end end end diff --git a/sig/openai/models/fine_tuning/job_list_events_params.rbs b/sig/openai/models/fine_tuning/job_list_events_params.rbs index b14a4ba2..42a77bac 100644 --- a/sig/openai/models/fine_tuning/job_list_events_params.rbs +++ b/sig/openai/models/fine_tuning/job_list_events_params.rbs @@ -2,11 +2,12 @@ module OpenAI module Models module FineTuning type job_list_events_params = - { after: String, limit: Integer } & OpenAI::request_parameters + { after: String, limit: Integer } + & OpenAI::Internal::Type::request_parameters - class 
JobListEventsParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class JobListEventsParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_reader after: String? @@ -16,18 +17,17 @@ module OpenAI def limit=: (Integer) -> Integer - def initialize: - ( - after: String, - limit: Integer, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::FineTuning::job_list_events_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?after: String, + ?limit: Integer, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::FineTuning::job_list_events_params + def to_hash: -> { + after: String, + limit: Integer, + request_options: OpenAI::RequestOptions + } end end end diff --git a/sig/openai/models/fine_tuning/job_list_params.rbs b/sig/openai/models/fine_tuning/job_list_params.rbs index f4fc50fc..d45893f4 100644 --- a/sig/openai/models/fine_tuning/job_list_params.rbs +++ b/sig/openai/models/fine_tuning/job_list_params.rbs @@ -3,11 +3,11 @@ module OpenAI module FineTuning type job_list_params = { after: String, limit: Integer, metadata: ::Hash[Symbol, String]? } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class JobListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class JobListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_reader after: String? @@ -19,19 +19,19 @@ module OpenAI attr_accessor metadata: ::Hash[Symbol, String]? - def initialize: - ( - after: String, - limit: Integer, - metadata: ::Hash[Symbol, String]?, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::FineTuning::job_list_params - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::job_list_params + def initialize: ( + ?after: String, + ?limit: Integer, + ?metadata: ::Hash[Symbol, String]?, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + after: String, + limit: Integer, + metadata: ::Hash[Symbol, String]?, + request_options: OpenAI::RequestOptions + } end end end diff --git a/sig/openai/models/fine_tuning/job_pause_params.rbs b/sig/openai/models/fine_tuning/job_pause_params.rbs new file mode 100644 index 00000000..3b55e229 --- /dev/null +++ b/sig/openai/models/fine_tuning/job_pause_params.rbs @@ -0,0 +1,16 @@ +module OpenAI + module Models + module FineTuning + type job_pause_params = { } & OpenAI::Internal::Type::request_parameters + + class JobPauseParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + def initialize: (?request_options: OpenAI::request_opts) -> void + + def to_hash: -> { request_options: OpenAI::RequestOptions } + end + end + end +end diff --git a/sig/openai/models/fine_tuning/job_resume_params.rbs b/sig/openai/models/fine_tuning/job_resume_params.rbs new file mode 100644 index 00000000..3a1d7da9 --- /dev/null +++ b/sig/openai/models/fine_tuning/job_resume_params.rbs @@ -0,0 +1,16 @@ +module OpenAI + module Models + module FineTuning + type job_resume_params = { } & OpenAI::Internal::Type::request_parameters + + class JobResumeParams < OpenAI::Internal::Type::BaseModel + extend 
OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + def initialize: (?request_options: OpenAI::request_opts) -> void + + def to_hash: -> { request_options: OpenAI::RequestOptions } + end + end + end +end diff --git a/sig/openai/models/fine_tuning/job_retrieve_params.rbs b/sig/openai/models/fine_tuning/job_retrieve_params.rbs index 53329e19..1535b93a 100644 --- a/sig/openai/models/fine_tuning/job_retrieve_params.rbs +++ b/sig/openai/models/fine_tuning/job_retrieve_params.rbs @@ -1,20 +1,16 @@ module OpenAI module Models module FineTuning - type job_retrieve_params = { } & OpenAI::request_parameters + type job_retrieve_params = + { } & OpenAI::Internal::Type::request_parameters - class JobRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class JobRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::FineTuning::job_retrieve_params - | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::FineTuning::job_retrieve_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/fine_tuning/jobs/checkpoint_list_params.rbs b/sig/openai/models/fine_tuning/jobs/checkpoint_list_params.rbs index 5c8dec0b..28b14de0 100644 --- a/sig/openai/models/fine_tuning/jobs/checkpoint_list_params.rbs +++ b/sig/openai/models/fine_tuning/jobs/checkpoint_list_params.rbs @@ -3,11 +3,12 @@ module OpenAI module FineTuning module Jobs type checkpoint_list_params = - { after: String, limit: Integer } & OpenAI::request_parameters + { after: String, limit: Integer } + & OpenAI::Internal::Type::request_parameters - class CheckpointListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class CheckpointListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_reader after: String? 
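# A minimal sketch for the reworked `Method` model above: `type:` is now a
# required keyword, and per-method configuration moves to the shared
# fine-tuning models instead of nested classes. Assuming keyword construction
# as declared in the signatures:
method = OpenAI::Models::FineTuning::JobCreateParams::Method.new(
  type: :supervised, # :supervised | :dpo | :reinforcement
  supervised: OpenAI::FineTuning::SupervisedMethod.new(
    hyperparameters: OpenAI::FineTuning::SupervisedHyperparameters.new(n_epochs: :auto)
  )
)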
@@ -17,18 +18,17 @@ module OpenAI def limit=: (Integer) -> Integer - def initialize: - ( - after: String, - limit: Integer, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::FineTuning::Jobs::checkpoint_list_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?after: String, + ?limit: Integer, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::FineTuning::Jobs::checkpoint_list_params + def to_hash: -> { + after: String, + limit: Integer, + request_options: OpenAI::RequestOptions + } end end end diff --git a/sig/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbs b/sig/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbs index e3b4c10a..15da3658 100644 --- a/sig/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbs +++ b/sig/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbs @@ -8,12 +8,12 @@ module OpenAI created_at: Integer, fine_tuned_model_checkpoint: String, fine_tuning_job_id: String, - metrics: OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics, + metrics: OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics, object: :"fine_tuning.job.checkpoint", step_number: Integer } - class FineTuningJobCheckpoint < OpenAI::BaseModel + class FineTuningJobCheckpoint < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor created_at: Integer @@ -22,28 +22,31 @@ module OpenAI attr_accessor fine_tuning_job_id: String - attr_accessor metrics: OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics + attr_accessor metrics: OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics attr_accessor object: :"fine_tuning.job.checkpoint" attr_accessor step_number: Integer - def initialize: - ( - id: String, - created_at: Integer, - fine_tuned_model_checkpoint: String, - fine_tuning_job_id: String, - metrics: OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics, - step_number: Integer, - object: :"fine_tuning.job.checkpoint" - ) -> void - | ( - ?OpenAI::Models::FineTuning::Jobs::fine_tuning_job_checkpoint - | OpenAI::BaseModel data - ) -> void + def initialize: ( + id: String, + created_at: Integer, + fine_tuned_model_checkpoint: String, + fine_tuning_job_id: String, + metrics: OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics, + step_number: Integer, + ?object: :"fine_tuning.job.checkpoint" + ) -> void - def to_hash: -> OpenAI::Models::FineTuning::Jobs::fine_tuning_job_checkpoint + def to_hash: -> { + id: String, + created_at: Integer, + fine_tuned_model_checkpoint: String, + fine_tuning_job_id: String, + metrics: OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics, + object: :"fine_tuning.job.checkpoint", + step_number: Integer + } type metrics = { @@ -56,7 +59,7 @@ module OpenAI valid_mean_token_accuracy: Float } - class Metrics < OpenAI::BaseModel + class Metrics < OpenAI::Internal::Type::BaseModel attr_reader full_valid_loss: Float? 
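# A minimal sketch for `CheckpointListParams` above: `after:` and `limit:` are
# now optional keywords. The cursor value here is hypothetical:
params = OpenAI::Models::FineTuning::Jobs::CheckpointListParams.new(
  after: "ftckpt_abc123", # hypothetical pagination cursor
  limit: 10
)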
def full_valid_loss=: (Float) -> Float @@ -85,22 +88,25 @@ module OpenAI def valid_mean_token_accuracy=: (Float) -> Float - def initialize: - ( - full_valid_loss: Float, - full_valid_mean_token_accuracy: Float, - step: Float, - train_loss: Float, - train_mean_token_accuracy: Float, - valid_loss: Float, - valid_mean_token_accuracy: Float - ) -> void - | ( - ?OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::metrics - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::metrics + def initialize: ( + ?full_valid_loss: Float, + ?full_valid_mean_token_accuracy: Float, + ?step: Float, + ?train_loss: Float, + ?train_mean_token_accuracy: Float, + ?valid_loss: Float, + ?valid_mean_token_accuracy: Float + ) -> void + + def to_hash: -> { + full_valid_loss: Float, + full_valid_mean_token_accuracy: Float, + step: Float, + train_loss: Float, + train_mean_token_accuracy: Float, + valid_loss: Float, + valid_mean_token_accuracy: Float + } end end end diff --git a/sig/openai/models/fine_tuning/reinforcement_hyperparameters.rbs b/sig/openai/models/fine_tuning/reinforcement_hyperparameters.rbs new file mode 100644 index 00000000..fdd3cc5b --- /dev/null +++ b/sig/openai/models/fine_tuning/reinforcement_hyperparameters.rbs @@ -0,0 +1,141 @@ +module OpenAI + module Models + module FineTuning + type reinforcement_hyperparameters = + { + batch_size: OpenAI::Models::FineTuning::ReinforcementHyperparameters::batch_size, + compute_multiplier: OpenAI::Models::FineTuning::ReinforcementHyperparameters::compute_multiplier, + eval_interval: OpenAI::Models::FineTuning::ReinforcementHyperparameters::eval_interval, + eval_samples: OpenAI::Models::FineTuning::ReinforcementHyperparameters::eval_samples, + learning_rate_multiplier: OpenAI::Models::FineTuning::ReinforcementHyperparameters::learning_rate_multiplier, + n_epochs: OpenAI::Models::FineTuning::ReinforcementHyperparameters::n_epochs, + reasoning_effort: OpenAI::Models::FineTuning::ReinforcementHyperparameters::reasoning_effort + } + + class ReinforcementHyperparameters < OpenAI::Internal::Type::BaseModel + attr_reader batch_size: OpenAI::Models::FineTuning::ReinforcementHyperparameters::batch_size? + + def batch_size=: ( + OpenAI::Models::FineTuning::ReinforcementHyperparameters::batch_size + ) -> OpenAI::Models::FineTuning::ReinforcementHyperparameters::batch_size + + attr_reader compute_multiplier: OpenAI::Models::FineTuning::ReinforcementHyperparameters::compute_multiplier? + + def compute_multiplier=: ( + OpenAI::Models::FineTuning::ReinforcementHyperparameters::compute_multiplier + ) -> OpenAI::Models::FineTuning::ReinforcementHyperparameters::compute_multiplier + + attr_reader eval_interval: OpenAI::Models::FineTuning::ReinforcementHyperparameters::eval_interval? + + def eval_interval=: ( + OpenAI::Models::FineTuning::ReinforcementHyperparameters::eval_interval + ) -> OpenAI::Models::FineTuning::ReinforcementHyperparameters::eval_interval + + attr_reader eval_samples: OpenAI::Models::FineTuning::ReinforcementHyperparameters::eval_samples? + + def eval_samples=: ( + OpenAI::Models::FineTuning::ReinforcementHyperparameters::eval_samples + ) -> OpenAI::Models::FineTuning::ReinforcementHyperparameters::eval_samples + + attr_reader learning_rate_multiplier: OpenAI::Models::FineTuning::ReinforcementHyperparameters::learning_rate_multiplier? 
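# A defensive-read sketch for the checkpoint `Metrics` above: every field is an
# optional reader (`Float?`), so callers should nil-check. `checkpoint` is a
# hypothetical FineTuningJobCheckpoint instance:
loss = checkpoint.metrics.full_valid_loss
puts "full validation loss: #{loss}" unless loss.nil?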
+ + def learning_rate_multiplier=: ( + OpenAI::Models::FineTuning::ReinforcementHyperparameters::learning_rate_multiplier + ) -> OpenAI::Models::FineTuning::ReinforcementHyperparameters::learning_rate_multiplier + + attr_reader n_epochs: OpenAI::Models::FineTuning::ReinforcementHyperparameters::n_epochs? + + def n_epochs=: ( + OpenAI::Models::FineTuning::ReinforcementHyperparameters::n_epochs + ) -> OpenAI::Models::FineTuning::ReinforcementHyperparameters::n_epochs + + attr_reader reasoning_effort: OpenAI::Models::FineTuning::ReinforcementHyperparameters::reasoning_effort? + + def reasoning_effort=: ( + OpenAI::Models::FineTuning::ReinforcementHyperparameters::reasoning_effort + ) -> OpenAI::Models::FineTuning::ReinforcementHyperparameters::reasoning_effort + + def initialize: ( + ?batch_size: OpenAI::Models::FineTuning::ReinforcementHyperparameters::batch_size, + ?compute_multiplier: OpenAI::Models::FineTuning::ReinforcementHyperparameters::compute_multiplier, + ?eval_interval: OpenAI::Models::FineTuning::ReinforcementHyperparameters::eval_interval, + ?eval_samples: OpenAI::Models::FineTuning::ReinforcementHyperparameters::eval_samples, + ?learning_rate_multiplier: OpenAI::Models::FineTuning::ReinforcementHyperparameters::learning_rate_multiplier, + ?n_epochs: OpenAI::Models::FineTuning::ReinforcementHyperparameters::n_epochs, + ?reasoning_effort: OpenAI::Models::FineTuning::ReinforcementHyperparameters::reasoning_effort + ) -> void + + def to_hash: -> { + batch_size: OpenAI::Models::FineTuning::ReinforcementHyperparameters::batch_size, + compute_multiplier: OpenAI::Models::FineTuning::ReinforcementHyperparameters::compute_multiplier, + eval_interval: OpenAI::Models::FineTuning::ReinforcementHyperparameters::eval_interval, + eval_samples: OpenAI::Models::FineTuning::ReinforcementHyperparameters::eval_samples, + learning_rate_multiplier: OpenAI::Models::FineTuning::ReinforcementHyperparameters::learning_rate_multiplier, + n_epochs: OpenAI::Models::FineTuning::ReinforcementHyperparameters::n_epochs, + reasoning_effort: OpenAI::Models::FineTuning::ReinforcementHyperparameters::reasoning_effort + } + + type batch_size = :auto | Integer + + module BatchSize + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::ReinforcementHyperparameters::batch_size] + end + + type compute_multiplier = :auto | Float + + module ComputeMultiplier + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::ReinforcementHyperparameters::compute_multiplier] + end + + type eval_interval = :auto | Integer + + module EvalInterval + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::ReinforcementHyperparameters::eval_interval] + end + + type eval_samples = :auto | Integer + + module EvalSamples + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::ReinforcementHyperparameters::eval_samples] + end + + type learning_rate_multiplier = :auto | Float + + module LearningRateMultiplier + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::ReinforcementHyperparameters::learning_rate_multiplier] + end + + type n_epochs = :auto | Integer + + module NEpochs + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::ReinforcementHyperparameters::n_epochs] + end + + type reasoning_effort = :default | :low | :medium | :high + + module ReasoningEffort + extend 
OpenAI::Internal::Type::Enum + + DEFAULT: :default + LOW: :low + MEDIUM: :medium + HIGH: :high + + def self?.values: -> ::Array[OpenAI::Models::FineTuning::ReinforcementHyperparameters::reasoning_effort] + end + end + end + end +end diff --git a/sig/openai/models/fine_tuning/reinforcement_method.rbs b/sig/openai/models/fine_tuning/reinforcement_method.rbs new file mode 100644 index 00000000..3f637b95 --- /dev/null +++ b/sig/openai/models/fine_tuning/reinforcement_method.rbs @@ -0,0 +1,44 @@ +module OpenAI + module Models + module FineTuning + type reinforcement_method = + { + grader: OpenAI::Models::FineTuning::ReinforcementMethod::grader, + hyperparameters: OpenAI::FineTuning::ReinforcementHyperparameters + } + + class ReinforcementMethod < OpenAI::Internal::Type::BaseModel + attr_accessor grader: OpenAI::Models::FineTuning::ReinforcementMethod::grader + + attr_reader hyperparameters: OpenAI::FineTuning::ReinforcementHyperparameters? + + def hyperparameters=: ( + OpenAI::FineTuning::ReinforcementHyperparameters + ) -> OpenAI::FineTuning::ReinforcementHyperparameters + + def initialize: ( + grader: OpenAI::Models::FineTuning::ReinforcementMethod::grader, + ?hyperparameters: OpenAI::FineTuning::ReinforcementHyperparameters + ) -> void + + def to_hash: -> { + grader: OpenAI::Models::FineTuning::ReinforcementMethod::grader, + hyperparameters: OpenAI::FineTuning::ReinforcementHyperparameters + } + + type grader = + OpenAI::Graders::StringCheckGrader + | OpenAI::Graders::TextSimilarityGrader + | OpenAI::Graders::PythonGrader + | OpenAI::Graders::ScoreModelGrader + | OpenAI::Graders::MultiGrader + + module Grader + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::ReinforcementMethod::grader] + end + end + end + end +end diff --git a/sig/openai/models/fine_tuning/supervised_hyperparameters.rbs b/sig/openai/models/fine_tuning/supervised_hyperparameters.rbs new file mode 100644 index 00000000..ce01622a --- /dev/null +++ b/sig/openai/models/fine_tuning/supervised_hyperparameters.rbs @@ -0,0 +1,68 @@ +module OpenAI + module Models + module FineTuning + type supervised_hyperparameters = + { + batch_size: OpenAI::Models::FineTuning::SupervisedHyperparameters::batch_size, + learning_rate_multiplier: OpenAI::Models::FineTuning::SupervisedHyperparameters::learning_rate_multiplier, + n_epochs: OpenAI::Models::FineTuning::SupervisedHyperparameters::n_epochs + } + + class SupervisedHyperparameters < OpenAI::Internal::Type::BaseModel + attr_reader batch_size: OpenAI::Models::FineTuning::SupervisedHyperparameters::batch_size? + + def batch_size=: ( + OpenAI::Models::FineTuning::SupervisedHyperparameters::batch_size + ) -> OpenAI::Models::FineTuning::SupervisedHyperparameters::batch_size + + attr_reader learning_rate_multiplier: OpenAI::Models::FineTuning::SupervisedHyperparameters::learning_rate_multiplier? + + def learning_rate_multiplier=: ( + OpenAI::Models::FineTuning::SupervisedHyperparameters::learning_rate_multiplier + ) -> OpenAI::Models::FineTuning::SupervisedHyperparameters::learning_rate_multiplier + + attr_reader n_epochs: OpenAI::Models::FineTuning::SupervisedHyperparameters::n_epochs? 
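# A minimal sketch for `ReinforcementMethod` above: `grader:` is required and
# accepts any of the five grader variants, while `hyperparameters:` is
# optional. The grader field values here are hypothetical:
rm = OpenAI::FineTuning::ReinforcementMethod.new(
  grader: OpenAI::Graders::StringCheckGrader.new(
    input: "{{sample.output_text}}",
    name: "exact_match",
    operation: :eq, # :eq | :ne | :like | :ilike
    reference: "{{item.answer}}"
  ),
  hyperparameters: OpenAI::FineTuning::ReinforcementHyperparameters.new(
    reasoning_effort: :medium # :default | :low | :medium | :high
  )
)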
+ + def n_epochs=: ( + OpenAI::Models::FineTuning::SupervisedHyperparameters::n_epochs + ) -> OpenAI::Models::FineTuning::SupervisedHyperparameters::n_epochs + + def initialize: ( + ?batch_size: OpenAI::Models::FineTuning::SupervisedHyperparameters::batch_size, + ?learning_rate_multiplier: OpenAI::Models::FineTuning::SupervisedHyperparameters::learning_rate_multiplier, + ?n_epochs: OpenAI::Models::FineTuning::SupervisedHyperparameters::n_epochs + ) -> void + + def to_hash: -> { + batch_size: OpenAI::Models::FineTuning::SupervisedHyperparameters::batch_size, + learning_rate_multiplier: OpenAI::Models::FineTuning::SupervisedHyperparameters::learning_rate_multiplier, + n_epochs: OpenAI::Models::FineTuning::SupervisedHyperparameters::n_epochs + } + + type batch_size = :auto | Integer + + module BatchSize + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::SupervisedHyperparameters::batch_size] + end + + type learning_rate_multiplier = :auto | Float + + module LearningRateMultiplier + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::SupervisedHyperparameters::learning_rate_multiplier] + end + + type n_epochs = :auto | Integer + + module NEpochs + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::FineTuning::SupervisedHyperparameters::n_epochs] + end + end + end + end +end diff --git a/sig/openai/models/fine_tuning/supervised_method.rbs b/sig/openai/models/fine_tuning/supervised_method.rbs new file mode 100644 index 00000000..dbebf97e --- /dev/null +++ b/sig/openai/models/fine_tuning/supervised_method.rbs @@ -0,0 +1,24 @@ +module OpenAI + module Models + module FineTuning + type supervised_method = + { hyperparameters: OpenAI::FineTuning::SupervisedHyperparameters } + + class SupervisedMethod < OpenAI::Internal::Type::BaseModel + attr_reader hyperparameters: OpenAI::FineTuning::SupervisedHyperparameters? + + def hyperparameters=: ( + OpenAI::FineTuning::SupervisedHyperparameters + ) -> OpenAI::FineTuning::SupervisedHyperparameters + + def initialize: ( + ?hyperparameters: OpenAI::FineTuning::SupervisedHyperparameters + ) -> void + + def to_hash: -> { + hyperparameters: OpenAI::FineTuning::SupervisedHyperparameters + } + end + end + end +end diff --git a/sig/openai/models/function_definition.rbs b/sig/openai/models/function_definition.rbs index edff252f..7a9696f3 100644 --- a/sig/openai/models/function_definition.rbs +++ b/sig/openai/models/function_definition.rbs @@ -8,7 +8,7 @@ module OpenAI strict: bool? } - class FunctionDefinition < OpenAI::BaseModel + class FunctionDefinition < OpenAI::Internal::Type::BaseModel attr_accessor name: String attr_reader description: String? @@ -23,18 +23,19 @@ module OpenAI attr_accessor strict: bool? - def initialize: - ( - name: String, - description: String, - parameters: OpenAI::Models::function_parameters, - strict: bool? - ) -> void - | ( - ?OpenAI::Models::function_definition | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::function_definition + def initialize: ( + name: String, + ?description: String, + ?parameters: OpenAI::Models::function_parameters, + ?strict: bool? + ) -> void + + def to_hash: -> { + name: String, + description: String, + parameters: OpenAI::Models::function_parameters, + strict: bool? 
+ } end end end diff --git a/sig/openai/models/function_parameters.rbs b/sig/openai/models/function_parameters.rbs index 6dcb5bc1..9370883a 100644 --- a/sig/openai/models/function_parameters.rbs +++ b/sig/openai/models/function_parameters.rbs @@ -2,6 +2,6 @@ module OpenAI module Models type function_parameters = ::Hash[Symbol, top] - FunctionParameters: function_parameters + FunctionParameters: OpenAI::Internal::Type::Converter end end diff --git a/sig/openai/models/graders/label_model_grader.rbs b/sig/openai/models/graders/label_model_grader.rbs new file mode 100644 index 00000000..29bcb103 --- /dev/null +++ b/sig/openai/models/graders/label_model_grader.rbs @@ -0,0 +1,155 @@ +module OpenAI + module Models + class LabelModelGrader = Graders::LabelModelGrader + + module Graders + type label_model_grader = + { + input: ::Array[OpenAI::Graders::LabelModelGrader::Input], + labels: ::Array[String], + model: String, + name: String, + passing_labels: ::Array[String], + type: :label_model + } + + class LabelModelGrader < OpenAI::Internal::Type::BaseModel + attr_accessor input: ::Array[OpenAI::Graders::LabelModelGrader::Input] + + attr_accessor labels: ::Array[String] + + attr_accessor model: String + + attr_accessor name: String + + attr_accessor passing_labels: ::Array[String] + + attr_accessor type: :label_model + + def initialize: ( + input: ::Array[OpenAI::Graders::LabelModelGrader::Input], + labels: ::Array[String], + model: String, + name: String, + passing_labels: ::Array[String], + ?type: :label_model + ) -> void + + def to_hash: -> { + input: ::Array[OpenAI::Graders::LabelModelGrader::Input], + labels: ::Array[String], + model: String, + name: String, + passing_labels: ::Array[String], + type: :label_model + } + + type input = + { + content: OpenAI::Models::Graders::LabelModelGrader::Input::content, + role: OpenAI::Models::Graders::LabelModelGrader::Input::role, + type: OpenAI::Models::Graders::LabelModelGrader::Input::type_ + } + + class Input < OpenAI::Internal::Type::BaseModel + attr_accessor content: OpenAI::Models::Graders::LabelModelGrader::Input::content + + attr_accessor role: OpenAI::Models::Graders::LabelModelGrader::Input::role + + attr_reader type: OpenAI::Models::Graders::LabelModelGrader::Input::type_? 
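# A minimal sketch for `FunctionDefinition` above: only `name:` is required,
# and `parameters` is a plain `::Hash[Symbol, top]` JSON-schema hash. The
# function shown here is hypothetical:
fn = OpenAI::Models::FunctionDefinition.new(
  name: "get_weather",
  description: "Look up the current weather for a city",
  parameters: {type: "object", properties: {city: {type: "string"}}},
  strict: true
)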
+ + def type=: ( + OpenAI::Models::Graders::LabelModelGrader::Input::type_ + ) -> OpenAI::Models::Graders::LabelModelGrader::Input::type_ + + def initialize: ( + content: OpenAI::Models::Graders::LabelModelGrader::Input::content, + role: OpenAI::Models::Graders::LabelModelGrader::Input::role, + ?type: OpenAI::Models::Graders::LabelModelGrader::Input::type_ + ) -> void + + def to_hash: -> { + content: OpenAI::Models::Graders::LabelModelGrader::Input::content, + role: OpenAI::Models::Graders::LabelModelGrader::Input::role, + type: OpenAI::Models::Graders::LabelModelGrader::Input::type_ + } + + type content = + String + | OpenAI::Responses::ResponseInputText + | OpenAI::Graders::LabelModelGrader::Input::Content::OutputText + | OpenAI::Graders::LabelModelGrader::Input::Content::InputImage + | ::Array[top] + + module Content + extend OpenAI::Internal::Type::Union + + type output_text = { text: String, type: :output_text } + + class OutputText < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :output_text + + def initialize: (text: String, ?type: :output_text) -> void + + def to_hash: -> { text: String, type: :output_text } + end + + type input_image = + { image_url: String, type: :input_image, detail: String } + + class InputImage < OpenAI::Internal::Type::BaseModel + attr_accessor image_url: String + + attr_accessor type: :input_image + + attr_reader detail: String? + + def detail=: (String) -> String + + def initialize: ( + image_url: String, + ?detail: String, + ?type: :input_image + ) -> void + + def to_hash: -> { + image_url: String, + type: :input_image, + detail: String + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Graders::LabelModelGrader::Input::content] + + AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter + end + + type role = :user | :assistant | :system | :developer + + module Role + extend OpenAI::Internal::Type::Enum + + USER: :user + ASSISTANT: :assistant + SYSTEM: :system + DEVELOPER: :developer + + def self?.values: -> ::Array[OpenAI::Models::Graders::LabelModelGrader::Input::role] + end + + type type_ = :message + + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE: :message + + def self?.values: -> ::Array[OpenAI::Models::Graders::LabelModelGrader::Input::type_] + end + end + end + end + end +end diff --git a/sig/openai/models/graders/multi_grader.rbs b/sig/openai/models/graders/multi_grader.rbs new file mode 100644 index 00000000..d9ca09fc --- /dev/null +++ b/sig/openai/models/graders/multi_grader.rbs @@ -0,0 +1,52 @@ +module OpenAI + module Models + class MultiGrader = Graders::MultiGrader + + module Graders + type multi_grader = + { + calculate_output: String, + graders: OpenAI::Models::Graders::MultiGrader::graders, + name: String, + type: :multi + } + + class MultiGrader < OpenAI::Internal::Type::BaseModel + attr_accessor calculate_output: String + + attr_accessor graders: OpenAI::Models::Graders::MultiGrader::graders + + attr_accessor name: String + + attr_accessor type: :multi + + def initialize: ( + calculate_output: String, + graders: OpenAI::Models::Graders::MultiGrader::graders, + name: String, + ?type: :multi + ) -> void + + def to_hash: -> { + calculate_output: String, + graders: OpenAI::Models::Graders::MultiGrader::graders, + name: String, + type: :multi + } + + type graders = + OpenAI::Graders::StringCheckGrader + | OpenAI::Graders::TextSimilarityGrader + | OpenAI::Graders::PythonGrader + | OpenAI::Graders::ScoreModelGrader + | OpenAI::Graders::LabelModelGrader + + module 
Graders + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::Graders::MultiGrader::graders] + end + end + end + end +end diff --git a/sig/openai/models/graders/python_grader.rbs b/sig/openai/models/graders/python_grader.rbs new file mode 100644 index 00000000..fdd1b7ac --- /dev/null +++ b/sig/openai/models/graders/python_grader.rbs @@ -0,0 +1,36 @@ +module OpenAI + module Models + class PythonGrader = Graders::PythonGrader + + module Graders + type python_grader = + { name: String, source: String, type: :python, image_tag: String } + + class PythonGrader < OpenAI::Internal::Type::BaseModel + attr_accessor name: String + + attr_accessor source: String + + attr_accessor type: :python + + attr_reader image_tag: String? + + def image_tag=: (String) -> String + + def initialize: ( + name: String, + source: String, + ?image_tag: String, + ?type: :python + ) -> void + + def to_hash: -> { + name: String, + source: String, + type: :python, + image_tag: String + } + end + end + end +end diff --git a/sig/openai/models/graders/score_model_grader.rbs b/sig/openai/models/graders/score_model_grader.rbs new file mode 100644 index 00000000..5ff024a4 --- /dev/null +++ b/sig/openai/models/graders/score_model_grader.rbs @@ -0,0 +1,159 @@ +module OpenAI + module Models + class ScoreModelGrader = Graders::ScoreModelGrader + + module Graders + type score_model_grader = + { + input: ::Array[OpenAI::Graders::ScoreModelGrader::Input], + model: String, + name: String, + type: :score_model, + range: ::Array[Float], + sampling_params: top + } + + class ScoreModelGrader < OpenAI::Internal::Type::BaseModel + attr_accessor input: ::Array[OpenAI::Graders::ScoreModelGrader::Input] + + attr_accessor model: String + + attr_accessor name: String + + attr_accessor type: :score_model + + attr_reader range: ::Array[Float]? + + def range=: (::Array[Float]) -> ::Array[Float] + + attr_reader sampling_params: top? + + def sampling_params=: (top) -> top + + def initialize: ( + input: ::Array[OpenAI::Graders::ScoreModelGrader::Input], + model: String, + name: String, + ?range: ::Array[Float], + ?sampling_params: top, + ?type: :score_model + ) -> void + + def to_hash: -> { + input: ::Array[OpenAI::Graders::ScoreModelGrader::Input], + model: String, + name: String, + type: :score_model, + range: ::Array[Float], + sampling_params: top + } + + type input = + { + content: OpenAI::Models::Graders::ScoreModelGrader::Input::content, + role: OpenAI::Models::Graders::ScoreModelGrader::Input::role, + type: OpenAI::Models::Graders::ScoreModelGrader::Input::type_ + } + + class Input < OpenAI::Internal::Type::BaseModel + attr_accessor content: OpenAI::Models::Graders::ScoreModelGrader::Input::content + + attr_accessor role: OpenAI::Models::Graders::ScoreModelGrader::Input::role + + attr_reader type: OpenAI::Models::Graders::ScoreModelGrader::Input::type_? 
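# A minimal sketch for `PythonGrader` above: `name:` and `source:` are
# required, `image_tag:` is optional, and `type:` defaults to `:python` per
# the signature. The grader body here is hypothetical:
pg = OpenAI::Graders::PythonGrader.new(
  name: "unit_tests",
  source: "def grade(sample, item):\n    return 1.0\n"
)
pg.type # => :python, defaulted per the signature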
+ + def type=: ( + OpenAI::Models::Graders::ScoreModelGrader::Input::type_ + ) -> OpenAI::Models::Graders::ScoreModelGrader::Input::type_ + + def initialize: ( + content: OpenAI::Models::Graders::ScoreModelGrader::Input::content, + role: OpenAI::Models::Graders::ScoreModelGrader::Input::role, + ?type: OpenAI::Models::Graders::ScoreModelGrader::Input::type_ + ) -> void + + def to_hash: -> { + content: OpenAI::Models::Graders::ScoreModelGrader::Input::content, + role: OpenAI::Models::Graders::ScoreModelGrader::Input::role, + type: OpenAI::Models::Graders::ScoreModelGrader::Input::type_ + } + + type content = + String + | OpenAI::Responses::ResponseInputText + | OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText + | OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage + | ::Array[top] + + module Content + extend OpenAI::Internal::Type::Union + + type output_text = { text: String, type: :output_text } + + class OutputText < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :output_text + + def initialize: (text: String, ?type: :output_text) -> void + + def to_hash: -> { text: String, type: :output_text } + end + + type input_image = + { image_url: String, type: :input_image, detail: String } + + class InputImage < OpenAI::Internal::Type::BaseModel + attr_accessor image_url: String + + attr_accessor type: :input_image + + attr_reader detail: String? + + def detail=: (String) -> String + + def initialize: ( + image_url: String, + ?detail: String, + ?type: :input_image + ) -> void + + def to_hash: -> { + image_url: String, + type: :input_image, + detail: String + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Graders::ScoreModelGrader::Input::content] + + AnArrayOfInputTextAndInputImageArray: OpenAI::Internal::Type::Converter + end + + type role = :user | :assistant | :system | :developer + + module Role + extend OpenAI::Internal::Type::Enum + + USER: :user + ASSISTANT: :assistant + SYSTEM: :system + DEVELOPER: :developer + + def self?.values: -> ::Array[OpenAI::Models::Graders::ScoreModelGrader::Input::role] + end + + type type_ = :message + + module Type + extend OpenAI::Internal::Type::Enum + + MESSAGE: :message + + def self?.values: -> ::Array[OpenAI::Models::Graders::ScoreModelGrader::Input::type_] + end + end + end + end + end +end diff --git a/sig/openai/models/graders/string_check_grader.rbs b/sig/openai/models/graders/string_check_grader.rbs new file mode 100644 index 00000000..039498de --- /dev/null +++ b/sig/openai/models/graders/string_check_grader.rbs @@ -0,0 +1,57 @@ +module OpenAI + module Models + class StringCheckGrader = Graders::StringCheckGrader + + module Graders + type string_check_grader = + { + input: String, + name: String, + operation: OpenAI::Models::Graders::StringCheckGrader::operation, + reference: String, + type: :string_check + } + + class StringCheckGrader < OpenAI::Internal::Type::BaseModel + attr_accessor input: String + + attr_accessor name: String + + attr_accessor operation: OpenAI::Models::Graders::StringCheckGrader::operation + + attr_accessor reference: String + + attr_accessor type: :string_check + + def initialize: ( + input: String, + name: String, + operation: OpenAI::Models::Graders::StringCheckGrader::operation, + reference: String, + ?type: :string_check + ) -> void + + def to_hash: -> { + input: String, + name: String, + operation: OpenAI::Models::Graders::StringCheckGrader::operation, + reference: String, + type: :string_check + } + + type operation = :eq | :ne | :like | :ilike + 
+ module Operation + extend OpenAI::Internal::Type::Enum + + EQ: :eq + NE: :ne + LIKE: :like + ILIKE: :ilike + + def self?.values: -> ::Array[OpenAI::Models::Graders::StringCheckGrader::operation] + end + end + end + end +end diff --git a/sig/openai/models/graders/text_similarity_grader.rbs b/sig/openai/models/graders/text_similarity_grader.rbs new file mode 100644 index 00000000..e1da4b35 --- /dev/null +++ b/sig/openai/models/graders/text_similarity_grader.rbs @@ -0,0 +1,75 @@ +module OpenAI + module Models + class TextSimilarityGrader = Graders::TextSimilarityGrader + + module Graders + type text_similarity_grader = + { + evaluation_metric: OpenAI::Models::Graders::TextSimilarityGrader::evaluation_metric, + input: String, + name: String, + reference: String, + type: :text_similarity + } + + class TextSimilarityGrader < OpenAI::Internal::Type::BaseModel + attr_accessor evaluation_metric: OpenAI::Models::Graders::TextSimilarityGrader::evaluation_metric + + attr_accessor input: String + + attr_accessor name: String + + attr_accessor reference: String + + attr_accessor type: :text_similarity + + def initialize: ( + evaluation_metric: OpenAI::Models::Graders::TextSimilarityGrader::evaluation_metric, + input: String, + name: String, + reference: String, + ?type: :text_similarity + ) -> void + + def to_hash: -> { + evaluation_metric: OpenAI::Models::Graders::TextSimilarityGrader::evaluation_metric, + input: String, + name: String, + reference: String, + type: :text_similarity + } + + type evaluation_metric = + :cosine + | :fuzzy_match + | :bleu + | :gleu + | :meteor + | :rouge_1 + | :rouge_2 + | :rouge_3 + | :rouge_4 + | :rouge_5 + | :rouge_l + + module EvaluationMetric + extend OpenAI::Internal::Type::Enum + + COSINE: :cosine + FUZZY_MATCH: :fuzzy_match + BLEU: :bleu + GLEU: :gleu + METEOR: :meteor + ROUGE_1: :rouge_1 + ROUGE_2: :rouge_2 + ROUGE_3: :rouge_3 + ROUGE_4: :rouge_4 + ROUGE_5: :rouge_5 + ROUGE_L: :rouge_l + + def self?.values: -> ::Array[OpenAI::Models::Graders::TextSimilarityGrader::evaluation_metric] + end + end + end + end +end diff --git a/sig/openai/models/image.rbs b/sig/openai/models/image.rbs index 3c094e5d..fd7554b0 100644 --- a/sig/openai/models/image.rbs +++ b/sig/openai/models/image.rbs @@ -2,7 +2,7 @@ module OpenAI module Models type image = { :b64_json => String, revised_prompt: String, url: String } - class Image < OpenAI::BaseModel + class Image < OpenAI::Internal::Type::BaseModel attr_reader b64_json: String? 
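# A minimal sketch for `TextSimilarityGrader` above: `evaluation_metric:`
# selects one of the eleven listed metrics. The template strings here are
# hypothetical:
ts = OpenAI::Graders::TextSimilarityGrader.new(
  evaluation_metric: :fuzzy_match, # or :cosine, :bleu, :rouge_l, ...
  input: "{{sample.output_text}}",
  name: "similarity",
  reference: "{{item.reference_answer}}"
)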
def b64_json=: (String) -> String @@ -15,11 +15,17 @@ module OpenAI def url=: (String) -> String - def initialize: - (b64_json: String, revised_prompt: String, url: String) -> void - | (?OpenAI::Models::image | OpenAI::BaseModel data) -> void - - def to_hash: -> OpenAI::Models::image + def initialize: ( + ?b64_json: String, + ?revised_prompt: String, + ?url: String + ) -> void + + def to_hash: -> { + :b64_json => String, + revised_prompt: String, + url: String + } end end end diff --git a/sig/openai/models/image_create_variation_params.rbs b/sig/openai/models/image_create_variation_params.rbs index 983f3cea..47601c14 100644 --- a/sig/openai/models/image_create_variation_params.rbs +++ b/sig/openai/models/image_create_variation_params.rbs @@ -2,20 +2,20 @@ module OpenAI module Models type image_create_variation_params = { - image: (IO | StringIO), + image: OpenAI::Internal::file_input, model: OpenAI::Models::ImageCreateVariationParams::model?, n: Integer?, response_format: OpenAI::Models::ImageCreateVariationParams::response_format?, size: OpenAI::Models::ImageCreateVariationParams::size?, user: String } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class ImageCreateVariationParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class ImageCreateVariationParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - attr_accessor image: IO | StringIO + attr_accessor image: OpenAI::Internal::file_input attr_accessor model: OpenAI::Models::ImageCreateVariationParams::model? @@ -29,46 +29,55 @@ module OpenAI def user=: (String) -> String - def initialize: - ( - image: IO | StringIO, - model: OpenAI::Models::ImageCreateVariationParams::model?, - n: Integer?, - response_format: OpenAI::Models::ImageCreateVariationParams::response_format?, - size: OpenAI::Models::ImageCreateVariationParams::size?, - user: String, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::image_create_variation_params - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::image_create_variation_params + def initialize: ( + image: OpenAI::Internal::file_input, + ?model: OpenAI::Models::ImageCreateVariationParams::model?, + ?n: Integer?, + ?response_format: OpenAI::Models::ImageCreateVariationParams::response_format?, + ?size: OpenAI::Models::ImageCreateVariationParams::size?, + ?user: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + image: OpenAI::Internal::file_input, + model: OpenAI::Models::ImageCreateVariationParams::model?, + n: Integer?, + response_format: OpenAI::Models::ImageCreateVariationParams::response_format?, + size: OpenAI::Models::ImageCreateVariationParams::size?, + user: String, + request_options: OpenAI::RequestOptions + } type model = String | OpenAI::Models::image_model - class Model < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, OpenAI::Models::image_model]] + module Model + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::ImageCreateVariationParams::model] end type response_format = :url | :b64_json - class ResponseFormat < OpenAI::Enum + module ResponseFormat + extend OpenAI::Internal::Type::Enum + URL: :url B64_JSON: :b64_json - def self.values: -> ::Array[OpenAI::Models::ImageCreateVariationParams::response_format] + def self?.values: -> 
::Array[OpenAI::Models::ImageCreateVariationParams::response_format] end type size = :"256x256" | :"512x512" | :"1024x1024" - class Size < OpenAI::Enum - NUMBER_256X256: :"256x256" - NUMBER_512X512: :"512x512" - NUMBER_1024X1024: :"1024x1024" + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_256X256: :"256x256" + SIZE_512X512: :"512x512" + SIZE_1024X1024: :"1024x1024" - def self.values: -> ::Array[OpenAI::Models::ImageCreateVariationParams::size] + def self?.values: -> ::Array[OpenAI::Models::ImageCreateVariationParams::size] end end end diff --git a/sig/openai/models/image_edit_completed_event.rbs b/sig/openai/models/image_edit_completed_event.rbs new file mode 100644 index 00000000..e21a10d8 --- /dev/null +++ b/sig/openai/models/image_edit_completed_event.rbs @@ -0,0 +1,150 @@ +module OpenAI + module Models + type image_edit_completed_event = + { + :b64_json => String, + background: OpenAI::Models::ImageEditCompletedEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageEditCompletedEvent::output_format, + quality: OpenAI::Models::ImageEditCompletedEvent::quality, + size: OpenAI::Models::ImageEditCompletedEvent::size, + type: :"image_edit.completed", + usage: OpenAI::ImageEditCompletedEvent::Usage + } + + class ImageEditCompletedEvent < OpenAI::Internal::Type::BaseModel + attr_accessor b64_json: String + + attr_accessor background: OpenAI::Models::ImageEditCompletedEvent::background + + attr_accessor created_at: Integer + + attr_accessor output_format: OpenAI::Models::ImageEditCompletedEvent::output_format + + attr_accessor quality: OpenAI::Models::ImageEditCompletedEvent::quality + + attr_accessor size: OpenAI::Models::ImageEditCompletedEvent::size + + attr_accessor type: :"image_edit.completed" + + attr_accessor usage: OpenAI::ImageEditCompletedEvent::Usage + + def initialize: ( + b64_json: String, + background: OpenAI::Models::ImageEditCompletedEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageEditCompletedEvent::output_format, + quality: OpenAI::Models::ImageEditCompletedEvent::quality, + size: OpenAI::Models::ImageEditCompletedEvent::size, + usage: OpenAI::ImageEditCompletedEvent::Usage, + ?type: :"image_edit.completed" + ) -> void + + def to_hash: -> { + :b64_json => String, + background: OpenAI::Models::ImageEditCompletedEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageEditCompletedEvent::output_format, + quality: OpenAI::Models::ImageEditCompletedEvent::quality, + size: OpenAI::Models::ImageEditCompletedEvent::size, + type: :"image_edit.completed", + usage: OpenAI::ImageEditCompletedEvent::Usage + } + + type background = :transparent | :opaque | :auto + + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT: :transparent + OPAQUE: :opaque + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::background] + end + + type output_format = :png | :webp | :jpeg + + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG: :png + WEBP: :webp + JPEG: :jpeg + + def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::output_format] + end + + type quality = :low | :medium | :high | :auto + + module Quality + extend OpenAI::Internal::Type::Enum + + LOW: :low + MEDIUM: :medium + HIGH: :high + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::quality] + end + + type size = :"1024x1024" | :"1024x1536" | :"1536x1024" | :auto + + module Size + extend OpenAI::Internal::Type::Enum + + 
SIZE_1024X1024: :"1024x1024" + SIZE_1024X1536: :"1024x1536" + SIZE_1536X1024: :"1536x1024" + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::size] + end + + type usage = + { + input_tokens: Integer, + input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + } + + class Usage < OpenAI::Internal::Type::BaseModel + attr_accessor input_tokens: Integer + + attr_accessor input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails + + attr_accessor output_tokens: Integer + + attr_accessor total_tokens: Integer + + def initialize: ( + input_tokens: Integer, + input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + ) -> void + + def to_hash: -> { + input_tokens: Integer, + input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + } + + type input_tokens_details = + { image_tokens: Integer, text_tokens: Integer } + + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + attr_accessor image_tokens: Integer + + attr_accessor text_tokens: Integer + + def initialize: (image_tokens: Integer, text_tokens: Integer) -> void + + def to_hash: -> { image_tokens: Integer, text_tokens: Integer } + end + end + end + end +end diff --git a/sig/openai/models/image_edit_params.rbs b/sig/openai/models/image_edit_params.rbs index e791162c..66f4812f 100644 --- a/sig/openai/models/image_edit_params.rbs +++ b/sig/openai/models/image_edit_params.rbs @@ -2,33 +2,51 @@ module OpenAI module Models type image_edit_params = { - image: (IO | StringIO), + image: OpenAI::Models::ImageEditParams::image, prompt: String, - mask: (IO | StringIO), + background: OpenAI::Models::ImageEditParams::background?, + input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?, + mask: OpenAI::Internal::file_input, model: OpenAI::Models::ImageEditParams::model?, n: Integer?, + output_compression: Integer?, + output_format: OpenAI::Models::ImageEditParams::output_format?, + partial_images: Integer?, + quality: OpenAI::Models::ImageEditParams::quality?, response_format: OpenAI::Models::ImageEditParams::response_format?, size: OpenAI::Models::ImageEditParams::size?, user: String } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class ImageEditParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class ImageEditParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - attr_accessor image: IO | StringIO + attr_accessor image: OpenAI::Models::ImageEditParams::image attr_accessor prompt: String - attr_reader mask: (IO | StringIO)? + attr_accessor background: OpenAI::Models::ImageEditParams::background? - def mask=: (IO | StringIO) -> (IO | StringIO) + attr_accessor input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity? + + attr_reader mask: OpenAI::Internal::file_input? + + def mask=: (OpenAI::Internal::file_input) -> OpenAI::Internal::file_input attr_accessor model: OpenAI::Models::ImageEditParams::model? attr_accessor n: Integer? + attr_accessor output_compression: Integer? + + attr_accessor output_format: OpenAI::Models::ImageEditParams::output_format? + + attr_accessor partial_images: Integer? 
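# A minimal read sketch for the completed-event `Usage` above: input tokens are
# broken down into image and text components. `event` is a hypothetical
# ImageEditCompletedEvent instance:
usage = event.usage
details = usage.input_tokens_details
puts "input=#{usage.input_tokens} (image=#{details.image_tokens}, " \
     "text=#{details.text_tokens}) output=#{usage.output_tokens} " \
     "total=#{usage.total_tokens}"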
+ + attr_accessor quality: OpenAI::Models::ImageEditParams::quality? + attr_accessor response_format: OpenAI::Models::ImageEditParams::response_format? attr_accessor size: OpenAI::Models::ImageEditParams::size? @@ -37,45 +55,140 @@ module OpenAI def user=: (String) -> String - def initialize: - ( - image: IO | StringIO, - prompt: String, - mask: IO | StringIO, - model: OpenAI::Models::ImageEditParams::model?, - n: Integer?, - response_format: OpenAI::Models::ImageEditParams::response_format?, - size: OpenAI::Models::ImageEditParams::size?, - user: String, - request_options: OpenAI::request_opts - ) -> void - | (?OpenAI::Models::image_edit_params | OpenAI::BaseModel data) -> void - - def to_hash: -> OpenAI::Models::image_edit_params + def initialize: ( + image: OpenAI::Models::ImageEditParams::image, + prompt: String, + ?background: OpenAI::Models::ImageEditParams::background?, + ?input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?, + ?mask: OpenAI::Internal::file_input, + ?model: OpenAI::Models::ImageEditParams::model?, + ?n: Integer?, + ?output_compression: Integer?, + ?output_format: OpenAI::Models::ImageEditParams::output_format?, + ?partial_images: Integer?, + ?quality: OpenAI::Models::ImageEditParams::quality?, + ?response_format: OpenAI::Models::ImageEditParams::response_format?, + ?size: OpenAI::Models::ImageEditParams::size?, + ?user: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + image: OpenAI::Models::ImageEditParams::image, + prompt: String, + background: OpenAI::Models::ImageEditParams::background?, + input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?, + mask: OpenAI::Internal::file_input, + model: OpenAI::Models::ImageEditParams::model?, + n: Integer?, + output_compression: Integer?, + output_format: OpenAI::Models::ImageEditParams::output_format?, + partial_images: Integer?, + quality: OpenAI::Models::ImageEditParams::quality?, + response_format: OpenAI::Models::ImageEditParams::response_format?, + size: OpenAI::Models::ImageEditParams::size?, + user: String, + request_options: OpenAI::RequestOptions + } + + type image = + OpenAI::Internal::file_input | ::Array[OpenAI::Internal::file_input] + + module Image + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::ImageEditParams::image] + + StringArray: OpenAI::Internal::Type::Converter + end + + type background = :transparent | :opaque | :auto + + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT: :transparent + OPAQUE: :opaque + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::background] + end + + type input_fidelity = :high | :low + + module InputFidelity + extend OpenAI::Internal::Type::Enum + + HIGH: :high + LOW: :low + + def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::input_fidelity] + end type model = String | OpenAI::Models::image_model - class Model < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, OpenAI::Models::image_model]] + module Model + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::ImageEditParams::model] + end + + type output_format = :png | :jpeg | :webp + + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG: :png + JPEG: :jpeg + WEBP: :webp + + def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::output_format] + end + + type quality = :standard | :low | :medium | :high | :auto + + module Quality + extend OpenAI::Internal::Type::Enum + + STANDARD: :standard + 
LOW: :low + MEDIUM: :medium + HIGH: :high + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::quality] end type response_format = :url | :b64_json - class ResponseFormat < OpenAI::Enum + module ResponseFormat + extend OpenAI::Internal::Type::Enum + URL: :url B64_JSON: :b64_json - def self.values: -> ::Array[OpenAI::Models::ImageEditParams::response_format] + def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::response_format] end - type size = :"256x256" | :"512x512" | :"1024x1024" - - class Size < OpenAI::Enum - NUMBER_256X256: :"256x256" - NUMBER_512X512: :"512x512" - NUMBER_1024X1024: :"1024x1024" - - def self.values: -> ::Array[OpenAI::Models::ImageEditParams::size] + type size = + :"256x256" + | :"512x512" + | :"1024x1024" + | :"1536x1024" + | :"1024x1536" + | :auto + + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_256X256: :"256x256" + SIZE_512X512: :"512x512" + SIZE_1024X1024: :"1024x1024" + SIZE_1536X1024: :"1536x1024" + SIZE_1024X1536: :"1024x1536" + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::size] end end end diff --git a/sig/openai/models/image_edit_partial_image_event.rbs b/sig/openai/models/image_edit_partial_image_event.rbs new file mode 100644 index 00000000..1a96d108 --- /dev/null +++ b/sig/openai/models/image_edit_partial_image_event.rbs @@ -0,0 +1,105 @@ +module OpenAI + module Models + type image_edit_partial_image_event = + { + :b64_json => String, + background: OpenAI::Models::ImageEditPartialImageEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageEditPartialImageEvent::output_format, + partial_image_index: Integer, + quality: OpenAI::Models::ImageEditPartialImageEvent::quality, + size: OpenAI::Models::ImageEditPartialImageEvent::size, + type: :"image_edit.partial_image" + } + + class ImageEditPartialImageEvent < OpenAI::Internal::Type::BaseModel + attr_accessor b64_json: String + + attr_accessor background: OpenAI::Models::ImageEditPartialImageEvent::background + + attr_accessor created_at: Integer + + attr_accessor output_format: OpenAI::Models::ImageEditPartialImageEvent::output_format + + attr_accessor partial_image_index: Integer + + attr_accessor quality: OpenAI::Models::ImageEditPartialImageEvent::quality + + attr_accessor size: OpenAI::Models::ImageEditPartialImageEvent::size + + attr_accessor type: :"image_edit.partial_image" + + def initialize: ( + b64_json: String, + background: OpenAI::Models::ImageEditPartialImageEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageEditPartialImageEvent::output_format, + partial_image_index: Integer, + quality: OpenAI::Models::ImageEditPartialImageEvent::quality, + size: OpenAI::Models::ImageEditPartialImageEvent::size, + ?type: :"image_edit.partial_image" + ) -> void + + def to_hash: -> { + :b64_json => String, + background: OpenAI::Models::ImageEditPartialImageEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageEditPartialImageEvent::output_format, + partial_image_index: Integer, + quality: OpenAI::Models::ImageEditPartialImageEvent::quality, + size: OpenAI::Models::ImageEditPartialImageEvent::size, + type: :"image_edit.partial_image" + } + + type background = :transparent | :opaque | :auto + + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT: :transparent + OPAQUE: :opaque + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageEditPartialImageEvent::background] + end + + type output_format = :png | :webp | :jpeg + + 
module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG: :png + WEBP: :webp + JPEG: :jpeg + + def self?.values: -> ::Array[OpenAI::Models::ImageEditPartialImageEvent::output_format] + end + + type quality = :low | :medium | :high | :auto + + module Quality + extend OpenAI::Internal::Type::Enum + + LOW: :low + MEDIUM: :medium + HIGH: :high + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageEditPartialImageEvent::quality] + end + + type size = :"1024x1024" | :"1024x1536" | :"1536x1024" | :auto + + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024: :"1024x1024" + SIZE_1024X1536: :"1024x1536" + SIZE_1536X1024: :"1536x1024" + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageEditPartialImageEvent::size] + end + end + end +end diff --git a/sig/openai/models/image_edit_stream_event.rbs b/sig/openai/models/image_edit_stream_event.rbs new file mode 100644 index 00000000..0b0b65ce --- /dev/null +++ b/sig/openai/models/image_edit_stream_event.rbs @@ -0,0 +1,12 @@ +module OpenAI + module Models + type image_edit_stream_event = + OpenAI::ImageEditPartialImageEvent | OpenAI::ImageEditCompletedEvent + + module ImageEditStreamEvent + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::image_edit_stream_event] + end + end +end diff --git a/sig/openai/models/image_gen_completed_event.rbs b/sig/openai/models/image_gen_completed_event.rbs new file mode 100644 index 00000000..c47de644 --- /dev/null +++ b/sig/openai/models/image_gen_completed_event.rbs @@ -0,0 +1,150 @@ +module OpenAI + module Models + type image_gen_completed_event = + { + :b64_json => String, + background: OpenAI::Models::ImageGenCompletedEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageGenCompletedEvent::output_format, + quality: OpenAI::Models::ImageGenCompletedEvent::quality, + size: OpenAI::Models::ImageGenCompletedEvent::size, + type: :"image_generation.completed", + usage: OpenAI::ImageGenCompletedEvent::Usage + } + + class ImageGenCompletedEvent < OpenAI::Internal::Type::BaseModel + attr_accessor b64_json: String + + attr_accessor background: OpenAI::Models::ImageGenCompletedEvent::background + + attr_accessor created_at: Integer + + attr_accessor output_format: OpenAI::Models::ImageGenCompletedEvent::output_format + + attr_accessor quality: OpenAI::Models::ImageGenCompletedEvent::quality + + attr_accessor size: OpenAI::Models::ImageGenCompletedEvent::size + + attr_accessor type: :"image_generation.completed" + + attr_accessor usage: OpenAI::ImageGenCompletedEvent::Usage + + def initialize: ( + b64_json: String, + background: OpenAI::Models::ImageGenCompletedEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageGenCompletedEvent::output_format, + quality: OpenAI::Models::ImageGenCompletedEvent::quality, + size: OpenAI::Models::ImageGenCompletedEvent::size, + usage: OpenAI::ImageGenCompletedEvent::Usage, + ?type: :"image_generation.completed" + ) -> void + + def to_hash: -> { + :b64_json => String, + background: OpenAI::Models::ImageGenCompletedEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageGenCompletedEvent::output_format, + quality: OpenAI::Models::ImageGenCompletedEvent::quality, + size: OpenAI::Models::ImageGenCompletedEvent::size, + type: :"image_generation.completed", + usage: OpenAI::ImageGenCompletedEvent::Usage + } + + type background = :transparent | :opaque | :auto + + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT: 
:transparent + OPAQUE: :opaque + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageGenCompletedEvent::background] + end + + type output_format = :png | :webp | :jpeg + + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG: :png + WEBP: :webp + JPEG: :jpeg + + def self?.values: -> ::Array[OpenAI::Models::ImageGenCompletedEvent::output_format] + end + + type quality = :low | :medium | :high | :auto + + module Quality + extend OpenAI::Internal::Type::Enum + + LOW: :low + MEDIUM: :medium + HIGH: :high + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageGenCompletedEvent::quality] + end + + type size = :"1024x1024" | :"1024x1536" | :"1536x1024" | :auto + + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024: :"1024x1024" + SIZE_1024X1536: :"1024x1536" + SIZE_1536X1024: :"1536x1024" + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageGenCompletedEvent::size] + end + + type usage = + { + input_tokens: Integer, + input_tokens_details: OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + } + + class Usage < OpenAI::Internal::Type::BaseModel + attr_accessor input_tokens: Integer + + attr_accessor input_tokens_details: OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails + + attr_accessor output_tokens: Integer + + attr_accessor total_tokens: Integer + + def initialize: ( + input_tokens: Integer, + input_tokens_details: OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + ) -> void + + def to_hash: -> { + input_tokens: Integer, + input_tokens_details: OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + } + + type input_tokens_details = + { image_tokens: Integer, text_tokens: Integer } + + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + attr_accessor image_tokens: Integer + + attr_accessor text_tokens: Integer + + def initialize: (image_tokens: Integer, text_tokens: Integer) -> void + + def to_hash: -> { image_tokens: Integer, text_tokens: Integer } + end + end + end + end +end diff --git a/sig/openai/models/image_gen_partial_image_event.rbs b/sig/openai/models/image_gen_partial_image_event.rbs new file mode 100644 index 00000000..bffb443d --- /dev/null +++ b/sig/openai/models/image_gen_partial_image_event.rbs @@ -0,0 +1,105 @@ +module OpenAI + module Models + type image_gen_partial_image_event = + { + :b64_json => String, + background: OpenAI::Models::ImageGenPartialImageEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageGenPartialImageEvent::output_format, + partial_image_index: Integer, + quality: OpenAI::Models::ImageGenPartialImageEvent::quality, + size: OpenAI::Models::ImageGenPartialImageEvent::size, + type: :"image_generation.partial_image" + } + + class ImageGenPartialImageEvent < OpenAI::Internal::Type::BaseModel + attr_accessor b64_json: String + + attr_accessor background: OpenAI::Models::ImageGenPartialImageEvent::background + + attr_accessor created_at: Integer + + attr_accessor output_format: OpenAI::Models::ImageGenPartialImageEvent::output_format + + attr_accessor partial_image_index: Integer + + attr_accessor quality: OpenAI::Models::ImageGenPartialImageEvent::quality + + attr_accessor size: OpenAI::Models::ImageGenPartialImageEvent::size + + attr_accessor type: :"image_generation.partial_image" + + def initialize: ( + b64_json: String, + background: 
OpenAI::Models::ImageGenPartialImageEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageGenPartialImageEvent::output_format, + partial_image_index: Integer, + quality: OpenAI::Models::ImageGenPartialImageEvent::quality, + size: OpenAI::Models::ImageGenPartialImageEvent::size, + ?type: :"image_generation.partial_image" + ) -> void + + def to_hash: -> { + :b64_json => String, + background: OpenAI::Models::ImageGenPartialImageEvent::background, + created_at: Integer, + output_format: OpenAI::Models::ImageGenPartialImageEvent::output_format, + partial_image_index: Integer, + quality: OpenAI::Models::ImageGenPartialImageEvent::quality, + size: OpenAI::Models::ImageGenPartialImageEvent::size, + type: :"image_generation.partial_image" + } + + type background = :transparent | :opaque | :auto + + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT: :transparent + OPAQUE: :opaque + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageGenPartialImageEvent::background] + end + + type output_format = :png | :webp | :jpeg + + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG: :png + WEBP: :webp + JPEG: :jpeg + + def self?.values: -> ::Array[OpenAI::Models::ImageGenPartialImageEvent::output_format] + end + + type quality = :low | :medium | :high | :auto + + module Quality + extend OpenAI::Internal::Type::Enum + + LOW: :low + MEDIUM: :medium + HIGH: :high + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageGenPartialImageEvent::quality] + end + + type size = :"1024x1024" | :"1024x1536" | :"1536x1024" | :auto + + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024: :"1024x1024" + SIZE_1024X1536: :"1024x1536" + SIZE_1536X1024: :"1536x1024" + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageGenPartialImageEvent::size] + end + end + end +end diff --git a/sig/openai/models/image_gen_stream_event.rbs b/sig/openai/models/image_gen_stream_event.rbs new file mode 100644 index 00000000..b1489c24 --- /dev/null +++ b/sig/openai/models/image_gen_stream_event.rbs @@ -0,0 +1,12 @@ +module OpenAI + module Models + type image_gen_stream_event = + OpenAI::ImageGenPartialImageEvent | OpenAI::ImageGenCompletedEvent + + module ImageGenStreamEvent + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::image_gen_stream_event] + end + end +end diff --git a/sig/openai/models/image_generate_params.rbs b/sig/openai/models/image_generate_params.rbs index a91c05d0..e870a248 100644 --- a/sig/openai/models/image_generate_params.rbs +++ b/sig/openai/models/image_generate_params.rbs @@ -3,31 +3,42 @@ module OpenAI type image_generate_params = { prompt: String, + background: OpenAI::Models::ImageGenerateParams::background?, model: OpenAI::Models::ImageGenerateParams::model?, + moderation: OpenAI::Models::ImageGenerateParams::moderation?, n: Integer?, - quality: OpenAI::Models::ImageGenerateParams::quality, + output_compression: Integer?, + output_format: OpenAI::Models::ImageGenerateParams::output_format?, + partial_images: Integer?, + quality: OpenAI::Models::ImageGenerateParams::quality?, response_format: OpenAI::Models::ImageGenerateParams::response_format?, size: OpenAI::Models::ImageGenerateParams::size?, style: OpenAI::Models::ImageGenerateParams::style?, user: String } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class ImageGenerateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters 
+ class ImageGenerateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor prompt: String + attr_accessor background: OpenAI::Models::ImageGenerateParams::background? + attr_accessor model: OpenAI::Models::ImageGenerateParams::model? + attr_accessor moderation: OpenAI::Models::ImageGenerateParams::moderation? + attr_accessor n: Integer? - attr_reader quality: OpenAI::Models::ImageGenerateParams::quality? + attr_accessor output_compression: Integer? + + attr_accessor output_format: OpenAI::Models::ImageGenerateParams::output_format? - def quality=: ( - OpenAI::Models::ImageGenerateParams::quality - ) -> OpenAI::Models::ImageGenerateParams::quality + attr_accessor partial_images: Integer? + + attr_accessor quality: OpenAI::Models::ImageGenerateParams::quality? attr_accessor response_format: OpenAI::Models::ImageGenerateParams::response_format? @@ -39,68 +50,143 @@ module OpenAI def user=: (String) -> String - def initialize: - ( - prompt: String, - model: OpenAI::Models::ImageGenerateParams::model?, - n: Integer?, - quality: OpenAI::Models::ImageGenerateParams::quality, - response_format: OpenAI::Models::ImageGenerateParams::response_format?, - size: OpenAI::Models::ImageGenerateParams::size?, - style: OpenAI::Models::ImageGenerateParams::style?, - user: String, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::image_generate_params | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::image_generate_params + def initialize: ( + prompt: String, + ?background: OpenAI::Models::ImageGenerateParams::background?, + ?model: OpenAI::Models::ImageGenerateParams::model?, + ?moderation: OpenAI::Models::ImageGenerateParams::moderation?, + ?n: Integer?, + ?output_compression: Integer?, + ?output_format: OpenAI::Models::ImageGenerateParams::output_format?, + ?partial_images: Integer?, + ?quality: OpenAI::Models::ImageGenerateParams::quality?, + ?response_format: OpenAI::Models::ImageGenerateParams::response_format?, + ?size: OpenAI::Models::ImageGenerateParams::size?, + ?style: OpenAI::Models::ImageGenerateParams::style?, + ?user: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + prompt: String, + background: OpenAI::Models::ImageGenerateParams::background?, + model: OpenAI::Models::ImageGenerateParams::model?, + moderation: OpenAI::Models::ImageGenerateParams::moderation?, + n: Integer?, + output_compression: Integer?, + output_format: OpenAI::Models::ImageGenerateParams::output_format?, + partial_images: Integer?, + quality: OpenAI::Models::ImageGenerateParams::quality?, + response_format: OpenAI::Models::ImageGenerateParams::response_format?, + size: OpenAI::Models::ImageGenerateParams::size?, + style: OpenAI::Models::ImageGenerateParams::style?, + user: String, + request_options: OpenAI::RequestOptions + } + + type background = :transparent | :opaque | :auto + + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT: :transparent + OPAQUE: :opaque + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageGenerateParams::background] + end type model = String | OpenAI::Models::image_model - class Model < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, OpenAI::Models::image_model]] + module Model + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::ImageGenerateParams::model] + end + + type moderation = :low | :auto + + module 
Moderation + extend OpenAI::Internal::Type::Enum + + LOW: :low + AUTO: :auto + + def self?.values: -> ::Array[OpenAI::Models::ImageGenerateParams::moderation] + end + + type output_format = :png | :jpeg | :webp + + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG: :png + JPEG: :jpeg + WEBP: :webp + + def self?.values: -> ::Array[OpenAI::Models::ImageGenerateParams::output_format] end - type quality = :standard | :hd + type quality = :standard | :hd | :low | :medium | :high | :auto + + module Quality + extend OpenAI::Internal::Type::Enum - class Quality < OpenAI::Enum STANDARD: :standard HD: :hd + LOW: :low + MEDIUM: :medium + HIGH: :high + AUTO: :auto - def self.values: -> ::Array[OpenAI::Models::ImageGenerateParams::quality] + def self?.values: -> ::Array[OpenAI::Models::ImageGenerateParams::quality] end type response_format = :url | :b64_json - class ResponseFormat < OpenAI::Enum + module ResponseFormat + extend OpenAI::Internal::Type::Enum + URL: :url B64_JSON: :b64_json - def self.values: -> ::Array[OpenAI::Models::ImageGenerateParams::response_format] + def self?.values: -> ::Array[OpenAI::Models::ImageGenerateParams::response_format] end type size = - :"256x256" | :"512x512" | :"1024x1024" | :"1792x1024" | :"1024x1792" - - class Size < OpenAI::Enum - NUMBER_256X256: :"256x256" - NUMBER_512X512: :"512x512" - NUMBER_1024X1024: :"1024x1024" - NUMBER_1792X1024: :"1792x1024" - NUMBER_1024X1792: :"1024x1792" - - def self.values: -> ::Array[OpenAI::Models::ImageGenerateParams::size] + :auto + | :"1024x1024" + | :"1536x1024" + | :"1024x1536" + | :"256x256" + | :"512x512" + | :"1792x1024" + | :"1024x1792" + + module Size + extend OpenAI::Internal::Type::Enum + + AUTO: :auto + SIZE_1024X1024: :"1024x1024" + SIZE_1536X1024: :"1536x1024" + SIZE_1024X1536: :"1024x1536" + SIZE_256X256: :"256x256" + SIZE_512X512: :"512x512" + SIZE_1792X1024: :"1792x1024" + SIZE_1024X1792: :"1024x1792" + + def self?.values: -> ::Array[OpenAI::Models::ImageGenerateParams::size] end type style = :vivid | :natural - class Style < OpenAI::Enum + module Style + extend OpenAI::Internal::Type::Enum + VIVID: :vivid NATURAL: :natural - def self.values: -> ::Array[OpenAI::Models::ImageGenerateParams::style] + def self?.values: -> ::Array[OpenAI::Models::ImageGenerateParams::style] end end end diff --git a/sig/openai/models/image_model.rbs b/sig/openai/models/image_model.rbs index 1151fd9b..e41f713d 100644 --- a/sig/openai/models/image_model.rbs +++ b/sig/openai/models/image_model.rbs @@ -1,12 +1,15 @@ module OpenAI module Models - type image_model = :"dall-e-2" | :"dall-e-3" + type image_model = :"dall-e-2" | :"dall-e-3" | :"gpt-image-1" + + module ImageModel + extend OpenAI::Internal::Type::Enum - class ImageModel < OpenAI::Enum DALL_E_2: :"dall-e-2" DALL_E_3: :"dall-e-3" + GPT_IMAGE_1: :"gpt-image-1" - def self.values: -> ::Array[OpenAI::Models::image_model] + def self?.values: -> ::Array[OpenAI::Models::image_model] end end end diff --git a/sig/openai/models/images_response.rbs b/sig/openai/models/images_response.rbs index 97764d62..7cf7b0db 100644 --- a/sig/openai/models/images_response.rbs +++ b/sig/openai/models/images_response.rbs @@ -1,18 +1,164 @@ module OpenAI module Models type images_response = - { created: Integer, data: ::Array[OpenAI::Models::Image] } + { + created: Integer, + background: OpenAI::Models::ImagesResponse::background, + data: ::Array[OpenAI::Image], + output_format: OpenAI::Models::ImagesResponse::output_format, + quality: OpenAI::Models::ImagesResponse::quality, + size: 
OpenAI::Models::ImagesResponse::size, + usage: OpenAI::ImagesResponse::Usage + } - class ImagesResponse < OpenAI::BaseModel + class ImagesResponse < OpenAI::Internal::Type::BaseModel attr_accessor created: Integer - attr_accessor data: ::Array[OpenAI::Models::Image] + attr_reader background: OpenAI::Models::ImagesResponse::background? - def initialize: - (created: Integer, data: ::Array[OpenAI::Models::Image]) -> void - | (?OpenAI::Models::images_response | OpenAI::BaseModel data) -> void + def background=: ( + OpenAI::Models::ImagesResponse::background + ) -> OpenAI::Models::ImagesResponse::background - def to_hash: -> OpenAI::Models::images_response + attr_reader data: ::Array[OpenAI::Image]? + + def data=: (::Array[OpenAI::Image]) -> ::Array[OpenAI::Image] + + attr_reader output_format: OpenAI::Models::ImagesResponse::output_format? + + def output_format=: ( + OpenAI::Models::ImagesResponse::output_format + ) -> OpenAI::Models::ImagesResponse::output_format + + attr_reader quality: OpenAI::Models::ImagesResponse::quality? + + def quality=: ( + OpenAI::Models::ImagesResponse::quality + ) -> OpenAI::Models::ImagesResponse::quality + + attr_reader size: OpenAI::Models::ImagesResponse::size? + + def size=: ( + OpenAI::Models::ImagesResponse::size + ) -> OpenAI::Models::ImagesResponse::size + + attr_reader usage: OpenAI::ImagesResponse::Usage? + + def usage=: ( + OpenAI::ImagesResponse::Usage + ) -> OpenAI::ImagesResponse::Usage + + def initialize: ( + created: Integer, + ?background: OpenAI::Models::ImagesResponse::background, + ?data: ::Array[OpenAI::Image], + ?output_format: OpenAI::Models::ImagesResponse::output_format, + ?quality: OpenAI::Models::ImagesResponse::quality, + ?size: OpenAI::Models::ImagesResponse::size, + ?usage: OpenAI::ImagesResponse::Usage + ) -> void + + def to_hash: -> { + created: Integer, + background: OpenAI::Models::ImagesResponse::background, + data: ::Array[OpenAI::Image], + output_format: OpenAI::Models::ImagesResponse::output_format, + quality: OpenAI::Models::ImagesResponse::quality, + size: OpenAI::Models::ImagesResponse::size, + usage: OpenAI::ImagesResponse::Usage + } + + type background = :transparent | :opaque + + module Background + extend OpenAI::Internal::Type::Enum + + TRANSPARENT: :transparent + OPAQUE: :opaque + + def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::background] + end + + type output_format = :png | :webp | :jpeg + + module OutputFormat + extend OpenAI::Internal::Type::Enum + + PNG: :png + WEBP: :webp + JPEG: :jpeg + + def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::output_format] + end + + type quality = :low | :medium | :high + + module Quality + extend OpenAI::Internal::Type::Enum + + LOW: :low + MEDIUM: :medium + HIGH: :high + + def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::quality] + end + + type size = :"1024x1024" | :"1024x1536" | :"1536x1024" + + module Size + extend OpenAI::Internal::Type::Enum + + SIZE_1024X1024: :"1024x1024" + SIZE_1024X1536: :"1024x1536" + SIZE_1536X1024: :"1536x1024" + + def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::size] + end + + type usage = + { + input_tokens: Integer, + input_tokens_details: OpenAI::ImagesResponse::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + } + + class Usage < OpenAI::Internal::Type::BaseModel + attr_accessor input_tokens: Integer + + attr_accessor input_tokens_details: OpenAI::ImagesResponse::Usage::InputTokensDetails + + attr_accessor output_tokens: Integer + + attr_accessor total_tokens: 
Integer + + def initialize: ( + input_tokens: Integer, + input_tokens_details: OpenAI::ImagesResponse::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + ) -> void + + def to_hash: -> { + input_tokens: Integer, + input_tokens_details: OpenAI::ImagesResponse::Usage::InputTokensDetails, + output_tokens: Integer, + total_tokens: Integer + } + + type input_tokens_details = + { image_tokens: Integer, text_tokens: Integer } + + class InputTokensDetails < OpenAI::Internal::Type::BaseModel + attr_accessor image_tokens: Integer + + attr_accessor text_tokens: Integer + + def initialize: (image_tokens: Integer, text_tokens: Integer) -> void + + def to_hash: -> { image_tokens: Integer, text_tokens: Integer } + end + end end end end diff --git a/sig/openai/models/metadata.rbs b/sig/openai/models/metadata.rbs index 304e17b7..74e531c1 100644 --- a/sig/openai/models/metadata.rbs +++ b/sig/openai/models/metadata.rbs @@ -2,6 +2,6 @@ module OpenAI module Models type metadata = ::Hash[Symbol, String]? - Metadata: metadata + Metadata: OpenAI::Internal::Type::Converter end end diff --git a/sig/openai/models/model.rbs b/sig/openai/models/model.rbs index 4d5cd661..f6d604c9 100644 --- a/sig/openai/models/model.rbs +++ b/sig/openai/models/model.rbs @@ -3,7 +3,7 @@ module OpenAI type model = { id: String, created: Integer, object: :model, owned_by: String } - class Model < OpenAI::BaseModel + class Model < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor created: Integer @@ -12,11 +12,19 @@ module OpenAI attr_accessor owned_by: String - def initialize: - (id: String, created: Integer, owned_by: String, object: :model) -> void - | (?OpenAI::Models::model | OpenAI::BaseModel data) -> void + def initialize: ( + id: String, + created: Integer, + owned_by: String, + ?object: :model + ) -> void - def to_hash: -> OpenAI::Models::model + def to_hash: -> { + id: String, + created: Integer, + object: :model, + owned_by: String + } end end end diff --git a/sig/openai/models/model_delete_params.rbs b/sig/openai/models/model_delete_params.rbs index 4ea85eb1..68406d8c 100644 --- a/sig/openai/models/model_delete_params.rbs +++ b/sig/openai/models/model_delete_params.rbs @@ -1,18 +1,14 @@ module OpenAI module Models - type model_delete_params = { } & OpenAI::request_parameters + type model_delete_params = { } & OpenAI::Internal::Type::request_parameters - class ModelDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class ModelDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::model_delete_params | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::model_delete_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/model_deleted.rbs b/sig/openai/models/model_deleted.rbs index 47e9b6bb..2676f627 100644 --- a/sig/openai/models/model_deleted.rbs +++ b/sig/openai/models/model_deleted.rbs @@ -2,18 +2,16 @@ module OpenAI module Models type model_deleted = { id: String, deleted: bool, object: String } - class ModelDeleted < OpenAI::BaseModel + class ModelDeleted < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor deleted: bool attr_accessor object: String - 
def initialize: - (id: String, deleted: bool, object: String) -> void - | (?OpenAI::Models::model_deleted | OpenAI::BaseModel data) -> void + def initialize: (id: String, deleted: bool, object: String) -> void - def to_hash: -> OpenAI::Models::model_deleted + def to_hash: -> { id: String, deleted: bool, object: String } end end end diff --git a/sig/openai/models/model_list_params.rbs b/sig/openai/models/model_list_params.rbs index 258c64c8..915bdcf6 100644 --- a/sig/openai/models/model_list_params.rbs +++ b/sig/openai/models/model_list_params.rbs @@ -1,16 +1,14 @@ module OpenAI module Models - type model_list_params = { } & OpenAI::request_parameters + type model_list_params = { } & OpenAI::Internal::Type::request_parameters - class ModelListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class ModelListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | (?OpenAI::Models::model_list_params | OpenAI::BaseModel data) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::model_list_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/model_retrieve_params.rbs b/sig/openai/models/model_retrieve_params.rbs index 78cbe28c..c764c41a 100644 --- a/sig/openai/models/model_retrieve_params.rbs +++ b/sig/openai/models/model_retrieve_params.rbs @@ -1,18 +1,15 @@ module OpenAI module Models - type model_retrieve_params = { } & OpenAI::request_parameters + type model_retrieve_params = + { } & OpenAI::Internal::Type::request_parameters - class ModelRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class ModelRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::model_retrieve_params | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::model_retrieve_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/moderation.rbs b/sig/openai/models/moderation.rbs index a3717393..379bbdc3 100644 --- a/sig/openai/models/moderation.rbs +++ b/sig/openai/models/moderation.rbs @@ -2,31 +2,34 @@ module OpenAI module Models type moderation = { - categories: OpenAI::Models::Moderation::Categories, - category_applied_input_types: OpenAI::Models::Moderation::CategoryAppliedInputTypes, - category_scores: OpenAI::Models::Moderation::CategoryScores, + categories: OpenAI::Moderation::Categories, + category_applied_input_types: OpenAI::Moderation::CategoryAppliedInputTypes, + category_scores: OpenAI::Moderation::CategoryScores, flagged: bool } - class Moderation < OpenAI::BaseModel - attr_accessor categories: OpenAI::Models::Moderation::Categories + class Moderation < OpenAI::Internal::Type::BaseModel + attr_accessor categories: OpenAI::Moderation::Categories - attr_accessor category_applied_input_types: OpenAI::Models::Moderation::CategoryAppliedInputTypes + attr_accessor category_applied_input_types: OpenAI::Moderation::CategoryAppliedInputTypes - attr_accessor 
category_scores: OpenAI::Models::Moderation::CategoryScores + attr_accessor category_scores: OpenAI::Moderation::CategoryScores attr_accessor flagged: bool - def initialize: - ( - categories: OpenAI::Models::Moderation::Categories, - category_applied_input_types: OpenAI::Models::Moderation::CategoryAppliedInputTypes, - category_scores: OpenAI::Models::Moderation::CategoryScores, - flagged: bool - ) -> void - | (?OpenAI::Models::moderation | OpenAI::BaseModel data) -> void + def initialize: ( + categories: OpenAI::Moderation::Categories, + category_applied_input_types: OpenAI::Moderation::CategoryAppliedInputTypes, + category_scores: OpenAI::Moderation::CategoryScores, + flagged: bool + ) -> void - def to_hash: -> OpenAI::Models::moderation + def to_hash: -> { + categories: OpenAI::Moderation::Categories, + category_applied_input_types: OpenAI::Moderation::CategoryAppliedInputTypes, + category_scores: OpenAI::Moderation::CategoryScores, + flagged: bool + } type categories = { @@ -45,7 +48,7 @@ module OpenAI violence_graphic: bool } - class Categories < OpenAI::BaseModel + class Categories < OpenAI::Internal::Type::BaseModel attr_accessor harassment: bool attr_accessor harassment_threatening: bool @@ -72,27 +75,37 @@ module OpenAI attr_accessor violence_graphic: bool - def initialize: - ( - harassment: bool, - harassment_threatening: bool, - hate: bool, - hate_threatening: bool, - illicit: bool?, - illicit_violent: bool?, - self_harm: bool, - self_harm_instructions: bool, - self_harm_intent: bool, - sexual: bool, - sexual_minors: bool, - violence: bool, - violence_graphic: bool - ) -> void - | ( - ?OpenAI::Models::Moderation::categories | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Moderation::categories + def initialize: ( + harassment: bool, + harassment_threatening: bool, + hate: bool, + hate_threatening: bool, + illicit: bool?, + illicit_violent: bool?, + self_harm: bool, + self_harm_instructions: bool, + self_harm_intent: bool, + sexual: bool, + sexual_minors: bool, + violence: bool, + violence_graphic: bool + ) -> void + + def to_hash: -> { + harassment: bool, + harassment_threatening: bool, + hate: bool, + hate_threatening: bool, + illicit: bool?, + illicit_violent: bool?, + self_harm: bool, + self_harm_instructions: bool, + self_harm_intent: bool, + sexual: bool, + sexual_minors: bool, + violence: bool, + violence_graphic: bool + } end type category_applied_input_types = @@ -112,7 +125,7 @@ module OpenAI violence_graphic: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::violence_graphic] } - class CategoryAppliedInputTypes < OpenAI::BaseModel + class CategoryAppliedInputTypes < OpenAI::Internal::Type::BaseModel attr_accessor harassment: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::harassment] attr_accessor harassment_threatening: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::harassment_threatening] @@ -139,137 +152,172 @@ module OpenAI attr_accessor violence_graphic: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::violence_graphic] - def initialize: - ( - harassment: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::harassment], - harassment_threatening: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::harassment_threatening], - hate: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::hate], - hate_threatening: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::hate_threatening], - illicit: 
::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::illicit], - illicit_violent: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::illicit_violent], - self_harm: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::self_harm], - self_harm_instructions: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::self_harm_instruction], - self_harm_intent: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::self_harm_intent], - sexual: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::sexual], - sexual_minors: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::sexual_minor], - violence: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::violence], - violence_graphic: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::violence_graphic] - ) -> void - | ( - ?OpenAI::Models::Moderation::category_applied_input_types - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Moderation::category_applied_input_types + def initialize: ( + harassment: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::harassment], + harassment_threatening: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::harassment_threatening], + hate: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::hate], + hate_threatening: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::hate_threatening], + illicit: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::illicit], + illicit_violent: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::illicit_violent], + self_harm: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::self_harm], + self_harm_instructions: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::self_harm_instruction], + self_harm_intent: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::self_harm_intent], + sexual: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::sexual], + sexual_minors: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::sexual_minor], + violence: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::violence], + violence_graphic: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::violence_graphic] + ) -> void + + def to_hash: -> { + harassment: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::harassment], + harassment_threatening: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::harassment_threatening], + hate: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::hate], + hate_threatening: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::hate_threatening], + illicit: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::illicit], + illicit_violent: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::illicit_violent], + self_harm: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::self_harm], + self_harm_instructions: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::self_harm_instruction], + self_harm_intent: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::self_harm_intent], + sexual: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::sexual], + sexual_minors: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::sexual_minor], + violence: ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::violence], + violence_graphic: 
::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::violence_graphic] + } type harassment = :text - class Harassment < OpenAI::Enum + module Harassment + extend OpenAI::Internal::Type::Enum + TEXT: :text - def self.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::harassment] + def self?.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::harassment] end type harassment_threatening = :text - class HarassmentThreatening < OpenAI::Enum + module HarassmentThreatening + extend OpenAI::Internal::Type::Enum + TEXT: :text - def self.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::harassment_threatening] + def self?.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::harassment_threatening] end type hate = :text - class Hate < OpenAI::Enum + module Hate + extend OpenAI::Internal::Type::Enum + TEXT: :text - def self.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::hate] + def self?.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::hate] end type hate_threatening = :text - class HateThreatening < OpenAI::Enum + module HateThreatening + extend OpenAI::Internal::Type::Enum + TEXT: :text - def self.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::hate_threatening] + def self?.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::hate_threatening] end type illicit = :text - class Illicit < OpenAI::Enum + module Illicit + extend OpenAI::Internal::Type::Enum + TEXT: :text - def self.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::illicit] + def self?.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::illicit] end type illicit_violent = :text - class IllicitViolent < OpenAI::Enum + module IllicitViolent + extend OpenAI::Internal::Type::Enum + TEXT: :text - def self.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::illicit_violent] + def self?.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::illicit_violent] end type self_harm = :text | :image - class SelfHarm < OpenAI::Enum + module SelfHarm + extend OpenAI::Internal::Type::Enum + TEXT: :text IMAGE: :image - def self.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::self_harm] + def self?.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::self_harm] end type self_harm_instruction = :text | :image - class SelfHarmInstruction < OpenAI::Enum + module SelfHarmInstruction + extend OpenAI::Internal::Type::Enum + TEXT: :text IMAGE: :image - def self.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::self_harm_instruction] + def self?.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::self_harm_instruction] end type self_harm_intent = :text | :image - class SelfHarmIntent < OpenAI::Enum + module SelfHarmIntent + extend OpenAI::Internal::Type::Enum + TEXT: :text IMAGE: :image - def self.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::self_harm_intent] + def self?.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::self_harm_intent] end type sexual = :text | :image - class Sexual < OpenAI::Enum + module Sexual + extend OpenAI::Internal::Type::Enum + TEXT: :text IMAGE: :image - def self.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::sexual] + def self?.values: -> 
::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::sexual] end type sexual_minor = :text - class SexualMinor < OpenAI::Enum + module SexualMinor + extend OpenAI::Internal::Type::Enum + TEXT: :text - def self.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::sexual_minor] + def self?.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::sexual_minor] end type violence = :text | :image - class Violence < OpenAI::Enum + module Violence + extend OpenAI::Internal::Type::Enum + TEXT: :text IMAGE: :image - def self.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::violence] + def self?.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::violence] end type violence_graphic = :text | :image - class ViolenceGraphic < OpenAI::Enum + module ViolenceGraphic + extend OpenAI::Internal::Type::Enum + TEXT: :text IMAGE: :image - def self.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::violence_graphic] + def self?.values: -> ::Array[OpenAI::Models::Moderation::CategoryAppliedInputTypes::violence_graphic] end end @@ -290,7 +338,7 @@ module OpenAI violence_graphic: Float } - class CategoryScores < OpenAI::BaseModel + class CategoryScores < OpenAI::Internal::Type::BaseModel attr_accessor harassment: Float attr_accessor harassment_threatening: Float @@ -317,28 +365,37 @@ module OpenAI attr_accessor violence_graphic: Float - def initialize: - ( - harassment: Float, - harassment_threatening: Float, - hate: Float, - hate_threatening: Float, - illicit: Float, - illicit_violent: Float, - self_harm: Float, - self_harm_instructions: Float, - self_harm_intent: Float, - sexual: Float, - sexual_minors: Float, - violence: Float, - violence_graphic: Float - ) -> void - | ( - ?OpenAI::Models::Moderation::category_scores - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Moderation::category_scores + def initialize: ( + harassment: Float, + harassment_threatening: Float, + hate: Float, + hate_threatening: Float, + illicit: Float, + illicit_violent: Float, + self_harm: Float, + self_harm_instructions: Float, + self_harm_intent: Float, + sexual: Float, + sexual_minors: Float, + violence: Float, + violence_graphic: Float + ) -> void + + def to_hash: -> { + harassment: Float, + harassment_threatening: Float, + hate: Float, + hate_threatening: Float, + illicit: Float, + illicit_violent: Float, + self_harm: Float, + self_harm_instructions: Float, + self_harm_intent: Float, + sexual: Float, + sexual_minors: Float, + violence: Float, + violence_graphic: Float + } end end end diff --git a/sig/openai/models/moderation_create_params.rbs b/sig/openai/models/moderation_create_params.rbs index 0ea15edd..832d4052 100644 --- a/sig/openai/models/moderation_create_params.rbs +++ b/sig/openai/models/moderation_create_params.rbs @@ -5,11 +5,11 @@ module OpenAI input: OpenAI::Models::ModerationCreateParams::input, model: OpenAI::Models::ModerationCreateParams::model } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class ModerationCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class ModerationCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor input: OpenAI::Models::ModerationCreateParams::input @@ -19,40 +19,39 @@ module OpenAI OpenAI::Models::ModerationCreateParams::model 
) -> OpenAI::Models::ModerationCreateParams::model - def initialize: - ( - input: OpenAI::Models::ModerationCreateParams::input, - model: OpenAI::Models::ModerationCreateParams::model, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::moderation_create_params | OpenAI::BaseModel data - ) -> void + def initialize: ( + input: OpenAI::Models::ModerationCreateParams::input, + ?model: OpenAI::Models::ModerationCreateParams::model, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::moderation_create_params + def to_hash: -> { + input: OpenAI::Models::ModerationCreateParams::input, + model: OpenAI::Models::ModerationCreateParams::model, + request_options: OpenAI::RequestOptions + } type input = String | ::Array[String] | ::Array[OpenAI::Models::moderation_multi_modal_input] - class Input < OpenAI::Union - type string_array = ::Array[String] + module Input + extend OpenAI::Internal::Type::Union - StringArray: string_array + def self?.variants: -> ::Array[OpenAI::Models::ModerationCreateParams::input] - type moderation_multi_modal_input_array = - ::Array[OpenAI::Models::moderation_multi_modal_input] + StringArray: OpenAI::Internal::Type::Converter - ModerationMultiModalInputArray: moderation_multi_modal_input_array - - private def self.variants: -> [[nil, String], [nil, ::Array[String]], [nil, ::Array[OpenAI::Models::moderation_multi_modal_input]]] + ModerationMultiModalInputArray: OpenAI::Internal::Type::Converter end type model = String | OpenAI::Models::moderation_model - class Model < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, OpenAI::Models::moderation_model]] + module Model + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::ModerationCreateParams::model] end end end diff --git a/sig/openai/models/moderation_create_response.rbs b/sig/openai/models/moderation_create_response.rbs index faa955af..6f9757a1 100644 --- a/sig/openai/models/moderation_create_response.rbs +++ b/sig/openai/models/moderation_create_response.rbs @@ -1,30 +1,26 @@ module OpenAI module Models type moderation_create_response = - { - id: String, - model: String, - results: ::Array[OpenAI::Models::Moderation] - } + { id: String, model: String, results: ::Array[OpenAI::Moderation] } - class ModerationCreateResponse < OpenAI::BaseModel + class ModerationCreateResponse < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor model: String - attr_accessor results: ::Array[OpenAI::Models::Moderation] + attr_accessor results: ::Array[OpenAI::Moderation] - def initialize: - ( - id: String, - model: String, - results: ::Array[OpenAI::Models::Moderation] - ) -> void - | ( - ?OpenAI::Models::moderation_create_response | OpenAI::BaseModel data - ) -> void + def initialize: ( + id: String, + model: String, + results: ::Array[OpenAI::Moderation] + ) -> void - def to_hash: -> OpenAI::Models::moderation_create_response + def to_hash: -> { + id: String, + model: String, + results: ::Array[OpenAI::Moderation] + } end end end diff --git a/sig/openai/models/moderation_image_url_input.rbs b/sig/openai/models/moderation_image_url_input.rbs index b780a3e0..d373c517 100644 --- a/sig/openai/models/moderation_image_url_input.rbs +++ b/sig/openai/models/moderation_image_url_input.rbs @@ -1,40 +1,31 @@ module OpenAI module Models type moderation_image_url_input = - { - image_url: OpenAI::Models::ModerationImageURLInput::ImageURL, - type: :image_url - } + { image_url: OpenAI::ModerationImageURLInput::ImageURL, 
type: :image_url } - class ModerationImageURLInput < OpenAI::BaseModel - attr_accessor image_url: OpenAI::Models::ModerationImageURLInput::ImageURL + class ModerationImageURLInput < OpenAI::Internal::Type::BaseModel + attr_accessor image_url: OpenAI::ModerationImageURLInput::ImageURL attr_accessor type: :image_url - def initialize: - ( - image_url: OpenAI::Models::ModerationImageURLInput::ImageURL, - type: :image_url - ) -> void - | ( - ?OpenAI::Models::moderation_image_url_input | OpenAI::BaseModel data - ) -> void + def initialize: ( + image_url: OpenAI::ModerationImageURLInput::ImageURL, + ?type: :image_url + ) -> void - def to_hash: -> OpenAI::Models::moderation_image_url_input + def to_hash: -> { + image_url: OpenAI::ModerationImageURLInput::ImageURL, + type: :image_url + } type image_url = { url: String } - class ImageURL < OpenAI::BaseModel + class ImageURL < OpenAI::Internal::Type::BaseModel attr_accessor url: String - def initialize: - (url: String) -> void - | ( - ?OpenAI::Models::ModerationImageURLInput::image_url - | OpenAI::BaseModel data - ) -> void + def initialize: (url: String) -> void - def to_hash: -> OpenAI::Models::ModerationImageURLInput::image_url + def to_hash: -> { url: String } end end end diff --git a/sig/openai/models/moderation_model.rbs b/sig/openai/models/moderation_model.rbs index 6fb884f3..228342be 100644 --- a/sig/openai/models/moderation_model.rbs +++ b/sig/openai/models/moderation_model.rbs @@ -6,13 +6,15 @@ module OpenAI | :"text-moderation-latest" | :"text-moderation-stable" - class ModerationModel < OpenAI::Enum + module ModerationModel + extend OpenAI::Internal::Type::Enum + OMNI_MODERATION_LATEST: :"omni-moderation-latest" OMNI_MODERATION_2024_09_26: :"omni-moderation-2024-09-26" TEXT_MODERATION_LATEST: :"text-moderation-latest" TEXT_MODERATION_STABLE: :"text-moderation-stable" - def self.values: -> ::Array[OpenAI::Models::moderation_model] + def self?.values: -> ::Array[OpenAI::Models::moderation_model] end end end diff --git a/sig/openai/models/moderation_multi_modal_input.rbs b/sig/openai/models/moderation_multi_modal_input.rbs index af2b3a37..ba782390 100644 --- a/sig/openai/models/moderation_multi_modal_input.rbs +++ b/sig/openai/models/moderation_multi_modal_input.rbs @@ -1,11 +1,12 @@ module OpenAI module Models type moderation_multi_modal_input = - OpenAI::Models::ModerationImageURLInput - | OpenAI::Models::ModerationTextInput + OpenAI::ModerationImageURLInput | OpenAI::ModerationTextInput - class ModerationMultiModalInput < OpenAI::Union - private def self.variants: -> [[:image_url, OpenAI::Models::ModerationImageURLInput], [:text, OpenAI::Models::ModerationTextInput]] + module ModerationMultiModalInput + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::moderation_multi_modal_input] end end end diff --git a/sig/openai/models/moderation_text_input.rbs b/sig/openai/models/moderation_text_input.rbs index 58c7920b..fea26990 100644 --- a/sig/openai/models/moderation_text_input.rbs +++ b/sig/openai/models/moderation_text_input.rbs @@ -2,18 +2,14 @@ module OpenAI module Models type moderation_text_input = { text: String, type: :text } - class ModerationTextInput < OpenAI::BaseModel + class ModerationTextInput < OpenAI::Internal::Type::BaseModel attr_accessor text: String attr_accessor type: :text - def initialize: - (text: String, type: :text) -> void - | ( - ?OpenAI::Models::moderation_text_input | OpenAI::BaseModel data - ) -> void + def initialize: (text: String, ?type: :text) -> void - def to_hash: -> 
OpenAI::Models::moderation_text_input + def to_hash: -> { text: String, type: :text } end end end diff --git a/sig/openai/models/other_file_chunking_strategy_object.rbs b/sig/openai/models/other_file_chunking_strategy_object.rbs index 253ded71..10298f79 100644 --- a/sig/openai/models/other_file_chunking_strategy_object.rbs +++ b/sig/openai/models/other_file_chunking_strategy_object.rbs @@ -2,17 +2,12 @@ module OpenAI module Models type other_file_chunking_strategy_object = { type: :other } - class OtherFileChunkingStrategyObject < OpenAI::BaseModel + class OtherFileChunkingStrategyObject < OpenAI::Internal::Type::BaseModel attr_accessor type: :other - def initialize: - (type: :other) -> void - | ( - ?OpenAI::Models::other_file_chunking_strategy_object - | OpenAI::BaseModel data - ) -> void + def initialize: (?type: :other) -> void - def to_hash: -> OpenAI::Models::other_file_chunking_strategy_object + def to_hash: -> { type: :other } end end end diff --git a/sig/openai/models/reasoning.rbs b/sig/openai/models/reasoning.rbs index 3a38b298..7a7e744d 100644 --- a/sig/openai/models/reasoning.rbs +++ b/sig/openai/models/reasoning.rbs @@ -3,30 +3,51 @@ module OpenAI type reasoning = { effort: OpenAI::Models::reasoning_effort?, - generate_summary: OpenAI::Models::Reasoning::generate_summary? + generate_summary: OpenAI::Models::Reasoning::generate_summary?, + summary: OpenAI::Models::Reasoning::summary? } - class Reasoning < OpenAI::BaseModel + class Reasoning < OpenAI::Internal::Type::BaseModel attr_accessor effort: OpenAI::Models::reasoning_effort? attr_accessor generate_summary: OpenAI::Models::Reasoning::generate_summary? - def initialize: - ( - effort: OpenAI::Models::reasoning_effort?, - generate_summary: OpenAI::Models::Reasoning::generate_summary? - ) -> void - | (?OpenAI::Models::reasoning | OpenAI::BaseModel data) -> void + attr_accessor summary: OpenAI::Models::Reasoning::summary? - def to_hash: -> OpenAI::Models::reasoning + def initialize: ( + ?effort: OpenAI::Models::reasoning_effort?, + ?generate_summary: OpenAI::Models::Reasoning::generate_summary?, + ?summary: OpenAI::Models::Reasoning::summary? + ) -> void - type generate_summary = :concise | :detailed + def to_hash: -> { + effort: OpenAI::Models::reasoning_effort?, + generate_summary: OpenAI::Models::Reasoning::generate_summary?, + summary: OpenAI::Models::Reasoning::summary? 
+ } + + type generate_summary = :auto | :concise | :detailed + + module GenerateSummary + extend OpenAI::Internal::Type::Enum + + AUTO: :auto + CONCISE: :concise + DETAILED: :detailed + + def self?.values: -> ::Array[OpenAI::Models::Reasoning::generate_summary] + end + + type summary = :auto | :concise | :detailed + + module Summary + extend OpenAI::Internal::Type::Enum - class GenerateSummary < OpenAI::Enum + AUTO: :auto CONCISE: :concise DETAILED: :detailed - def self.values: -> ::Array[OpenAI::Models::Reasoning::generate_summary] + def self?.values: -> ::Array[OpenAI::Models::Reasoning::summary] end end end diff --git a/sig/openai/models/reasoning_effort.rbs b/sig/openai/models/reasoning_effort.rbs index 57327554..2245e639 100644 --- a/sig/openai/models/reasoning_effort.rbs +++ b/sig/openai/models/reasoning_effort.rbs @@ -1,13 +1,16 @@ module OpenAI module Models - type reasoning_effort = :low | :medium | :high + type reasoning_effort = :minimal | :low | :medium | :high - class ReasoningEffort < OpenAI::Enum + module ReasoningEffort + extend OpenAI::Internal::Type::Enum + + MINIMAL: :minimal LOW: :low MEDIUM: :medium HIGH: :high - def self.values: -> ::Array[OpenAI::Models::reasoning_effort] + def self?.values: -> ::Array[OpenAI::Models::reasoning_effort] end end end diff --git a/sig/openai/models/response_format_json_object.rbs b/sig/openai/models/response_format_json_object.rbs index 3e129a52..f8141178 100644 --- a/sig/openai/models/response_format_json_object.rbs +++ b/sig/openai/models/response_format_json_object.rbs @@ -2,16 +2,12 @@ module OpenAI module Models type response_format_json_object = { type: :json_object } - class ResponseFormatJSONObject < OpenAI::BaseModel + class ResponseFormatJSONObject < OpenAI::Internal::Type::BaseModel attr_accessor type: :json_object - def initialize: - (type: :json_object) -> void - | ( - ?OpenAI::Models::response_format_json_object | OpenAI::BaseModel data - ) -> void + def initialize: (?type: :json_object) -> void - def to_hash: -> OpenAI::Models::response_format_json_object + def to_hash: -> { type: :json_object } end end end diff --git a/sig/openai/models/response_format_json_schema.rbs b/sig/openai/models/response_format_json_schema.rbs index 7b3816a5..e806b510 100644 --- a/sig/openai/models/response_format_json_schema.rbs +++ b/sig/openai/models/response_format_json_schema.rbs @@ -2,25 +2,24 @@ module OpenAI module Models type response_format_json_schema = { - json_schema: OpenAI::Models::ResponseFormatJSONSchema::JSONSchema, + json_schema: OpenAI::ResponseFormatJSONSchema::JSONSchema, type: :json_schema } - class ResponseFormatJSONSchema < OpenAI::BaseModel - attr_accessor json_schema: OpenAI::Models::ResponseFormatJSONSchema::JSONSchema + class ResponseFormatJSONSchema < OpenAI::Internal::Type::BaseModel + attr_accessor json_schema: OpenAI::ResponseFormatJSONSchema::JSONSchema attr_accessor type: :json_schema - def initialize: - ( - json_schema: OpenAI::Models::ResponseFormatJSONSchema::JSONSchema, - type: :json_schema - ) -> void - | ( - ?OpenAI::Models::response_format_json_schema | OpenAI::BaseModel data - ) -> void + def initialize: ( + json_schema: OpenAI::ResponseFormatJSONSchema::JSONSchema, + ?type: :json_schema + ) -> void - def to_hash: -> OpenAI::Models::response_format_json_schema + def to_hash: -> { + json_schema: OpenAI::ResponseFormatJSONSchema::JSONSchema, + type: :json_schema + } type json_schema = { @@ -30,7 +29,7 @@ module OpenAI strict: bool? 
diff --git a/sig/openai/models/response_format_json_object.rbs b/sig/openai/models/response_format_json_object.rbs
index 3e129a52..f8141178 100644
--- a/sig/openai/models/response_format_json_object.rbs
+++ b/sig/openai/models/response_format_json_object.rbs
@@ -2,16 +2,12 @@ module OpenAI
   module Models
     type response_format_json_object = { type: :json_object }
 
-    class ResponseFormatJSONObject < OpenAI::BaseModel
+    class ResponseFormatJSONObject < OpenAI::Internal::Type::BaseModel
       attr_accessor type: :json_object
 
-      def initialize:
-        (type: :json_object) -> void
-        | (
-          ?OpenAI::Models::response_format_json_object | OpenAI::BaseModel data
-        ) -> void
+      def initialize: (?type: :json_object) -> void
 
-      def to_hash: -> OpenAI::Models::response_format_json_object
+      def to_hash: -> { type: :json_object }
     end
   end
 end
diff --git a/sig/openai/models/response_format_json_schema.rbs b/sig/openai/models/response_format_json_schema.rbs
index 7b3816a5..e806b510 100644
--- a/sig/openai/models/response_format_json_schema.rbs
+++ b/sig/openai/models/response_format_json_schema.rbs
@@ -2,25 +2,24 @@ module OpenAI
   module Models
     type response_format_json_schema =
       {
-        json_schema: OpenAI::Models::ResponseFormatJSONSchema::JSONSchema,
+        json_schema: OpenAI::ResponseFormatJSONSchema::JSONSchema,
         type: :json_schema
       }
 
-    class ResponseFormatJSONSchema < OpenAI::BaseModel
-      attr_accessor json_schema: OpenAI::Models::ResponseFormatJSONSchema::JSONSchema
+    class ResponseFormatJSONSchema < OpenAI::Internal::Type::BaseModel
+      attr_accessor json_schema: OpenAI::ResponseFormatJSONSchema::JSONSchema
 
       attr_accessor type: :json_schema
 
-      def initialize:
-        (
-          json_schema: OpenAI::Models::ResponseFormatJSONSchema::JSONSchema,
-          type: :json_schema
-        ) -> void
-        | (
-          ?OpenAI::Models::response_format_json_schema | OpenAI::BaseModel data
-        ) -> void
+      def initialize: (
+        json_schema: OpenAI::ResponseFormatJSONSchema::JSONSchema,
+        ?type: :json_schema
+      ) -> void
 
-      def to_hash: -> OpenAI::Models::response_format_json_schema
+      def to_hash: -> {
+        json_schema: OpenAI::ResponseFormatJSONSchema::JSONSchema,
+        type: :json_schema
+      }
 
       type json_schema =
         {
@@ -30,7 +29,7 @@ module OpenAI
          strict: bool?
        }
 
-      class JSONSchema < OpenAI::BaseModel
+      class JSONSchema < OpenAI::Internal::Type::BaseModel
         attr_accessor name: String
 
         attr_reader description: String?
@@ -43,19 +42,19 @@ module OpenAI
 
         attr_accessor strict: bool?
 
-        def initialize:
-          (
-            name: String,
-            description: String,
-            schema: ::Hash[Symbol, top],
-            strict: bool?
-          ) -> void
-          | (
-            ?OpenAI::Models::ResponseFormatJSONSchema::json_schema
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::ResponseFormatJSONSchema::json_schema
+        def initialize: (
+          name: String,
+          ?description: String,
+          ?schema: ::Hash[Symbol, top],
+          ?strict: bool?
+        ) -> void
+
+        def to_hash: -> {
+          name: String,
+          description: String,
+          schema: ::Hash[Symbol, top],
+          strict: bool?
+        }
       end
     end
   end
diff --git a/sig/openai/models/response_format_text.rbs b/sig/openai/models/response_format_text.rbs
index c6c2619d..305c9a4d 100644
--- a/sig/openai/models/response_format_text.rbs
+++ b/sig/openai/models/response_format_text.rbs
@@ -2,16 +2,12 @@ module OpenAI
   module Models
     type response_format_text = { type: :text }
 
-    class ResponseFormatText < OpenAI::BaseModel
+    class ResponseFormatText < OpenAI::Internal::Type::BaseModel
       attr_accessor type: :text
 
-      def initialize:
-        (type: :text) -> void
-        | (
-          ?OpenAI::Models::response_format_text | OpenAI::BaseModel data
-        ) -> void
+      def initialize: (?type: :text) -> void
 
-      def to_hash: -> OpenAI::Models::response_format_text
+      def to_hash: -> { type: :text }
     end
   end
 end
diff --git a/sig/openai/models/response_format_text_grammar.rbs b/sig/openai/models/response_format_text_grammar.rbs
new file mode 100644
index 00000000..5a49c0bd
--- /dev/null
+++ b/sig/openai/models/response_format_text_grammar.rbs
@@ -0,0 +1,15 @@
+module OpenAI
+  module Models
+    type response_format_text_grammar = { grammar: String, type: :grammar }
+
+    class ResponseFormatTextGrammar < OpenAI::Internal::Type::BaseModel
+      attr_accessor grammar: String
+
+      attr_accessor type: :grammar
+
+      def initialize: (grammar: String, ?type: :grammar) -> void
+
+      def to_hash: -> { grammar: String, type: :grammar }
+    end
+  end
+end
diff --git a/sig/openai/models/response_format_text_python.rbs b/sig/openai/models/response_format_text_python.rbs
new file mode 100644
index 00000000..ac13e843
--- /dev/null
+++ b/sig/openai/models/response_format_text_python.rbs
@@ -0,0 +1,13 @@
+module OpenAI
+  module Models
+    type response_format_text_python = { type: :python }
+
+    class ResponseFormatTextPython < OpenAI::Internal::Type::BaseModel
+      attr_accessor type: :python
+
+      def initialize: (?type: :python) -> void
+
+      def to_hash: -> { type: :python }
+    end
+  end
+end
diff --git a/sig/openai/models/responses/computer_tool.rbs b/sig/openai/models/responses/computer_tool.rbs
index 39cc531c..a2e42564 100644
--- a/sig/openai/models/responses/computer_tool.rbs
+++ b/sig/openai/models/responses/computer_tool.rbs
@@ -3,43 +3,47 @@ module OpenAI
     module Responses
       type computer_tool =
         {
-          display_height: Float,
-          display_width: Float,
+          display_height: Integer,
+          display_width: Integer,
           environment: OpenAI::Models::Responses::ComputerTool::environment,
           type: :computer_use_preview
         }
 
-      class ComputerTool < OpenAI::BaseModel
-        attr_accessor display_height: Float
+      class ComputerTool < OpenAI::Internal::Type::BaseModel
+        attr_accessor display_height: Integer
 
-        attr_accessor display_width: Float
+        attr_accessor display_width: Integer
 
         attr_accessor environment: OpenAI::Models::Responses::ComputerTool::environment
 
         attr_accessor type: :computer_use_preview
 
-        def initialize:
-          (
-            display_height: Float,
-            display_width: Float,
-            environment: OpenAI::Models::Responses::ComputerTool::environment,
-            type: :computer_use_preview
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::computer_tool | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          display_height: Integer,
+          display_width: Integer,
+          environment: OpenAI::Models::Responses::ComputerTool::environment,
+          ?type: :computer_use_preview
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::computer_tool
+        def to_hash: -> {
+          display_height: Integer,
+          display_width: Integer,
+          environment: OpenAI::Models::Responses::ComputerTool::environment,
+          type: :computer_use_preview
+        }
 
-        type environment = :mac | :windows | :ubuntu | :browser
+        type environment = :windows | :mac | :linux | :ubuntu | :browser
+
+        module Environment
+          extend OpenAI::Internal::Type::Enum
 
-        class Environment < OpenAI::Enum
-          MAC: :mac
           WINDOWS: :windows
+          MAC: :mac
+          LINUX: :linux
           UBUNTU: :ubuntu
           BROWSER: :browser
 
-          def self.values: -> ::Array[OpenAI::Models::Responses::ComputerTool::environment]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ComputerTool::environment]
        end
      end
    end
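A hedged sketch of the revised `ComputerTool` signature: the display dimensions are now `Integer`, the environment enum gains `:linux`, and `type` becomes an optional keyword defaulting to `:computer_use_preview`:

```ruby
# Sketch only: field names follow the RBS above; client wiring is assumed.
tool = OpenAI::Responses::ComputerTool.new(
  display_width: 1024,  # Integer now, previously Float
  display_height: 768,
  environment: :linux   # newly accepted alongside :windows, :mac, :ubuntu, :browser
)
tool.type # => :computer_use_preview (defaulted)
```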
diff --git a/sig/openai/models/responses/custom_tool.rbs b/sig/openai/models/responses/custom_tool.rbs
new file mode 100644
index 00000000..6d529cea
--- /dev/null
+++ b/sig/openai/models/responses/custom_tool.rbs
@@ -0,0 +1,43 @@
+module OpenAI
+  module Models
+    module Responses
+      type custom_tool =
+        {
+          name: String,
+          type: :custom,
+          description: String,
+          format_: OpenAI::Models::custom_tool_input_format
+        }
+
+      class CustomTool < OpenAI::Internal::Type::BaseModel
+        attr_accessor name: String
+
+        attr_accessor type: :custom
+
+        attr_reader description: String?
+
+        def description=: (String) -> String
+
+        attr_reader format_: OpenAI::Models::custom_tool_input_format?
+
+        def format_=: (
+          OpenAI::Models::custom_tool_input_format
+        ) -> OpenAI::Models::custom_tool_input_format
+
+        def initialize: (
+          name: String,
+          ?description: String,
+          ?format_: OpenAI::Models::custom_tool_input_format,
+          ?type: :custom
+        ) -> void
+
+        def to_hash: -> {
+          name: String,
+          type: :custom,
+          description: String,
+          format_: OpenAI::Models::custom_tool_input_format
+        }
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/easy_input_message.rbs b/sig/openai/models/responses/easy_input_message.rbs
index 4f7d7d6b..eb94d8c1 100644
--- a/sig/openai/models/responses/easy_input_message.rbs
+++ b/sig/openai/models/responses/easy_input_message.rbs
@@ -8,7 +8,7 @@ module OpenAI
           type: OpenAI::Models::Responses::EasyInputMessage::type_
         }
 
-      class EasyInputMessage < OpenAI::BaseModel
+      class EasyInputMessage < OpenAI::Internal::Type::BaseModel
         attr_accessor content: OpenAI::Models::Responses::EasyInputMessage::content
 
         attr_accessor role: OpenAI::Models::Responses::EasyInputMessage::role
@@ -19,44 +19,49 @@ module OpenAI
           OpenAI::Models::Responses::EasyInputMessage::type_
         ) -> OpenAI::Models::Responses::EasyInputMessage::type_
 
-        def initialize:
-          (
-            content: OpenAI::Models::Responses::EasyInputMessage::content,
-            role: OpenAI::Models::Responses::EasyInputMessage::role,
-            type: OpenAI::Models::Responses::EasyInputMessage::type_
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::easy_input_message
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          content: OpenAI::Models::Responses::EasyInputMessage::content,
+          role: OpenAI::Models::Responses::EasyInputMessage::role,
+          ?type: OpenAI::Models::Responses::EasyInputMessage::type_
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::easy_input_message
+        def to_hash: -> {
+          content: OpenAI::Models::Responses::EasyInputMessage::content,
+          role: OpenAI::Models::Responses::EasyInputMessage::role,
+          type: OpenAI::Models::Responses::EasyInputMessage::type_
+        }
 
         type content =
           String
           | OpenAI::Models::Responses::response_input_message_content_list
 
-        class Content < OpenAI::Union
-          private def self.variants: -> [[nil, String], [nil, OpenAI::Models::Responses::response_input_message_content_list]]
+        module Content
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Responses::EasyInputMessage::content]
         end
 
         type role = :user | :assistant | :system | :developer
 
-        class Role < OpenAI::Enum
+        module Role
+          extend OpenAI::Internal::Type::Enum
+
           USER: :user
           ASSISTANT: :assistant
           SYSTEM: :system
           DEVELOPER: :developer
 
-          def self.values: -> ::Array[OpenAI::Models::Responses::EasyInputMessage::role]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::EasyInputMessage::role]
         end
 
         type type_ = :message
 
-        class Type < OpenAI::Enum
+        module Type
+          extend OpenAI::Internal::Type::Enum
+
           MESSAGE: :message
 
-          def self.values: -> ::Array[OpenAI::Models::Responses::EasyInputMessage::type_]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::EasyInputMessage::type_]
        end
      end
    end
diff --git a/sig/openai/models/responses/file_search_tool.rbs b/sig/openai/models/responses/file_search_tool.rbs
index 2c1ab9da..cc1a7d01 100644
--- a/sig/openai/models/responses/file_search_tool.rbs
+++ b/sig/openai/models/responses/file_search_tool.rbs
@@ -5,52 +5,50 @@ module OpenAI
         {
           type: :file_search,
           vector_store_ids: ::Array[String],
-          filters: OpenAI::Models::Responses::FileSearchTool::filters,
+          filters: OpenAI::Models::Responses::FileSearchTool::filters?,
           max_num_results: Integer,
-          ranking_options: OpenAI::Models::Responses::FileSearchTool::RankingOptions
+          ranking_options: OpenAI::Responses::FileSearchTool::RankingOptions
         }
 
-      class FileSearchTool < OpenAI::BaseModel
+      class FileSearchTool < OpenAI::Internal::Type::BaseModel
         attr_accessor type: :file_search
 
         attr_accessor vector_store_ids: ::Array[String]
 
-        attr_reader filters: OpenAI::Models::Responses::FileSearchTool::filters?
-
-        def filters=: (
-          OpenAI::Models::Responses::FileSearchTool::filters
-        ) -> OpenAI::Models::Responses::FileSearchTool::filters
+        attr_accessor filters: OpenAI::Models::Responses::FileSearchTool::filters?
 
         attr_reader max_num_results: Integer?
 
         def max_num_results=: (Integer) -> Integer
 
-        attr_reader ranking_options: OpenAI::Models::Responses::FileSearchTool::RankingOptions?
+        attr_reader ranking_options: OpenAI::Responses::FileSearchTool::RankingOptions?
 
         def ranking_options=: (
-          OpenAI::Models::Responses::FileSearchTool::RankingOptions
-        ) -> OpenAI::Models::Responses::FileSearchTool::RankingOptions
-
-        def initialize:
-          (
-            vector_store_ids: ::Array[String],
-            filters: OpenAI::Models::Responses::FileSearchTool::filters,
-            max_num_results: Integer,
-            ranking_options: OpenAI::Models::Responses::FileSearchTool::RankingOptions,
-            type: :file_search
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::file_search_tool
-            | OpenAI::BaseModel data
-          ) -> void
+          OpenAI::Responses::FileSearchTool::RankingOptions
+        ) -> OpenAI::Responses::FileSearchTool::RankingOptions
 
-        def to_hash: -> OpenAI::Models::Responses::file_search_tool
+        def initialize: (
+          vector_store_ids: ::Array[String],
+          ?filters: OpenAI::Models::Responses::FileSearchTool::filters?,
+          ?max_num_results: Integer,
+          ?ranking_options: OpenAI::Responses::FileSearchTool::RankingOptions,
+          ?type: :file_search
+        ) -> void
 
-        type filters =
-          OpenAI::Models::ComparisonFilter | OpenAI::Models::CompoundFilter
+        def to_hash: -> {
+          type: :file_search,
+          vector_store_ids: ::Array[String],
+          filters: OpenAI::Models::Responses::FileSearchTool::filters?,
+          max_num_results: Integer,
+          ranking_options: OpenAI::Responses::FileSearchTool::RankingOptions
+        }
 
-        class Filters < OpenAI::Union
-          private def self.variants: -> [[nil, OpenAI::Models::ComparisonFilter], [nil, OpenAI::Models::CompoundFilter]]
+        type filters = OpenAI::ComparisonFilter | OpenAI::CompoundFilter
+
+        module Filters
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Responses::FileSearchTool::filters]
         end
 
         type ranking_options =
@@ -59,7 +57,7 @@ module OpenAI
            score_threshold: Float
          }
 
-        class RankingOptions < OpenAI::BaseModel
+        class RankingOptions < OpenAI::Internal::Type::BaseModel
           attr_reader ranker: OpenAI::Models::Responses::FileSearchTool::RankingOptions::ranker?
 
           def ranker=: (
@@ -70,25 +68,25 @@ module OpenAI
 
           def score_threshold=: (Float) -> Float
 
-          def initialize:
-            (
-              ranker: OpenAI::Models::Responses::FileSearchTool::RankingOptions::ranker,
-              score_threshold: Float
-            ) -> void
-            | (
-              ?OpenAI::Models::Responses::FileSearchTool::ranking_options
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            ?ranker: OpenAI::Models::Responses::FileSearchTool::RankingOptions::ranker,
+            ?score_threshold: Float
+          ) -> void
 
-          def to_hash: -> OpenAI::Models::Responses::FileSearchTool::ranking_options
+          def to_hash: -> {
+            ranker: OpenAI::Models::Responses::FileSearchTool::RankingOptions::ranker,
+            score_threshold: Float
+          }
 
           type ranker = :auto | :"default-2024-11-15"
 
-          class Ranker < OpenAI::Enum
+          module Ranker
+            extend OpenAI::Internal::Type::Enum
+
             AUTO: :auto
             DEFAULT_2024_11_15: :"default-2024-11-15"
 
-            def self.values: -> ::Array[OpenAI::Models::Responses::FileSearchTool::RankingOptions::ranker]
+            def self?.values: -> ::Array[OpenAI::Models::Responses::FileSearchTool::RankingOptions::ranker]
          end
        end
      end
diff --git a/sig/openai/models/responses/function_tool.rbs b/sig/openai/models/responses/function_tool.rbs
index 3a052a98..df249b37 100644
--- a/sig/openai/models/responses/function_tool.rbs
+++ b/sig/openai/models/responses/function_tool.rbs
@@ -4,36 +4,38 @@ module OpenAI
       type function_tool =
         {
           name: String,
-          parameters: ::Hash[Symbol, top],
-          strict: bool,
+          parameters: ::Hash[Symbol, top]?,
+          strict: bool?,
           type: :function,
           description: String?
         }
 
-      class FunctionTool < OpenAI::BaseModel
+      class FunctionTool < OpenAI::Internal::Type::BaseModel
         attr_accessor name: String
 
-        attr_accessor parameters: ::Hash[Symbol, top]
+        attr_accessor parameters: ::Hash[Symbol, top]?
 
-        attr_accessor strict: bool
+        attr_accessor strict: bool?
 
         attr_accessor type: :function
 
         attr_accessor description: String?
 
-        def initialize:
-          (
-            name: String,
-            parameters: ::Hash[Symbol, top],
-            strict: bool,
-            description: String?,
-            type: :function
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::function_tool | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          name: String,
+          parameters: ::Hash[Symbol, top]?,
+          strict: bool?,
+          ?description: String?,
+          ?type: :function
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::function_tool
+        def to_hash: -> {
+          name: String,
+          parameters: ::Hash[Symbol, top]?,
+          strict: bool?,
+          type: :function,
+          description: String?
+        }
      end
    end
  end
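Per the relaxed `FunctionTool` signature, `parameters` and `strict` are nullable but remain required keywords, while `description` and `type` become optional. A sketch:

```ruby
# Sketch only: a free-form function tool with no JSON-schema parameters.
tool = OpenAI::Responses::FunctionTool.new(
  name: "lookup_weather",
  parameters: nil, # nullable now, but still a required keyword
  strict: nil
)
tool.to_hash # => {name: "lookup_weather", parameters: nil, strict: nil, type: :function, ...}
```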
diff --git a/sig/openai/models/responses/input_item_list_params.rbs b/sig/openai/models/responses/input_item_list_params.rbs
index e9993f02..321f338c 100644
--- a/sig/openai/models/responses/input_item_list_params.rbs
+++ b/sig/openai/models/responses/input_item_list_params.rbs
@@ -4,23 +4,25 @@ module OpenAI
       type input_item_list_params =
         {
           after: String,
-          before: String,
+          include: ::Array[OpenAI::Models::Responses::response_includable],
           limit: Integer,
           order: OpenAI::Models::Responses::InputItemListParams::order
         }
-        & OpenAI::request_parameters
+        & OpenAI::Internal::Type::request_parameters
 
-      class InputItemListParams < OpenAI::BaseModel
-        extend OpenAI::RequestParameters::Converter
-        include OpenAI::RequestParameters
+      class InputItemListParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters
 
         attr_reader after: String?
 
         def after=: (String) -> String
 
-        attr_reader before: String?
+        attr_reader include: ::Array[OpenAI::Models::Responses::response_includable]?
 
-        def before=: (String) -> String
+        def include=: (
+          ::Array[OpenAI::Models::Responses::response_includable]
+        ) -> ::Array[OpenAI::Models::Responses::response_includable]
 
         attr_reader limit: Integer?
 
@@ -32,28 +34,31 @@ module OpenAI
           OpenAI::Models::Responses::InputItemListParams::order
         ) -> OpenAI::Models::Responses::InputItemListParams::order
 
-        def initialize:
-          (
-            after: String,
-            before: String,
-            limit: Integer,
-            order: OpenAI::Models::Responses::InputItemListParams::order,
-            request_options: OpenAI::request_opts
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::input_item_list_params
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Responses::input_item_list_params
+        def initialize: (
+          ?after: String,
+          ?include: ::Array[OpenAI::Models::Responses::response_includable],
+          ?limit: Integer,
+          ?order: OpenAI::Models::Responses::InputItemListParams::order,
+          ?request_options: OpenAI::request_opts
+        ) -> void
+
+        def to_hash: -> {
+          after: String,
+          include: ::Array[OpenAI::Models::Responses::response_includable],
+          limit: Integer,
+          order: OpenAI::Models::Responses::InputItemListParams::order,
+          request_options: OpenAI::RequestOptions
+        }
 
         type order = :asc | :desc
 
-        class Order < OpenAI::Enum
+        module Order
+          extend OpenAI::Internal::Type::Enum
+
           ASC: :asc
           DESC: :desc
 
-          def self.values: -> ::Array[OpenAI::Models::Responses::InputItemListParams::order]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::InputItemListParams::order]
        end
      end
    end
diff --git a/sig/openai/models/responses/response.rbs b/sig/openai/models/responses/response.rbs
index 7bee86f9..41dbcf8d 100644
--- a/sig/openai/models/responses/response.rbs
+++ b/sig/openai/models/responses/response.rbs
@@ -5,11 +5,11 @@ module OpenAI
         {
           id: String,
           created_at: Float,
-          error: OpenAI::Models::Responses::ResponseError?,
-          incomplete_details: OpenAI::Models::Responses::Response::IncompleteDetails?,
-          instructions: String?,
+          error: OpenAI::Responses::ResponseError?,
+          incomplete_details: OpenAI::Responses::Response::IncompleteDetails?,
+          instructions: OpenAI::Models::Responses::Response::instructions?,
           metadata: OpenAI::Models::metadata?,
-          model: OpenAI::Models::Responses::Response::model,
+          model: OpenAI::Models::responses_model,
           object: :response,
           output: ::Array[OpenAI::Models::Responses::response_output_item],
           parallel_tool_calls: bool,
@@ -17,30 +17,38 @@ module OpenAI
           tool_choice: OpenAI::Models::Responses::Response::tool_choice,
           tools: ::Array[OpenAI::Models::Responses::tool],
           top_p: Float?,
+          background: bool?,
+          conversation: OpenAI::Responses::Response::Conversation?,
           max_output_tokens: Integer?,
+          max_tool_calls: Integer?,
           previous_response_id: String?,
-          reasoning: OpenAI::Models::Reasoning?,
+          prompt: OpenAI::Responses::ResponsePrompt?,
+          prompt_cache_key: String,
+          reasoning: OpenAI::Reasoning?,
+          safety_identifier: String,
+          service_tier: OpenAI::Models::Responses::Response::service_tier?,
           status: OpenAI::Models::Responses::response_status,
-          text: OpenAI::Models::Responses::ResponseTextConfig,
+          text: OpenAI::Responses::ResponseTextConfig,
+          top_logprobs: Integer?,
           truncation: OpenAI::Models::Responses::Response::truncation?,
-          usage: OpenAI::Models::Responses::ResponseUsage,
+          usage: OpenAI::Responses::ResponseUsage,
           user: String
         }
 
-      class Response < OpenAI::BaseModel
+      class Response < OpenAI::Internal::Type::BaseModel
         attr_accessor id: String
 
         attr_accessor created_at: Float
 
-        attr_accessor error: OpenAI::Models::Responses::ResponseError?
+        attr_accessor error: OpenAI::Responses::ResponseError?
 
-        attr_accessor incomplete_details: OpenAI::Models::Responses::Response::IncompleteDetails?
+        attr_accessor incomplete_details: OpenAI::Responses::Response::IncompleteDetails?
 
-        attr_accessor instructions: String?
+        attr_accessor instructions: OpenAI::Models::Responses::Response::instructions?
 
         attr_accessor metadata: OpenAI::Models::metadata?
 
-        attr_accessor model: OpenAI::Models::Responses::Response::model
+        attr_accessor model: OpenAI::Models::responses_model
 
         attr_accessor object: :response
 
@@ -56,11 +64,29 @@ module OpenAI
 
         attr_accessor top_p: Float?
 
+        attr_accessor background: bool?
+
+        attr_accessor conversation: OpenAI::Responses::Response::Conversation?
+
         attr_accessor max_output_tokens: Integer?
 
+        attr_accessor max_tool_calls: Integer?
+
         attr_accessor previous_response_id: String?
 
-        attr_accessor reasoning: OpenAI::Models::Reasoning?
+        attr_accessor prompt: OpenAI::Responses::ResponsePrompt?
+
+        attr_reader prompt_cache_key: String?
+
+        def prompt_cache_key=: (String) -> String
+
+        attr_accessor reasoning: OpenAI::Reasoning?
+
+        attr_reader safety_identifier: String?
+
+        def safety_identifier=: (String) -> String
+
+        attr_accessor service_tier: OpenAI::Models::Responses::Response::service_tier?
 
         attr_reader status: OpenAI::Models::Responses::response_status?
 
@@ -68,110 +94,182 @@ module OpenAI
           OpenAI::Models::Responses::response_status
         ) -> OpenAI::Models::Responses::response_status
 
-        attr_reader text: OpenAI::Models::Responses::ResponseTextConfig?
+        attr_reader text: OpenAI::Responses::ResponseTextConfig?
 
         def text=: (
-          OpenAI::Models::Responses::ResponseTextConfig
-        ) -> OpenAI::Models::Responses::ResponseTextConfig
+          OpenAI::Responses::ResponseTextConfig
+        ) -> OpenAI::Responses::ResponseTextConfig
+
+        attr_accessor top_logprobs: Integer?
 
         attr_accessor truncation: OpenAI::Models::Responses::Response::truncation?
 
-        attr_reader usage: OpenAI::Models::Responses::ResponseUsage?
+        attr_reader usage: OpenAI::Responses::ResponseUsage?
 
         def usage=: (
-          OpenAI::Models::Responses::ResponseUsage
-        ) -> OpenAI::Models::Responses::ResponseUsage
+          OpenAI::Responses::ResponseUsage
+        ) -> OpenAI::Responses::ResponseUsage
 
         attr_reader user: String?
 
         def user=: (String) -> String
 
-        def initialize:
-          (
-            id: String,
-            created_at: Float,
-            error: OpenAI::Models::Responses::ResponseError?,
-            incomplete_details: OpenAI::Models::Responses::Response::IncompleteDetails?,
-            instructions: String?,
-            metadata: OpenAI::Models::metadata?,
-            model: OpenAI::Models::Responses::Response::model,
-            output: ::Array[OpenAI::Models::Responses::response_output_item],
-            parallel_tool_calls: bool,
-            temperature: Float?,
-            tool_choice: OpenAI::Models::Responses::Response::tool_choice,
-            tools: ::Array[OpenAI::Models::Responses::tool],
-            top_p: Float?,
-            max_output_tokens: Integer?,
-            previous_response_id: String?,
-            reasoning: OpenAI::Models::Reasoning?,
-            status: OpenAI::Models::Responses::response_status,
-            text: OpenAI::Models::Responses::ResponseTextConfig,
-            truncation: OpenAI::Models::Responses::Response::truncation?,
-            usage: OpenAI::Models::Responses::ResponseUsage,
-            user: String,
-            object: :response
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Responses::response
+        def initialize: (
+          id: String,
+          created_at: Float,
+          error: OpenAI::Responses::ResponseError?,
+          incomplete_details: OpenAI::Responses::Response::IncompleteDetails?,
+          instructions: OpenAI::Models::Responses::Response::instructions?,
+          metadata: OpenAI::Models::metadata?,
+          model: OpenAI::Models::responses_model,
+          output: ::Array[OpenAI::Models::Responses::response_output_item],
+          parallel_tool_calls: bool,
+          temperature: Float?,
+          tool_choice: OpenAI::Models::Responses::Response::tool_choice,
+          tools: ::Array[OpenAI::Models::Responses::tool],
+          top_p: Float?,
+          ?background: bool?,
+          ?conversation: OpenAI::Responses::Response::Conversation?,
+          ?max_output_tokens: Integer?,
+          ?max_tool_calls: Integer?,
+          ?previous_response_id: String?,
+          ?prompt: OpenAI::Responses::ResponsePrompt?,
+          ?prompt_cache_key: String,
+          ?reasoning: OpenAI::Reasoning?,
+          ?safety_identifier: String,
+          ?service_tier: OpenAI::Models::Responses::Response::service_tier?,
+          ?status: OpenAI::Models::Responses::response_status,
+          ?text: OpenAI::Responses::ResponseTextConfig,
+          ?top_logprobs: Integer?,
+          ?truncation: OpenAI::Models::Responses::Response::truncation?,
+          ?usage: OpenAI::Responses::ResponseUsage,
+          ?user: String,
+          ?object: :response
+        ) -> void
+
+        def to_hash: -> {
+          id: String,
+          created_at: Float,
+          error: OpenAI::Responses::ResponseError?,
+          incomplete_details: OpenAI::Responses::Response::IncompleteDetails?,
+          instructions: OpenAI::Models::Responses::Response::instructions?,
+          metadata: OpenAI::Models::metadata?,
+          model: OpenAI::Models::responses_model,
+          object: :response,
+          output: ::Array[OpenAI::Models::Responses::response_output_item],
+          parallel_tool_calls: bool,
+          temperature: Float?,
+          tool_choice: OpenAI::Models::Responses::Response::tool_choice,
+          tools: ::Array[OpenAI::Models::Responses::tool],
+          top_p: Float?,
+          background: bool?,
+          conversation: OpenAI::Responses::Response::Conversation?,
+          max_output_tokens: Integer?,
+          max_tool_calls: Integer?,
+          previous_response_id: String?,
+          prompt: OpenAI::Responses::ResponsePrompt?,
+          prompt_cache_key: String,
+          reasoning: OpenAI::Reasoning?,
+          safety_identifier: String,
+          service_tier: OpenAI::Models::Responses::Response::service_tier?,
+          status: OpenAI::Models::Responses::response_status,
+          text: OpenAI::Responses::ResponseTextConfig,
+          top_logprobs: Integer?,
+          truncation: OpenAI::Models::Responses::Response::truncation?,
+          usage: OpenAI::Responses::ResponseUsage,
+          user: String
+        }
 
         type incomplete_details =
           {
            reason: OpenAI::Models::Responses::Response::IncompleteDetails::reason
          }
 
-        class IncompleteDetails < OpenAI::BaseModel
+        class IncompleteDetails < OpenAI::Internal::Type::BaseModel
           attr_reader reason: OpenAI::Models::Responses::Response::IncompleteDetails::reason?
 
           def reason=: (
             OpenAI::Models::Responses::Response::IncompleteDetails::reason
           ) -> OpenAI::Models::Responses::Response::IncompleteDetails::reason
 
-          def initialize:
-            (
-              reason: OpenAI::Models::Responses::Response::IncompleteDetails::reason
-            ) -> void
-            | (
-              ?OpenAI::Models::Responses::Response::incomplete_details
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            ?reason: OpenAI::Models::Responses::Response::IncompleteDetails::reason
+          ) -> void
 
-          def to_hash: -> OpenAI::Models::Responses::Response::incomplete_details
+          def to_hash: -> {
+            reason: OpenAI::Models::Responses::Response::IncompleteDetails::reason
+          }
 
           type reason = :max_output_tokens | :content_filter
 
-          class Reason < OpenAI::Enum
+          module Reason
+            extend OpenAI::Internal::Type::Enum
+
             MAX_OUTPUT_TOKENS: :max_output_tokens
             CONTENT_FILTER: :content_filter
 
-            def self.values: -> ::Array[OpenAI::Models::Responses::Response::IncompleteDetails::reason]
+            def self?.values: -> ::Array[OpenAI::Models::Responses::Response::IncompleteDetails::reason]
           end
         end
 
-        type model = String | OpenAI::Models::chat_model
+        type instructions =
+          String | ::Array[OpenAI::Models::Responses::response_input_item]
+
+        module Instructions
+          extend OpenAI::Internal::Type::Union
 
-        class Model < OpenAI::Union
-          private def self.variants: -> [[nil, String], [nil, OpenAI::Models::chat_model]]
+          def self?.variants: -> ::Array[OpenAI::Models::Responses::Response::instructions]
+
+          ResponseInputItemArray: OpenAI::Internal::Type::Converter
         end
 
         type tool_choice =
           OpenAI::Models::Responses::tool_choice_options
-          | OpenAI::Models::Responses::ToolChoiceTypes
-          | OpenAI::Models::Responses::ToolChoiceFunction
+          | OpenAI::Responses::ToolChoiceAllowed
+          | OpenAI::Responses::ToolChoiceTypes
+          | OpenAI::Responses::ToolChoiceFunction
+          | OpenAI::Responses::ToolChoiceMcp
+          | OpenAI::Responses::ToolChoiceCustom
+
+        module ToolChoice
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Responses::Response::tool_choice]
+        end
+
+        type conversation = { id: String }
+
+        class Conversation < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          def initialize: (id: String) -> void
 
-        class ToolChoice < OpenAI::Union
-          private def self.variants: -> [[nil, OpenAI::Models::Responses::tool_choice_options], [nil, OpenAI::Models::Responses::ToolChoiceTypes], [nil, OpenAI::Models::Responses::ToolChoiceFunction]]
+          def to_hash: -> { id: String }
+        end
+
+        type service_tier = :auto | :default | :flex | :scale | :priority
+
+        module ServiceTier
+          extend OpenAI::Internal::Type::Enum
+
+          AUTO: :auto
+          DEFAULT: :default
+          FLEX: :flex
+          SCALE: :scale
+          PRIORITY: :priority
+
+          def self?.values: -> ::Array[OpenAI::Models::Responses::Response::service_tier]
         end
 
         type truncation = :auto | :disabled
 
-        class Truncation < OpenAI::Enum
+        module Truncation
+          extend OpenAI::Internal::Type::Enum
+
           AUTO: :auto
           DISABLED: :disabled
 
-          def self.values: -> ::Array[OpenAI::Models::Responses::Response::truncation]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::Response::truncation]
        end
      end
    end
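Reading the new optional `Response` fields might look like the following sketch (`response` stands in for any `OpenAI::Responses::Response` instance; none of these accessors are guaranteed to be populated):

```ruby
# Sketch only: all of these fields are new in this diff and may be nil.
response.service_tier     # => :auto | :default | :flex | :scale | :priority | nil
response.conversation&.id # conversation is a nested Conversation model
response.max_tool_calls   # Integer?
response.top_logprobs     # Integer?
```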
diff --git a/sig/openai/models/responses/response_audio_delta_event.rbs b/sig/openai/models/responses/response_audio_delta_event.rbs
index 3f1416c2..f0bcab36 100644
--- a/sig/openai/models/responses/response_audio_delta_event.rbs
+++ b/sig/openai/models/responses/response_audio_delta_event.rbs
@@ -2,21 +2,30 @@ module OpenAI
   module Models
     module Responses
       type response_audio_delta_event =
-        { delta: String, type: :"response.audio.delta" }
+        {
+          delta: String,
+          sequence_number: Integer,
+          type: :"response.audio.delta"
+        }
 
-      class ResponseAudioDeltaEvent < OpenAI::BaseModel
+      class ResponseAudioDeltaEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor delta: String
 
+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.audio.delta"
 
-        def initialize:
-          (delta: String, type: :"response.audio.delta") -> void
-          | (
-            ?OpenAI::Models::Responses::response_audio_delta_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          delta: String,
+          sequence_number: Integer,
+          ?type: :"response.audio.delta"
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::response_audio_delta_event
+        def to_hash: -> {
+          delta: String,
+          sequence_number: Integer,
+          type: :"response.audio.delta"
+        }
      end
    end
  end
diff --git a/sig/openai/models/responses/response_audio_done_event.rbs b/sig/openai/models/responses/response_audio_done_event.rbs
index 2e521bbe..1c891acf 100644
--- a/sig/openai/models/responses/response_audio_done_event.rbs
+++ b/sig/openai/models/responses/response_audio_done_event.rbs
@@ -1,19 +1,23 @@
 module OpenAI
   module Models
     module Responses
-      type response_audio_done_event = { type: :"response.audio.done" }
+      type response_audio_done_event =
+        { sequence_number: Integer, type: :"response.audio.done" }
+
+      class ResponseAudioDoneEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor sequence_number: Integer
 
-      class ResponseAudioDoneEvent < OpenAI::BaseModel
         attr_accessor type: :"response.audio.done"
 
-        def initialize:
-          (type: :"response.audio.done") -> void
-          | (
-            ?OpenAI::Models::Responses::response_audio_done_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          sequence_number: Integer,
+          ?type: :"response.audio.done"
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::response_audio_done_event
+        def to_hash: -> {
+          sequence_number: Integer,
+          type: :"response.audio.done"
+        }
      end
    end
  end
diff --git a/sig/openai/models/responses/response_audio_transcript_delta_event.rbs b/sig/openai/models/responses/response_audio_transcript_delta_event.rbs
index 047e0072..57c45293 100644
--- a/sig/openai/models/responses/response_audio_transcript_delta_event.rbs
+++ b/sig/openai/models/responses/response_audio_transcript_delta_event.rbs
@@ -2,21 +2,30 @@ module OpenAI
   module Models
     module Responses
       type response_audio_transcript_delta_event =
-        { delta: String, type: :"response.audio.transcript.delta" }
+        {
+          delta: String,
+          sequence_number: Integer,
+          type: :"response.audio.transcript.delta"
+        }
 
-      class ResponseAudioTranscriptDeltaEvent < OpenAI::BaseModel
+      class ResponseAudioTranscriptDeltaEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor delta: String
 
+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.audio.transcript.delta"
 
-        def initialize:
-          (delta: String, type: :"response.audio.transcript.delta") -> void
-          | (
-            ?OpenAI::Models::Responses::response_audio_transcript_delta_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          delta: String,
+          sequence_number: Integer,
+          ?type: :"response.audio.transcript.delta"
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::response_audio_transcript_delta_event
+        def to_hash: -> {
+          delta: String,
+          sequence_number: Integer,
+          type: :"response.audio.transcript.delta"
+        }
      end
    end
  end
diff --git a/sig/openai/models/responses/response_audio_transcript_done_event.rbs b/sig/openai/models/responses/response_audio_transcript_done_event.rbs
index 225a0012..7bd59ad5 100644
--- a/sig/openai/models/responses/response_audio_transcript_done_event.rbs
+++ b/sig/openai/models/responses/response_audio_transcript_done_event.rbs
@@ -2,19 +2,22 @@ module OpenAI
   module Models
     module Responses
       type response_audio_transcript_done_event =
-        { type: :"response.audio.transcript.done" }
+        { sequence_number: Integer, type: :"response.audio.transcript.done" }
+
+      class ResponseAudioTranscriptDoneEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor sequence_number: Integer
 
-      class ResponseAudioTranscriptDoneEvent < OpenAI::BaseModel
         attr_accessor type: :"response.audio.transcript.done"
 
-        def initialize:
-          (type: :"response.audio.transcript.done") -> void
-          | (
-            ?OpenAI::Models::Responses::response_audio_transcript_done_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          sequence_number: Integer,
+          ?type: :"response.audio.transcript.done"
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::response_audio_transcript_done_event
+        def to_hash: -> {
+          sequence_number: Integer,
+          type: :"response.audio.transcript.done"
+        }
      end
    end
  end
diff --git a/sig/openai/models/responses/response_cancel_params.rbs b/sig/openai/models/responses/response_cancel_params.rbs
new file mode 100644
index 00000000..6f0df4a4
--- /dev/null
+++ b/sig/openai/models/responses/response_cancel_params.rbs
@@ -0,0 +1,17 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_cancel_params =
+        { } & OpenAI::Internal::Type::request_parameters
+
+      class ResponseCancelParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters
+
+        def initialize: (?request_options: OpenAI::request_opts) -> void
+
+        def to_hash: -> { request_options: OpenAI::RequestOptions }
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs b/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs
index edd9f658..e5f7b5de 100644
--- a/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs
+++ b/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs
@@ -4,29 +4,38 @@ module OpenAI
       type response_code_interpreter_call_code_delta_event =
         {
           delta: String,
+          item_id: String,
           output_index: Integer,
-          type: :"response.code_interpreter_call.code.delta"
+          sequence_number: Integer,
+          type: :"response.code_interpreter_call_code.delta"
         }
 
-      class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::BaseModel
+      class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor delta: String
 
+        attr_accessor item_id: String
+
         attr_accessor output_index: Integer
 
-        attr_accessor type: :"response.code_interpreter_call.code.delta"
+        attr_accessor sequence_number: Integer
+
+        attr_accessor type: :"response.code_interpreter_call_code.delta"
 
-        def initialize:
-          (
-            delta: String,
-            output_index: Integer,
-            type: :"response.code_interpreter_call.code.delta"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_code_interpreter_call_code_delta_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          delta: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.code_interpreter_call_code.delta"
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::response_code_interpreter_call_code_delta_event
+        def to_hash: -> {
+          delta: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.code_interpreter_call_code.delta"
+        }
      end
    end
  end
diff --git a/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs b/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs
index 91d301fe..57fe27ff 100644
--- a/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs
+++ b/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs
@@ -4,29 +4,38 @@ module OpenAI
       type response_code_interpreter_call_code_done_event =
         {
           code: String,
+          item_id: String,
           output_index: Integer,
-          type: :"response.code_interpreter_call.code.done"
+          sequence_number: Integer,
+          type: :"response.code_interpreter_call_code.done"
         }
 
-      class ResponseCodeInterpreterCallCodeDoneEvent < OpenAI::BaseModel
+      class ResponseCodeInterpreterCallCodeDoneEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor code: String
 
+        attr_accessor item_id: String
+
         attr_accessor output_index: Integer
 
-        attr_accessor type: :"response.code_interpreter_call.code.done"
+        attr_accessor sequence_number: Integer
+
+        attr_accessor type: :"response.code_interpreter_call_code.done"
 
-        def initialize:
-          (
-            code: String,
-            output_index: Integer,
-            type: :"response.code_interpreter_call.code.done"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_code_interpreter_call_code_done_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          code: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.code_interpreter_call_code.done"
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::response_code_interpreter_call_code_done_event
+        def to_hash: -> {
+          code: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.code_interpreter_call_code.done"
+        }
      end
    end
  end
diff --git a/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs b/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs
index 8faa1b2d..ce97fd9b 100644
--- a/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs
+++ b/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs
@@ -3,30 +3,34 @@ module OpenAI
     module Responses
       type response_code_interpreter_call_completed_event =
         {
-          code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall,
+          item_id: String,
           output_index: Integer,
+          sequence_number: Integer,
           type: :"response.code_interpreter_call.completed"
         }
 
-      class ResponseCodeInterpreterCallCompletedEvent < OpenAI::BaseModel
-        attr_accessor code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall
+      class ResponseCodeInterpreterCallCompletedEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor item_id: String
 
         attr_accessor output_index: Integer
 
+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.code_interpreter_call.completed"
 
-        def initialize:
-          (
-            code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall,
-            output_index: Integer,
-            type: :"response.code_interpreter_call.completed"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_code_interpreter_call_completed_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.code_interpreter_call.completed"
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::response_code_interpreter_call_completed_event
+        def to_hash: -> {
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.code_interpreter_call.completed"
+        }
      end
    end
  end
diff --git a/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs b/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs
index ee6b0ff0..3ce614af 100644
--- a/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs
+++ b/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs
@@ -3,30 +3,34 @@ module OpenAI
     module Responses
       type response_code_interpreter_call_in_progress_event =
         {
-          code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall,
+          item_id: String,
           output_index: Integer,
+          sequence_number: Integer,
           type: :"response.code_interpreter_call.in_progress"
         }
 
-      class ResponseCodeInterpreterCallInProgressEvent < OpenAI::BaseModel
-        attr_accessor code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall
+      class ResponseCodeInterpreterCallInProgressEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor item_id: String
 
         attr_accessor output_index: Integer
 
+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.code_interpreter_call.in_progress"
 
-        def initialize:
-          (
-            code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall,
-            output_index: Integer,
-            type: :"response.code_interpreter_call.in_progress"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_code_interpreter_call_in_progress_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.code_interpreter_call.in_progress"
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::response_code_interpreter_call_in_progress_event
+        def to_hash: -> {
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.code_interpreter_call.in_progress"
+        }
      end
    end
  end
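The code-interpreter streaming events now carry `item_id` and a `sequence_number` rather than embedding the full tool call. A dispatch sketch (`event` stands in for a streamed event instance):

```ruby
# Sketch only: matching on the event classes defined above.
case event
when OpenAI::Responses::ResponseCodeInterpreterCallCompletedEvent
  puts "item #{event.item_id} completed (seq #{event.sequence_number})"
when OpenAI::Responses::ResponseCodeInterpreterCallInProgressEvent
  puts "item #{event.item_id} running at index #{event.output_index}"
end
```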
diff --git a/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs b/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs
index a8ed5522..9fd220a6 100644
--- a/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs
+++ b/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs
@@ -3,30 +3,34 @@ module OpenAI
     module Responses
       type response_code_interpreter_call_interpreting_event =
         {
-          code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall,
+          item_id: String,
           output_index: Integer,
+          sequence_number: Integer,
           type: :"response.code_interpreter_call.interpreting"
         }
 
-      class ResponseCodeInterpreterCallInterpretingEvent < OpenAI::BaseModel
-        attr_accessor code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall
+      class ResponseCodeInterpreterCallInterpretingEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor item_id: String
 
         attr_accessor output_index: Integer
 
+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.code_interpreter_call.interpreting"
 
-        def initialize:
-          (
-            code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall,
-            output_index: Integer,
-            type: :"response.code_interpreter_call.interpreting"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_code_interpreter_call_interpreting_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.code_interpreter_call.interpreting"
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::response_code_interpreter_call_interpreting_event
+        def to_hash: -> {
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.code_interpreter_call.interpreting"
+        }
      end
    end
  end
diff --git a/sig/openai/models/responses/response_code_interpreter_tool_call.rbs b/sig/openai/models/responses/response_code_interpreter_tool_call.rbs
index 4f0b66ba..343ac022 100644
--- a/sig/openai/models/responses/response_code_interpreter_tool_call.rbs
+++ b/sig/openai/models/responses/response_code_interpreter_tool_call.rbs
@@ -4,112 +4,91 @@ module OpenAI
       type response_code_interpreter_tool_call =
         {
           id: String,
-          code: String,
-          results: ::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::result],
+          code: String?,
+          container_id: String,
+          outputs: ::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::output]?,
           status: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::status,
           type: :code_interpreter_call
         }
 
-      class ResponseCodeInterpreterToolCall < OpenAI::BaseModel
+      class ResponseCodeInterpreterToolCall < OpenAI::Internal::Type::BaseModel
         attr_accessor id: String
 
-        attr_accessor code: String
+        attr_accessor code: String?
 
-        attr_accessor results: ::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::result]
+        attr_accessor container_id: String
+
+        attr_accessor outputs: ::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::output]?
 
         attr_accessor status: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::status
 
         attr_accessor type: :code_interpreter_call
 
-        def initialize:
-          (
-            id: String,
-            code: String,
-            results: ::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::result],
-            status: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::status,
-            type: :code_interpreter_call
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_code_interpreter_tool_call
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Responses::response_code_interpreter_tool_call
-
-        type result =
-          OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Logs
-          | OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files
-
-        class Result < OpenAI::Union
-          type logs = { logs: String, type: :logs }
-
-          class Logs < OpenAI::BaseModel
-            attr_accessor logs: String
+        def initialize: (
+          id: String,
+          code: String?,
+          container_id: String,
+          outputs: ::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::output]?,
+          status: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::status,
+          ?type: :code_interpreter_call
+        ) -> void
 
-            attr_accessor type: :logs
+        def to_hash: -> {
+          id: String,
+          code: String?,
+          container_id: String,
+          outputs: ::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::output]?,
+          status: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::status,
+          type: :code_interpreter_call
+        }
 
-            def initialize:
-              (logs: String, type: :logs) -> void
-              | (
-                ?OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::logs
-                | OpenAI::BaseModel data
-              ) -> void
+        type output =
+          OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Logs
+          | OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Image
 
-            def to_hash: -> OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::logs
-          end
+        module Output
+          extend OpenAI::Internal::Type::Union
 
-          type files =
-            {
-              files: ::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files::File],
-              type: :files
-            }
+          type logs = { logs: String, type: :logs }
 
-          class Files < OpenAI::BaseModel
-            attr_accessor files: ::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files::File]
+          class Logs < OpenAI::Internal::Type::BaseModel
+            attr_accessor logs: String
 
-            attr_accessor type: :files
+            attr_accessor type: :logs
 
-            def initialize:
-              (
-                files: ::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files::File],
-                type: :files
-              ) -> void
-              | (
-                ?OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::files
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (logs: String, ?type: :logs) -> void
 
-            def to_hash: -> OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::files
+            def to_hash: -> { logs: String, type: :logs }
+          end
 
-            type file = { file_id: String, mime_type: String }
+          type image = { type: :image, url: String }
 
-            class File < OpenAI::BaseModel
-              attr_accessor file_id: String
+          class Image < OpenAI::Internal::Type::BaseModel
+            attr_accessor type: :image
 
-              attr_accessor mime_type: String
+            attr_accessor url: String
 
-              def initialize:
-                (file_id: String, mime_type: String) -> void
-                | (
-                  ?OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files::file
-                  | OpenAI::BaseModel data
-                ) -> void
+            def initialize: (url: String, ?type: :image) -> void
 
-              def to_hash: -> OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files::file
-            end
+            def to_hash: -> { type: :image, url: String }
           end
 
-          private def self.variants: -> [[:logs, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Logs], [:files, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files]]
+          def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::output]
         end
 
-        type status = :in_progress | :interpreting | :completed
+        type status =
+          :in_progress | :completed | :incomplete | :interpreting | :failed
+
+        module Status
+          extend OpenAI::Internal::Type::Enum
 
-        class Status < OpenAI::Enum
           IN_PROGRESS: :in_progress
-          INTERPRETING: :interpreting
           COMPLETED: :completed
+          INCOMPLETE: :incomplete
+          INTERPRETING: :interpreting
+          FAILED: :failed
 
-          def self.values: -> ::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::status]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::status]
        end
      end
    end
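With the `Result` union renamed to `Output`, a tool call's `outputs` array is nullable and each entry is either a `Logs` or an `Image` payload. A sketch (`call` stands in for a `ResponseCodeInterpreterToolCall` instance):

```ruby
# Sketch only: outputs may be nil per the `::Array[...]?` signature above.
call.outputs&.each do |output|
  case output
  when OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Logs
    puts output.logs
  when OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Image
    puts output.url
  end
end
```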
::Array[OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck] attr_accessor status: OpenAI::Models::Responses::ResponseComputerToolCall::status attr_accessor type: OpenAI::Models::Responses::ResponseComputerToolCall::type_ - def initialize: - ( - id: String, - action: OpenAI::Models::Responses::ResponseComputerToolCall::action, - call_id: String, - pending_safety_checks: ::Array[OpenAI::Models::Responses::ResponseComputerToolCall::PendingSafetyCheck], - status: OpenAI::Models::Responses::ResponseComputerToolCall::status, - type: OpenAI::Models::Responses::ResponseComputerToolCall::type_ - ) -> void - | ( - ?OpenAI::Models::Responses::response_computer_tool_call - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Responses::response_computer_tool_call + def initialize: ( + id: String, + action: OpenAI::Models::Responses::ResponseComputerToolCall::action, + call_id: String, + pending_safety_checks: ::Array[OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck], + status: OpenAI::Models::Responses::ResponseComputerToolCall::status, + type: OpenAI::Models::Responses::ResponseComputerToolCall::type_ + ) -> void + + def to_hash: -> { + id: String, + action: OpenAI::Models::Responses::ResponseComputerToolCall::action, + call_id: String, + pending_safety_checks: ::Array[OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck], + status: OpenAI::Models::Responses::ResponseComputerToolCall::status, + type: OpenAI::Models::Responses::ResponseComputerToolCall::type_ + } type action = - OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click - | OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick - | OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag - | OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress - | OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move - | OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot - | OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll - | OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type - | OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait - - class Action < OpenAI::Union + OpenAI::Responses::ResponseComputerToolCall::Action::Click + | OpenAI::Responses::ResponseComputerToolCall::Action::DoubleClick + | OpenAI::Responses::ResponseComputerToolCall::Action::Drag + | OpenAI::Responses::ResponseComputerToolCall::Action::Keypress + | OpenAI::Responses::ResponseComputerToolCall::Action::Move + | OpenAI::Responses::ResponseComputerToolCall::Action::Screenshot + | OpenAI::Responses::ResponseComputerToolCall::Action::Scroll + | OpenAI::Responses::ResponseComputerToolCall::Action::Type + | OpenAI::Responses::ResponseComputerToolCall::Action::Wait + + module Action + extend OpenAI::Internal::Type::Union + type click = { button: OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::button, @@ -60,7 +64,7 @@ module OpenAI y_: Integer } - class Click < OpenAI::BaseModel + class Click < OpenAI::Internal::Type::BaseModel attr_accessor button: OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::button attr_accessor type: :click @@ -69,142 +73,121 @@ module OpenAI attr_accessor y_: Integer - def initialize: - ( - button: OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::button, - x: Integer, - y_: Integer, - type: :click - ) -> void - | ( - ?OpenAI::Models::Responses::ResponseComputerToolCall::Action::click - | OpenAI::BaseModel data - ) -> 
void + def initialize: ( + button: OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::button, + x: Integer, + y_: Integer, + ?type: :click + ) -> void - def to_hash: -> OpenAI::Models::Responses::ResponseComputerToolCall::Action::click + def to_hash: -> { + button: OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::button, + type: :click, + x: Integer, + y_: Integer + } type button = :left | :right | :wheel | :back | :forward - class Button < OpenAI::Enum + module Button + extend OpenAI::Internal::Type::Enum + LEFT: :left RIGHT: :right WHEEL: :wheel BACK: :back FORWARD: :forward - def self.values: -> ::Array[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::button] + def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::button] end end type double_click = { type: :double_click, x: Integer, y_: Integer } - class DoubleClick < OpenAI::BaseModel + class DoubleClick < OpenAI::Internal::Type::BaseModel attr_accessor type: :double_click attr_accessor x: Integer attr_accessor y_: Integer - def initialize: - (x: Integer, y_: Integer, type: :double_click) -> void - | ( - ?OpenAI::Models::Responses::ResponseComputerToolCall::Action::double_click - | OpenAI::BaseModel data - ) -> void + def initialize: ( + x: Integer, + y_: Integer, + ?type: :double_click + ) -> void - def to_hash: -> OpenAI::Models::Responses::ResponseComputerToolCall::Action::double_click + def to_hash: -> { type: :double_click, x: Integer, y_: Integer } end type drag = { - path: ::Array[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path], + path: ::Array[OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path], type: :drag } - class Drag < OpenAI::BaseModel - attr_accessor path: ::Array[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path] + class Drag < OpenAI::Internal::Type::BaseModel + attr_accessor path: ::Array[OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path] attr_accessor type: :drag - def initialize: - ( - path: ::Array[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path], - type: :drag - ) -> void - | ( - ?OpenAI::Models::Responses::ResponseComputerToolCall::Action::drag - | OpenAI::BaseModel data - ) -> void + def initialize: ( + path: ::Array[OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path], + ?type: :drag + ) -> void - def to_hash: -> OpenAI::Models::Responses::ResponseComputerToolCall::Action::drag + def to_hash: -> { + path: ::Array[OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path], + type: :drag + } type path = { x: Integer, y_: Integer } - class Path < OpenAI::BaseModel + class Path < OpenAI::Internal::Type::BaseModel attr_accessor x: Integer attr_accessor y_: Integer - def initialize: - (x: Integer, y_: Integer) -> void - | ( - ?OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::path - | OpenAI::BaseModel data - ) -> void + def initialize: (x: Integer, y_: Integer) -> void - def to_hash: -> OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::path + def to_hash: -> { x: Integer, y_: Integer } end end type keypress = { keys: ::Array[String], type: :keypress } - class Keypress < OpenAI::BaseModel + class Keypress < OpenAI::Internal::Type::BaseModel attr_accessor keys: ::Array[String] attr_accessor type: :keypress - def initialize: - (keys: ::Array[String], type: :keypress) -> void - | ( - ?OpenAI::Models::Responses::ResponseComputerToolCall::Action::keypress - | OpenAI::BaseModel 
-              ) -> void
+            def initialize: (keys: ::Array[String], ?type: :keypress) -> void

-            def to_hash: -> OpenAI::Models::Responses::ResponseComputerToolCall::Action::keypress
+            def to_hash: -> { keys: ::Array[String], type: :keypress }
           end

           type move = { type: :move, x: Integer, y_: Integer }

-          class Move < OpenAI::BaseModel
+          class Move < OpenAI::Internal::Type::BaseModel
             attr_accessor type: :move

             attr_accessor x: Integer

             attr_accessor y_: Integer

-            def initialize:
-              (x: Integer, y_: Integer, type: :move) -> void
-              | (
-                ?OpenAI::Models::Responses::ResponseComputerToolCall::Action::move
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (x: Integer, y_: Integer, ?type: :move) -> void

-            def to_hash: -> OpenAI::Models::Responses::ResponseComputerToolCall::Action::move
+            def to_hash: -> { type: :move, x: Integer, y_: Integer }
           end

           type screenshot = { type: :screenshot }

-          class Screenshot < OpenAI::BaseModel
+          class Screenshot < OpenAI::Internal::Type::BaseModel
             attr_accessor type: :screenshot

-            def initialize:
-              (type: :screenshot) -> void
-              | (
-                ?OpenAI::Models::Responses::ResponseComputerToolCall::Action::screenshot
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (?type: :screenshot) -> void

-            def to_hash: -> OpenAI::Models::Responses::ResponseComputerToolCall::Action::screenshot
+            def to_hash: -> { type: :screenshot }
           end

           type scroll =
@@ -216,7 +199,7 @@ module OpenAI
               y_: Integer
             }

-          class Scroll < OpenAI::BaseModel
+          class Scroll < OpenAI::Internal::Type::BaseModel
             attr_accessor scroll_x: Integer

             attr_accessor scroll_y: Integer
@@ -227,93 +210,83 @@ module OpenAI

             attr_accessor y_: Integer

-            def initialize:
-              (
-                scroll_x: Integer,
-                scroll_y: Integer,
-                x: Integer,
-                y_: Integer,
-                type: :scroll
-              ) -> void
-              | (
-                ?OpenAI::Models::Responses::ResponseComputerToolCall::Action::scroll
-                | OpenAI::BaseModel data
-              ) -> void
-
-            def to_hash: -> OpenAI::Models::Responses::ResponseComputerToolCall::Action::scroll
+            def initialize: (
+              scroll_x: Integer,
+              scroll_y: Integer,
+              x: Integer,
+              y_: Integer,
+              ?type: :scroll
+            ) -> void
+
+            def to_hash: -> {
+              scroll_x: Integer,
+              scroll_y: Integer,
+              type: :scroll,
+              x: Integer,
+              y_: Integer
+            }
           end

           type type_ = { text: String, type: :type }

-          class Type < OpenAI::BaseModel
+          class Type < OpenAI::Internal::Type::BaseModel
             attr_accessor text: String

             attr_accessor type: :type

-            def initialize:
-              (text: String, type: :type) -> void
-              | (
-                ?OpenAI::Models::Responses::ResponseComputerToolCall::Action::type_
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (text: String, ?type: :type) -> void

-            def to_hash: -> OpenAI::Models::Responses::ResponseComputerToolCall::Action::type_
+            def to_hash: -> { text: String, type: :type }
           end

           type wait = { type: :wait }

-          class Wait < OpenAI::BaseModel
+          class Wait < OpenAI::Internal::Type::BaseModel
             attr_accessor type: :wait

-            def initialize:
-              (type: :wait) -> void
-              | (
-                ?OpenAI::Models::Responses::ResponseComputerToolCall::Action::wait
-                | OpenAI::BaseModel data
-              ) -> void
+            def initialize: (?type: :wait) -> void

-            def to_hash: -> OpenAI::Models::Responses::ResponseComputerToolCall::Action::wait
+            def to_hash: -> { type: :wait }
           end

-          private def self.variants: -> [[:click, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click], [:double_click, OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick], [:drag, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag], [:keypress, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress], [:move, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move], [:screenshot, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot], [:scroll, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll], [:type, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type], [:wait, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait]]
+          def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseComputerToolCall::action]
         end

         type pending_safety_check = { id: String, code: String, message: String }

-        class PendingSafetyCheck < OpenAI::BaseModel
+        class PendingSafetyCheck < OpenAI::Internal::Type::BaseModel
           attr_accessor id: String

           attr_accessor code: String

           attr_accessor message: String

-          def initialize:
-            (id: String, code: String, message: String) -> void
-            | (
-              ?OpenAI::Models::Responses::ResponseComputerToolCall::pending_safety_check
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (id: String, code: String, message: String) -> void

-          def to_hash: -> OpenAI::Models::Responses::ResponseComputerToolCall::pending_safety_check
+          def to_hash: -> { id: String, code: String, message: String }
         end

         type status = :in_progress | :completed | :incomplete

-        class Status < OpenAI::Enum
+        module Status
+          extend OpenAI::Internal::Type::Enum
+
           IN_PROGRESS: :in_progress
           COMPLETED: :completed
           INCOMPLETE: :incomplete

-          def self.values: -> ::Array[OpenAI::Models::Responses::ResponseComputerToolCall::status]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseComputerToolCall::status]
         end

         type type_ = :computer_call

-        class Type < OpenAI::Enum
+        module Type
+          extend OpenAI::Internal::Type::Enum
+
           COMPUTER_CALL: :computer_call

-          def self.values: -> ::Array[OpenAI::Models::Responses::ResponseComputerToolCall::type_]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseComputerToolCall::type_]
         end
       end
     end
diff --git a/sig/openai/models/responses/response_computer_tool_call_output_item.rbs b/sig/openai/models/responses/response_computer_tool_call_output_item.rbs
new file mode 100644
index 00000000..a21f2310
--- /dev/null
+++ b/sig/openai/models/responses/response_computer_tool_call_output_item.rbs
@@ -0,0 +1,82 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_computer_tool_call_output_item =
+        {
+          id: String,
+          call_id: String,
+          output: OpenAI::Responses::ResponseComputerToolCallOutputScreenshot,
+          type: :computer_call_output,
+          acknowledged_safety_checks: ::Array[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck],
+          status: OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::status
+        }
+
+      class ResponseComputerToolCallOutputItem < OpenAI::Internal::Type::BaseModel
+        attr_accessor id: String
+
+        attr_accessor call_id: String
+
+        attr_accessor output: OpenAI::Responses::ResponseComputerToolCallOutputScreenshot
+
+        attr_accessor type: :computer_call_output
+
+        attr_reader acknowledged_safety_checks: ::Array[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck]?
+
+        def acknowledged_safety_checks=: (
+          ::Array[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck]
+        ) -> ::Array[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck]
+
+        attr_reader status: OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::status?
+
+        def status=: (
+          OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::status
+        ) -> OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::status
+
+        def initialize: (
+          id: String,
+          call_id: String,
+          output: OpenAI::Responses::ResponseComputerToolCallOutputScreenshot,
+          ?acknowledged_safety_checks: ::Array[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck],
+          ?status: OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::status,
+          ?type: :computer_call_output
+        ) -> void
+
+        def to_hash: -> {
+          id: String,
+          call_id: String,
+          output: OpenAI::Responses::ResponseComputerToolCallOutputScreenshot,
+          type: :computer_call_output,
+          acknowledged_safety_checks: ::Array[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck],
+          status: OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::status
+        }
+
+        type acknowledged_safety_check =
+          { id: String, code: String, message: String }
+
+        class AcknowledgedSafetyCheck < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          attr_accessor code: String
+
+          attr_accessor message: String
+
+          def initialize: (id: String, code: String, message: String) -> void
+
+          def to_hash: -> { id: String, code: String, message: String }
+        end
+
+        type status = :in_progress | :completed | :incomplete
+
+        module Status
+          extend OpenAI::Internal::Type::Enum
+
+          IN_PROGRESS: :in_progress
+          COMPLETED: :completed
+          INCOMPLETE: :incomplete
+
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::status]
+        end
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/response_computer_tool_call_output_screenshot.rbs b/sig/openai/models/responses/response_computer_tool_call_output_screenshot.rbs
new file mode 100644
index 00000000..3f522d97
--- /dev/null
+++ b/sig/openai/models/responses/response_computer_tool_call_output_screenshot.rbs
@@ -0,0 +1,32 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_computer_tool_call_output_screenshot =
+        { type: :computer_screenshot, file_id: String, image_url: String }
+
+      class ResponseComputerToolCallOutputScreenshot < OpenAI::Internal::Type::BaseModel
+        attr_accessor type: :computer_screenshot
+
+        attr_reader file_id: String?
+
+        def file_id=: (String) -> String
+
+        attr_reader image_url: String?
+
+        def image_url=: (String) -> String
+
+        def initialize: (
+          ?file_id: String,
+          ?image_url: String,
+          ?type: :computer_screenshot
+        ) -> void
+
+        def to_hash: -> {
+          type: :computer_screenshot,
+          file_id: String,
+          image_url: String
+        }
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/response_content.rbs b/sig/openai/models/responses/response_content.rbs
index 246a0708..deb9917d 100644
--- a/sig/openai/models/responses/response_content.rbs
+++ b/sig/openai/models/responses/response_content.rbs
@@ -2,14 +2,16 @@ module OpenAI
   module Models
     module Responses
       type response_content =
-        OpenAI::Models::Responses::ResponseInputText
-        | OpenAI::Models::Responses::ResponseInputImage
-        | OpenAI::Models::Responses::ResponseInputFile
-        | OpenAI::Models::Responses::ResponseOutputText
-        | OpenAI::Models::Responses::ResponseOutputRefusal
+        OpenAI::Responses::ResponseInputText
+        | OpenAI::Responses::ResponseInputImage
+        | OpenAI::Responses::ResponseInputFile
+        | OpenAI::Responses::ResponseOutputText
+        | OpenAI::Responses::ResponseOutputRefusal

-      class ResponseContent < OpenAI::Union
-        private def self.variants: -> [[nil, OpenAI::Models::Responses::ResponseInputText], [nil, OpenAI::Models::Responses::ResponseInputImage], [nil, OpenAI::Models::Responses::ResponseInputFile], [nil, OpenAI::Models::Responses::ResponseOutputText], [nil, OpenAI::Models::Responses::ResponseOutputRefusal]]
+      module ResponseContent
+        extend OpenAI::Internal::Type::Union
+
+        def self?.variants: -> ::Array[OpenAI::Models::Responses::response_content]
       end
     end
   end
diff --git a/sig/openai/models/responses/response_content_part_added_event.rbs b/sig/openai/models/responses/response_content_part_added_event.rbs
index 37deb50b..efdf1ffe 100644
--- a/sig/openai/models/responses/response_content_part_added_event.rbs
+++ b/sig/openai/models/responses/response_content_part_added_event.rbs
@@ -7,10 +7,11 @@ module OpenAI
           item_id: String,
           output_index: Integer,
           part: OpenAI::Models::Responses::ResponseContentPartAddedEvent::part,
+          sequence_number: Integer,
           type: :"response.content_part.added"
         }

-      class ResponseContentPartAddedEvent < OpenAI::BaseModel
+      class ResponseContentPartAddedEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor content_index: Integer

         attr_accessor item_id: String
@@ -19,29 +20,36 @@ module OpenAI
         attr_accessor part: OpenAI::Models::Responses::ResponseContentPartAddedEvent::part

+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.content_part.added"

-        def initialize:
-          (
-            content_index: Integer,
-            item_id: String,
-            output_index: Integer,
-            part: OpenAI::Models::Responses::ResponseContentPartAddedEvent::part,
-            type: :"response.content_part.added"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_content_part_added_event
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Responses::response_content_part_added_event
+        def initialize: (
+          content_index: Integer,
+          item_id: String,
+          output_index: Integer,
+          part: OpenAI::Models::Responses::ResponseContentPartAddedEvent::part,
+          sequence_number: Integer,
+          ?type: :"response.content_part.added"
+        ) -> void
+
+        def to_hash: -> {
+          content_index: Integer,
+          item_id: String,
+          output_index: Integer,
+          part: OpenAI::Models::Responses::ResponseContentPartAddedEvent::part,
+          sequence_number: Integer,
+          type: :"response.content_part.added"
+        }

         type part =
-          OpenAI::Models::Responses::ResponseOutputText
-          | OpenAI::Models::Responses::ResponseOutputRefusal
+          OpenAI::Responses::ResponseOutputText
+          | OpenAI::Responses::ResponseOutputRefusal
+
+        module Part
+          extend OpenAI::Internal::Type::Union

-        class Part < OpenAI::Union
-          private def self.variants: -> [[:output_text, OpenAI::Models::Responses::ResponseOutputText], [:refusal, OpenAI::Models::Responses::ResponseOutputRefusal]]
+          def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseContentPartAddedEvent::part]
         end
       end
     end
diff --git a/sig/openai/models/responses/response_content_part_done_event.rbs b/sig/openai/models/responses/response_content_part_done_event.rbs
index d8830b30..53ea73b3 100644
--- a/sig/openai/models/responses/response_content_part_done_event.rbs
+++ b/sig/openai/models/responses/response_content_part_done_event.rbs
@@ -7,10 +7,11 @@ module OpenAI
           item_id: String,
           output_index: Integer,
           part: OpenAI::Models::Responses::ResponseContentPartDoneEvent::part,
+          sequence_number: Integer,
           type: :"response.content_part.done"
         }

-      class ResponseContentPartDoneEvent < OpenAI::BaseModel
+      class ResponseContentPartDoneEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor content_index: Integer

         attr_accessor item_id: String
@@ -19,29 +20,36 @@ module OpenAI
         attr_accessor part: OpenAI::Models::Responses::ResponseContentPartDoneEvent::part

+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.content_part.done"

-        def initialize:
-          (
-            content_index: Integer,
-            item_id: String,
-            output_index: Integer,
-            part: OpenAI::Models::Responses::ResponseContentPartDoneEvent::part,
-            type: :"response.content_part.done"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_content_part_done_event
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Responses::response_content_part_done_event
+        def initialize: (
+          content_index: Integer,
+          item_id: String,
+          output_index: Integer,
+          part: OpenAI::Models::Responses::ResponseContentPartDoneEvent::part,
+          sequence_number: Integer,
+          ?type: :"response.content_part.done"
+        ) -> void
+
+        def to_hash: -> {
+          content_index: Integer,
+          item_id: String,
+          output_index: Integer,
+          part: OpenAI::Models::Responses::ResponseContentPartDoneEvent::part,
+          sequence_number: Integer,
+          type: :"response.content_part.done"
+        }

         type part =
-          OpenAI::Models::Responses::ResponseOutputText
-          | OpenAI::Models::Responses::ResponseOutputRefusal
+          OpenAI::Responses::ResponseOutputText
+          | OpenAI::Responses::ResponseOutputRefusal
+
+        module Part
+          extend OpenAI::Internal::Type::Union

-        class Part < OpenAI::Union
-          private def self.variants: -> [[:output_text, OpenAI::Models::Responses::ResponseOutputText], [:refusal, OpenAI::Models::Responses::ResponseOutputRefusal]]
+          def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseContentPartDoneEvent::part]
         end
       end
     end
diff --git a/sig/openai/models/responses/response_conversation_param.rbs b/sig/openai/models/responses/response_conversation_param.rbs
new file mode 100644
index 00000000..9784d5c7
--- /dev/null
+++ b/sig/openai/models/responses/response_conversation_param.rbs
@@ -0,0 +1,15 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_conversation_param = { id: String }
+
+      class ResponseConversationParam < OpenAI::Internal::Type::BaseModel
+        attr_accessor id: String
+
+        def initialize: (id: String) -> void
+
+        def to_hash: -> { id: String }
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/response_create_params.rbs b/sig/openai/models/responses/response_create_params.rbs
index 785ee05f..4a636dd1 100644
--- a/sig/openai/models/responses/response_create_params.rbs
+++ b/sig/openai/models/responses/response_create_params.rbs
@@ -3,57 +3,94 @@ module OpenAI
     module Responses
       type response_create_params =
         {
-          input: OpenAI::Models::Responses::ResponseCreateParams::input,
-          model: OpenAI::Models::Responses::ResponseCreateParams::model,
+          background: bool?,
+          conversation: OpenAI::Models::Responses::ResponseCreateParams::conversation?,
           include: ::Array[OpenAI::Models::Responses::response_includable]?,
+          input: OpenAI::Models::Responses::ResponseCreateParams::input,
           instructions: String?,
           max_output_tokens: Integer?,
+          max_tool_calls: Integer?,
           metadata: OpenAI::Models::metadata?,
+          model: OpenAI::Models::responses_model,
           parallel_tool_calls: bool?,
           previous_response_id: String?,
-          reasoning: OpenAI::Models::Reasoning?,
+          prompt: OpenAI::Responses::ResponsePrompt?,
+          prompt_cache_key: String,
+          reasoning: OpenAI::Reasoning?,
+          safety_identifier: String,
+          service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
           store: bool?,
+          stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?,
           temperature: Float?,
-          text: OpenAI::Models::Responses::ResponseTextConfig,
+          text: OpenAI::Responses::ResponseTextConfig,
           tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice,
           tools: ::Array[OpenAI::Models::Responses::tool],
+          top_logprobs: Integer?,
           top_p: Float?,
           truncation: OpenAI::Models::Responses::ResponseCreateParams::truncation?,
           user: String
         }
-        & OpenAI::request_parameters
+        & OpenAI::Internal::Type::request_parameters

-      class ResponseCreateParams < OpenAI::BaseModel
-        extend OpenAI::RequestParameters::Converter
-        include OpenAI::RequestParameters
+      class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters

-        attr_accessor input: OpenAI::Models::Responses::ResponseCreateParams::input
+        attr_accessor background: bool?

-        attr_accessor model: OpenAI::Models::Responses::ResponseCreateParams::model
+        attr_accessor conversation: OpenAI::Models::Responses::ResponseCreateParams::conversation?

         attr_accessor include: ::Array[OpenAI::Models::Responses::response_includable]?

+        attr_reader input: OpenAI::Models::Responses::ResponseCreateParams::input?
+
+        def input=: (
+          OpenAI::Models::Responses::ResponseCreateParams::input
+        ) -> OpenAI::Models::Responses::ResponseCreateParams::input
+
         attr_accessor instructions: String?

         attr_accessor max_output_tokens: Integer?

+        attr_accessor max_tool_calls: Integer?
+
         attr_accessor metadata: OpenAI::Models::metadata?

+        attr_reader model: OpenAI::Models::responses_model?
+
+        def model=: (
+          OpenAI::Models::responses_model
+        ) -> OpenAI::Models::responses_model
+
         attr_accessor parallel_tool_calls: bool?

         attr_accessor previous_response_id: String?

-        attr_accessor reasoning: OpenAI::Models::Reasoning?
+        attr_accessor prompt: OpenAI::Responses::ResponsePrompt?
+
+        attr_reader prompt_cache_key: String?
+
+        def prompt_cache_key=: (String) -> String
+
+        attr_accessor reasoning: OpenAI::Reasoning?
+
+        attr_reader safety_identifier: String?
+
+        def safety_identifier=: (String) -> String
+
+        attr_accessor service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?

         attr_accessor store: bool?

+        attr_accessor stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?
+
         attr_accessor temperature: Float?

-        attr_reader text: OpenAI::Models::Responses::ResponseTextConfig?
+        attr_reader text: OpenAI::Responses::ResponseTextConfig?
         def text=: (
-          OpenAI::Models::Responses::ResponseTextConfig
-        ) -> OpenAI::Models::Responses::ResponseTextConfig
+          OpenAI::Responses::ResponseTextConfig
+        ) -> OpenAI::Responses::ResponseTextConfig

         attr_reader tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice?
@@ -67,6 +104,8 @@ module OpenAI
           ::Array[OpenAI::Models::Responses::tool]
         ) -> ::Array[OpenAI::Models::Responses::tool]

+        attr_accessor top_logprobs: Integer?
+
         attr_accessor top_p: Float?

         attr_accessor truncation: OpenAI::Models::Responses::ResponseCreateParams::truncation?
@@ -75,62 +114,132 @@ module OpenAI

         def user=: (String) -> String

-        def initialize:
-          (
-            input: OpenAI::Models::Responses::ResponseCreateParams::input,
-            model: OpenAI::Models::Responses::ResponseCreateParams::model,
-            include: ::Array[OpenAI::Models::Responses::response_includable]?,
-            instructions: String?,
-            max_output_tokens: Integer?,
-            metadata: OpenAI::Models::metadata?,
-            parallel_tool_calls: bool?,
-            previous_response_id: String?,
-            reasoning: OpenAI::Models::Reasoning?,
-            store: bool?,
-            temperature: Float?,
-            text: OpenAI::Models::Responses::ResponseTextConfig,
-            tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice,
-            tools: ::Array[OpenAI::Models::Responses::tool],
-            top_p: Float?,
-            truncation: OpenAI::Models::Responses::ResponseCreateParams::truncation?,
-            user: String,
-            request_options: OpenAI::request_opts
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_create_params
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Responses::response_create_params
+        def initialize: (
+          ?background: bool?,
+          ?conversation: OpenAI::Models::Responses::ResponseCreateParams::conversation?,
+          ?include: ::Array[OpenAI::Models::Responses::response_includable]?,
+          ?input: OpenAI::Models::Responses::ResponseCreateParams::input,
+          ?instructions: String?,
+          ?max_output_tokens: Integer?,
+          ?max_tool_calls: Integer?,
+          ?metadata: OpenAI::Models::metadata?,
+          ?model: OpenAI::Models::responses_model,
+          ?parallel_tool_calls: bool?,
+          ?previous_response_id: String?,
+          ?prompt: OpenAI::Responses::ResponsePrompt?,
+          ?prompt_cache_key: String,
+          ?reasoning: OpenAI::Reasoning?,
+          ?safety_identifier: String,
+          ?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
+          ?store: bool?,
+          ?stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?,
+          ?temperature: Float?,
+          ?text: OpenAI::Responses::ResponseTextConfig,
+          ?tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice,
+          ?tools: ::Array[OpenAI::Models::Responses::tool],
+          ?top_logprobs: Integer?,
+          ?top_p: Float?,
+          ?truncation: OpenAI::Models::Responses::ResponseCreateParams::truncation?,
+          ?user: String,
+          ?request_options: OpenAI::request_opts
+        ) -> void
+
+        def to_hash: -> {
+          background: bool?,
+          conversation: OpenAI::Models::Responses::ResponseCreateParams::conversation?,
+          include: ::Array[OpenAI::Models::Responses::response_includable]?,
+          input: OpenAI::Models::Responses::ResponseCreateParams::input,
+          instructions: String?,
+          max_output_tokens: Integer?,
+          max_tool_calls: Integer?,
+          metadata: OpenAI::Models::metadata?,
+          model: OpenAI::Models::responses_model,
+          parallel_tool_calls: bool?,
+          previous_response_id: String?,
+          prompt: OpenAI::Responses::ResponsePrompt?,
+          prompt_cache_key: String,
+          reasoning: OpenAI::Reasoning?,
+          safety_identifier: String,
+          service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
+          store: bool?,
+          stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?,
+          temperature: Float?,
+          text: OpenAI::Responses::ResponseTextConfig,
+          tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice,
+          tools: ::Array[OpenAI::Models::Responses::tool],
+          top_logprobs: Integer?,
+          top_p: Float?,
+          truncation: OpenAI::Models::Responses::ResponseCreateParams::truncation?,
+          user: String,
+          request_options: OpenAI::RequestOptions
+        }
+
+        type conversation =
+          String | OpenAI::Responses::ResponseConversationParam
+
+        module Conversation
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::conversation]
+        end

         type input = String | OpenAI::Models::Responses::response_input

-        class Input < OpenAI::Union
-          private def self.variants: -> [[nil, String], [nil, OpenAI::Models::Responses::response_input]]
+        module Input
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::input]
         end

-        type model = String | OpenAI::Models::chat_model
+        type service_tier = :auto | :default | :flex | :scale | :priority

-        class Model < OpenAI::Union
-          private def self.variants: -> [[nil, String], [nil, OpenAI::Models::chat_model]]
+        module ServiceTier
+          extend OpenAI::Internal::Type::Enum
+
+          AUTO: :auto
+          DEFAULT: :default
+          FLEX: :flex
+          SCALE: :scale
+          PRIORITY: :priority
+
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::service_tier]
+        end
+
+        type stream_options = { include_obfuscation: bool }
+
+        class StreamOptions < OpenAI::Internal::Type::BaseModel
+          attr_reader include_obfuscation: bool?
+
+          def include_obfuscation=: (bool) -> bool
+
+          def initialize: (?include_obfuscation: bool) -> void
+
+          def to_hash: -> { include_obfuscation: bool }
         end

         type tool_choice =
           OpenAI::Models::Responses::tool_choice_options
-          | OpenAI::Models::Responses::ToolChoiceTypes
-          | OpenAI::Models::Responses::ToolChoiceFunction
+          | OpenAI::Responses::ToolChoiceAllowed
+          | OpenAI::Responses::ToolChoiceTypes
+          | OpenAI::Responses::ToolChoiceFunction
+          | OpenAI::Responses::ToolChoiceMcp
+          | OpenAI::Responses::ToolChoiceCustom

-        class ToolChoice < OpenAI::Union
-          private def self.variants: -> [[nil, OpenAI::Models::Responses::tool_choice_options], [nil, OpenAI::Models::Responses::ToolChoiceTypes], [nil, OpenAI::Models::Responses::ToolChoiceFunction]]
+        module ToolChoice
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::tool_choice]
         end

         type truncation = :auto | :disabled

-        class Truncation < OpenAI::Enum
+        module Truncation
+          extend OpenAI::Internal::Type::Enum
+
           AUTO: :auto
           DISABLED: :disabled

-          def self.values: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::truncation]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::truncation]
         end
       end
     end
diff --git a/sig/openai/models/responses/response_created_event.rbs b/sig/openai/models/responses/response_created_event.rbs
index 6a67bba2..1681e66e 100644
--- a/sig/openai/models/responses/response_created_event.rbs
+++ b/sig/openai/models/responses/response_created_event.rbs
@@ -3,26 +3,29 @@ module OpenAI
     module Responses
       type response_created_event =
         {
-          response: OpenAI::Models::Responses::Response,
+          response: OpenAI::Responses::Response,
+          sequence_number: Integer,
           type: :"response.created"
         }

-      class ResponseCreatedEvent < OpenAI::BaseModel
-        attr_accessor response: OpenAI::Models::Responses::Response
+      class ResponseCreatedEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor response: OpenAI::Responses::Response
+
+        attr_accessor sequence_number: Integer

         attr_accessor type: :"response.created"

-        def initialize:
-          (
-            response: OpenAI::Models::Responses::Response,
-            type: :"response.created"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_created_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          response: OpenAI::Responses::Response,
+          sequence_number: Integer,
+          ?type: :"response.created"
+        ) -> void

-        def to_hash: -> OpenAI::Models::Responses::response_created_event
+        def to_hash: -> {
+          response: OpenAI::Responses::Response,
+          sequence_number: Integer,
+          type: :"response.created"
+        }
       end
     end
   end
diff --git a/sig/openai/models/responses/response_custom_tool_call.rbs b/sig/openai/models/responses/response_custom_tool_call.rbs
new file mode 100644
index 00000000..16916b4f
--- /dev/null
+++ b/sig/openai/models/responses/response_custom_tool_call.rbs
@@ -0,0 +1,44 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_custom_tool_call =
+        {
+          call_id: String,
+          input: String,
+          name: String,
+          type: :custom_tool_call,
+          id: String
+        }
+
+      class ResponseCustomToolCall < OpenAI::Internal::Type::BaseModel
+        attr_accessor call_id: String
+
+        attr_accessor input: String
+
+        attr_accessor name: String
+
+        attr_accessor type: :custom_tool_call
+
+        attr_reader id: String?
+
+        def id=: (String) -> String
+
+        def initialize: (
+          call_id: String,
+          input: String,
+          name: String,
+          ?id: String,
+          ?type: :custom_tool_call
+        ) -> void
+
+        def to_hash: -> {
+          call_id: String,
+          input: String,
+          name: String,
+          type: :custom_tool_call,
+          id: String
+        }
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs b/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs
new file mode 100644
index 00000000..030f7237
--- /dev/null
+++ b/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs
@@ -0,0 +1,42 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_custom_tool_call_input_delta_event =
+        {
+          delta: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.custom_tool_call_input.delta"
+        }
+
+      class ResponseCustomToolCallInputDeltaEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor delta: String
+
+        attr_accessor item_id: String
+
+        attr_accessor output_index: Integer
+
+        attr_accessor sequence_number: Integer
+
+        attr_accessor type: :"response.custom_tool_call_input.delta"
+
+        def initialize: (
+          delta: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.custom_tool_call_input.delta"
+        ) -> void
+
+        def to_hash: -> {
+          delta: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.custom_tool_call_input.delta"
+        }
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs b/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs
new file mode 100644
index 00000000..2378e7ae
--- /dev/null
+++ b/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs
@@ -0,0 +1,42 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_custom_tool_call_input_done_event =
+        {
+          input: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.custom_tool_call_input.done"
+        }
+
+      class ResponseCustomToolCallInputDoneEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor input: String
+
+        attr_accessor item_id: String
+
+        attr_accessor output_index: Integer
+
+        attr_accessor sequence_number: Integer
+
+        attr_accessor type: :"response.custom_tool_call_input.done"
+
+        def initialize: (
+          input: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.custom_tool_call_input.done"
+        ) -> void
+
+        def to_hash: -> {
+          input: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.custom_tool_call_input.done"
+        }
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/response_custom_tool_call_output.rbs b/sig/openai/models/responses/response_custom_tool_call_output.rbs
new file mode 100644
index 00000000..d9c9486a
--- /dev/null
+++ b/sig/openai/models/responses/response_custom_tool_call_output.rbs
@@ -0,0 +1,39 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_custom_tool_call_output =
+        {
+          call_id: String,
+          output: String,
+          type: :custom_tool_call_output,
+          id: String
+        }
+
+      class ResponseCustomToolCallOutput < OpenAI::Internal::Type::BaseModel
+        attr_accessor call_id: String
+
+        attr_accessor output: String
+
+        attr_accessor type: :custom_tool_call_output
+
+        attr_reader id: String?
+
+        def id=: (String) -> String
+
+        def initialize: (
+          call_id: String,
+          output: String,
+          ?id: String,
+          ?type: :custom_tool_call_output
+        ) -> void
+
+        def to_hash: -> {
+          call_id: String,
+          output: String,
+          type: :custom_tool_call_output,
+          id: String
+        }
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/response_delete_params.rbs b/sig/openai/models/responses/response_delete_params.rbs
index 509b3360..15aa4a54 100644
--- a/sig/openai/models/responses/response_delete_params.rbs
+++ b/sig/openai/models/responses/response_delete_params.rbs
@@ -1,20 +1,16 @@
 module OpenAI
   module Models
     module Responses
-      type response_delete_params = { } & OpenAI::request_parameters
+      type response_delete_params =
+        { } & OpenAI::Internal::Type::request_parameters

-      class ResponseDeleteParams < OpenAI::BaseModel
-        extend OpenAI::RequestParameters::Converter
-        include OpenAI::RequestParameters
+      class ResponseDeleteParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters

-        def initialize:
-          (request_options: OpenAI::request_opts) -> void
-          | (
-            ?OpenAI::Models::Responses::response_delete_params
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (?request_options: OpenAI::request_opts) -> void

-        def to_hash: -> OpenAI::Models::Responses::response_delete_params
+        def to_hash: -> { request_options: OpenAI::RequestOptions }
       end
     end
   end
diff --git a/sig/openai/models/responses/response_error.rbs b/sig/openai/models/responses/response_error.rbs
index 0adca85b..a894688d 100644
--- a/sig/openai/models/responses/response_error.rbs
+++ b/sig/openai/models/responses/response_error.rbs
@@ -7,21 +7,20 @@ module OpenAI
           message: String
         }

-      class ResponseError < OpenAI::BaseModel
+      class ResponseError < OpenAI::Internal::Type::BaseModel
         attr_accessor code: OpenAI::Models::Responses::ResponseError::code

         attr_accessor message: String

-        def initialize:
-          (
-            code: OpenAI::Models::Responses::ResponseError::code,
-            message: String
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_error | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          code: OpenAI::Models::Responses::ResponseError::code,
+          message: String
+        ) -> void

-        def to_hash: -> OpenAI::Models::Responses::response_error
+        def to_hash: -> {
+          code: OpenAI::Models::Responses::ResponseError::code,
+          message: String
+        }

         type code =
           :server_error
@@ -43,7 +42,9 @@ module OpenAI
           | :failed_to_download_image
           | :image_file_not_found

-        class Code < OpenAI::Enum
+        module Code
+          extend OpenAI::Internal::Type::Enum
+
           SERVER_ERROR: :server_error
           RATE_LIMIT_EXCEEDED: :rate_limit_exceeded
           INVALID_PROMPT: :invalid_prompt
@@ -63,7 +64,7 @@ module OpenAI
           FAILED_TO_DOWNLOAD_IMAGE: :failed_to_download_image
           IMAGE_FILE_NOT_FOUND: :image_file_not_found

-          def self.values: -> ::Array[OpenAI::Models::Responses::ResponseError::code]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseError::code]
         end
       end
     end
diff --git a/sig/openai/models/responses/response_error_event.rbs b/sig/openai/models/responses/response_error_event.rbs
index ddfbd3d6..a7aa3f3f 100644
--- a/sig/openai/models/responses/response_error_event.rbs
+++ b/sig/openai/models/responses/response_error_event.rbs
@@ -2,25 +2,40 @@ module OpenAI
   module Models
     module Responses
       type response_error_event =
-        { code: String?, message: String, param: String?, type: :error }
+        {
+          code: String?,
+          message: String,
+          param: String?,
+          sequence_number: Integer,
+          type: :error
+        }

-      class ResponseErrorEvent < OpenAI::BaseModel
+      class ResponseErrorEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor code: String?

         attr_accessor message: String

         attr_accessor param: String?

+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :error

-        def initialize:
-          (code: String?, message: String, param: String?, type: :error) -> void
-          | (
-            ?OpenAI::Models::Responses::response_error_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          code: String?,
+          message: String,
+          param: String?,
+          sequence_number: Integer,
+          ?type: :error
+        ) -> void

-        def to_hash: -> OpenAI::Models::Responses::response_error_event
+        def to_hash: -> {
+          code: String?,
+          message: String,
+          param: String?,
+          sequence_number: Integer,
+          type: :error
+        }
       end
     end
   end
diff --git a/sig/openai/models/responses/response_failed_event.rbs b/sig/openai/models/responses/response_failed_event.rbs
index 1edd4ecc..27befafe 100644
--- a/sig/openai/models/responses/response_failed_event.rbs
+++ b/sig/openai/models/responses/response_failed_event.rbs
@@ -3,26 +3,29 @@ module OpenAI
     module Responses
      type response_failed_event =
         {
-          response: OpenAI::Models::Responses::Response,
+          response: OpenAI::Responses::Response,
+          sequence_number: Integer,
           type: :"response.failed"
         }

-      class ResponseFailedEvent < OpenAI::BaseModel
-        attr_accessor response: OpenAI::Models::Responses::Response
+      class ResponseFailedEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor response: OpenAI::Responses::Response
+
+        attr_accessor sequence_number: Integer

         attr_accessor type: :"response.failed"

-        def initialize:
-          (
-            response: OpenAI::Models::Responses::Response,
-            type: :"response.failed"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_failed_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          response: OpenAI::Responses::Response,
+          sequence_number: Integer,
+          ?type: :"response.failed"
+        ) -> void

-        def to_hash: -> OpenAI::Models::Responses::response_failed_event
+        def to_hash: -> {
+          response: OpenAI::Responses::Response,
+          sequence_number: Integer,
+          type: :"response.failed"
+        }
       end
     end
   end
diff --git a/sig/openai/models/responses/response_file_search_call_completed_event.rbs b/sig/openai/models/responses/response_file_search_call_completed_event.rbs
index 7f8857c3..80a0bc8e 100644
--- a/sig/openai/models/responses/response_file_search_call_completed_event.rbs
+++ b/sig/openai/models/responses/response_file_search_call_completed_event.rbs
@@ -5,28 +5,32 @@ module OpenAI
         {
           item_id: String,
           output_index: Integer,
+          sequence_number: Integer,
           type: :"response.file_search_call.completed"
         }

-      class ResponseFileSearchCallCompletedEvent < OpenAI::BaseModel
+      class ResponseFileSearchCallCompletedEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor item_id: String

         attr_accessor output_index: Integer

+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.file_search_call.completed"

-        def initialize:
-          (
-            item_id: String,
-            output_index: Integer,
-            type: :"response.file_search_call.completed"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_file_search_call_completed_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.file_search_call.completed"
+        ) -> void

-        def to_hash: -> OpenAI::Models::Responses::response_file_search_call_completed_event
+        def to_hash: -> {
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.file_search_call.completed"
+        }
       end
     end
   end
diff --git a/sig/openai/models/responses/response_file_search_call_in_progress_event.rbs b/sig/openai/models/responses/response_file_search_call_in_progress_event.rbs
index d5e5064e..333b4e44 100644
--- a/sig/openai/models/responses/response_file_search_call_in_progress_event.rbs
+++ b/sig/openai/models/responses/response_file_search_call_in_progress_event.rbs
@@ -5,28 +5,32 @@ module OpenAI
         {
           item_id: String,
           output_index: Integer,
+          sequence_number: Integer,
           type: :"response.file_search_call.in_progress"
         }

-      class ResponseFileSearchCallInProgressEvent < OpenAI::BaseModel
+      class ResponseFileSearchCallInProgressEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor item_id: String

         attr_accessor output_index: Integer

+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.file_search_call.in_progress"

-        def initialize:
-          (
-            item_id: String,
-            output_index: Integer,
-            type: :"response.file_search_call.in_progress"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_file_search_call_in_progress_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.file_search_call.in_progress"
+        ) -> void

-        def to_hash: -> OpenAI::Models::Responses::response_file_search_call_in_progress_event
+        def to_hash: -> {
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.file_search_call.in_progress"
+        }
       end
     end
   end
diff --git a/sig/openai/models/responses/response_file_search_call_searching_event.rbs b/sig/openai/models/responses/response_file_search_call_searching_event.rbs
index 14f56371..66149ea3 100644
--- a/sig/openai/models/responses/response_file_search_call_searching_event.rbs
+++ b/sig/openai/models/responses/response_file_search_call_searching_event.rbs
@@ -5,28 +5,32 @@ module OpenAI
         {
           item_id: String,
           output_index: Integer,
+          sequence_number: Integer,
           type: :"response.file_search_call.searching"
         }

-      class ResponseFileSearchCallSearchingEvent < OpenAI::BaseModel
+      class ResponseFileSearchCallSearchingEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor item_id: String

         attr_accessor output_index: Integer

+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.file_search_call.searching"

-        def initialize:
-          (
-            item_id: String,
-            output_index: Integer,
-            type: :"response.file_search_call.searching"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_file_search_call_searching_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.file_search_call.searching"
+        ) -> void

-        def to_hash: -> OpenAI::Models::Responses::response_file_search_call_searching_event
+        def to_hash: -> {
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.file_search_call.searching"
+        }
       end
     end
   end
diff --git a/sig/openai/models/responses/response_file_search_tool_call.rbs b/sig/openai/models/responses/response_file_search_tool_call.rbs
index bff8946c..b83d2d56 100644
--- a/sig/openai/models/responses/response_file_search_tool_call.rbs
+++ b/sig/openai/models/responses/response_file_search_tool_call.rbs
@@ -7,10 +7,10 @@ module OpenAI
           queries: ::Array[String],
           status: OpenAI::Models::Responses::ResponseFileSearchToolCall::status,
           type: :file_search_call,
-          results: ::Array[OpenAI::Models::Responses::ResponseFileSearchToolCall::Result]?
+          results: ::Array[OpenAI::Responses::ResponseFileSearchToolCall::Result]?
         }

-      class ResponseFileSearchToolCall < OpenAI::BaseModel
+      class ResponseFileSearchToolCall < OpenAI::Internal::Type::BaseModel
         attr_accessor id: String

         attr_accessor queries: ::Array[String]
@@ -19,34 +19,37 @@ module OpenAI

         attr_accessor type: :file_search_call

-        attr_accessor results: ::Array[OpenAI::Models::Responses::ResponseFileSearchToolCall::Result]?
+        attr_accessor results: ::Array[OpenAI::Responses::ResponseFileSearchToolCall::Result]?

-        def initialize:
-          (
-            id: String,
-            queries: ::Array[String],
-            status: OpenAI::Models::Responses::ResponseFileSearchToolCall::status,
-            results: ::Array[OpenAI::Models::Responses::ResponseFileSearchToolCall::Result]?,
-            type: :file_search_call
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_file_search_tool_call
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          id: String,
+          queries: ::Array[String],
+          status: OpenAI::Models::Responses::ResponseFileSearchToolCall::status,
+          ?results: ::Array[OpenAI::Responses::ResponseFileSearchToolCall::Result]?,
+          ?type: :file_search_call
+        ) -> void

-        def to_hash: -> OpenAI::Models::Responses::response_file_search_tool_call
+        def to_hash: -> {
+          id: String,
+          queries: ::Array[String],
+          status: OpenAI::Models::Responses::ResponseFileSearchToolCall::status,
+          type: :file_search_call,
+          results: ::Array[OpenAI::Responses::ResponseFileSearchToolCall::Result]?
+        }

         type status =
           :in_progress | :searching | :completed | :incomplete | :failed

-        class Status < OpenAI::Enum
+        module Status
+          extend OpenAI::Internal::Type::Enum
+
           IN_PROGRESS: :in_progress
           SEARCHING: :searching
           COMPLETED: :completed
           INCOMPLETE: :incomplete
           FAILED: :failed

-          def self.values: -> ::Array[OpenAI::Models::Responses::ResponseFileSearchToolCall::status]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseFileSearchToolCall::status]
         end

         type result =
@@ -58,7 +61,7 @@ module OpenAI
             text: String
           }

-        class Result < OpenAI::BaseModel
+        class Result < OpenAI::Internal::Type::BaseModel
           attr_accessor attributes: ::Hash[Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall::Result::attribute]?

           attr_reader file_id: String?
@@ -77,25 +80,28 @@ module OpenAI

           def text=: (String) -> String

-          def initialize:
-            (
-              attributes: ::Hash[Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall::Result::attribute]?,
-              file_id: String,
-              filename: String,
-              score: Float,
-              text: String
-            ) -> void
-            | (
-              ?OpenAI::Models::Responses::ResponseFileSearchToolCall::result
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Responses::ResponseFileSearchToolCall::result
+          def initialize: (
+            ?attributes: ::Hash[Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall::Result::attribute]?,
+            ?file_id: String,
+            ?filename: String,
+            ?score: Float,
+            ?text: String
+          ) -> void
+
+          def to_hash: -> {
+            attributes: ::Hash[Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall::Result::attribute]?,
+            file_id: String,
+            filename: String,
+            score: Float,
+            text: String
+          }

           type attribute = String | Float | bool

-          class Attribute < OpenAI::Union
-            private def self.variants: -> [[nil, String], [nil, Float], [nil, bool]]
+          module Attribute
+            extend OpenAI::Internal::Type::Union
+
+            def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseFileSearchToolCall::Result::attribute]
           end
         end
       end
diff --git a/sig/openai/models/responses/response_format_text_config.rbs b/sig/openai/models/responses/response_format_text_config.rbs
index bdd7473c..c085d693 100644
--- a/sig/openai/models/responses/response_format_text_config.rbs
+++ b/sig/openai/models/responses/response_format_text_config.rbs
@@ -2,12 +2,14 @@ module OpenAI
   module Models
     module Responses
       type response_format_text_config =
-        OpenAI::Models::ResponseFormatText
-        | OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig
-        | OpenAI::Models::ResponseFormatJSONObject
+        OpenAI::ResponseFormatText
+        | OpenAI::Responses::ResponseFormatTextJSONSchemaConfig
+        | OpenAI::ResponseFormatJSONObject

-      class ResponseFormatTextConfig < OpenAI::Union
-        private def self.variants: -> [[:text, OpenAI::Models::ResponseFormatText], [:json_schema, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig], [:json_object, OpenAI::Models::ResponseFormatJSONObject]]
+      module ResponseFormatTextConfig
+        extend OpenAI::Internal::Type::Union
+
+        def self?.variants: -> ::Array[OpenAI::Models::Responses::response_format_text_config]
       end
     end
   end
diff --git a/sig/openai/models/responses/response_format_text_json_schema_config.rbs b/sig/openai/models/responses/response_format_text_json_schema_config.rbs
index a539a703..23800680 100644
--- a/sig/openai/models/responses/response_format_text_json_schema_config.rbs
+++ b/sig/openai/models/responses/response_format_text_json_schema_config.rbs
@@ -3,14 +3,16 @@ module OpenAI
     module Responses
       type response_format_text_json_schema_config =
         {
+          name: String,
           schema: ::Hash[Symbol, top],
           type: :json_schema,
           description: String,
-          name: String,
           strict: bool?
         }

-      class ResponseFormatTextJSONSchemaConfig < OpenAI::BaseModel
+      class ResponseFormatTextJSONSchemaConfig < OpenAI::Internal::Type::BaseModel
+        attr_accessor name: String
+
         attr_accessor schema: ::Hash[Symbol, top]

         attr_accessor type: :json_schema
@@ -19,26 +21,23 @@ module OpenAI

         def description=: (String) -> String

-        attr_reader name: String?
-
-        def name=: (String) -> String
-
         attr_accessor strict: bool?

-        def initialize:
-          (
-            schema: ::Hash[Symbol, top],
-            description: String,
-            name: String,
-            strict: bool?,
-            type: :json_schema
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_format_text_json_schema_config
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Responses::response_format_text_json_schema_config
+        def initialize: (
+          name: String,
+          schema: ::Hash[Symbol, top],
+          ?description: String,
+          ?strict: bool?,
+          ?type: :json_schema
+        ) -> void
+
+        def to_hash: -> {
+          name: String,
+          schema: ::Hash[Symbol, top],
+          type: :json_schema,
+          description: String,
+          strict: bool?
+        }
       end
     end
   end
diff --git a/sig/openai/models/responses/response_function_call_arguments_delta_event.rbs b/sig/openai/models/responses/response_function_call_arguments_delta_event.rbs
index 0a9a828b..9cc2eb09 100644
--- a/sig/openai/models/responses/response_function_call_arguments_delta_event.rbs
+++ b/sig/openai/models/responses/response_function_call_arguments_delta_event.rbs
@@ -6,31 +6,36 @@ module OpenAI
           delta: String,
           item_id: String,
           output_index: Integer,
+          sequence_number: Integer,
           type: :"response.function_call_arguments.delta"
         }

-      class ResponseFunctionCallArgumentsDeltaEvent < OpenAI::BaseModel
+      class ResponseFunctionCallArgumentsDeltaEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor delta: String

         attr_accessor item_id: String

         attr_accessor output_index: Integer

+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.function_call_arguments.delta"

-        def initialize:
-          (
-            delta: String,
-            item_id: String,
-            output_index: Integer,
-            type: :"response.function_call_arguments.delta"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_function_call_arguments_delta_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          delta: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.function_call_arguments.delta"
+        ) -> void

-        def to_hash: -> OpenAI::Models::Responses::response_function_call_arguments_delta_event
+        def to_hash: -> {
+          delta: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.function_call_arguments.delta"
+        }
       end
     end
   end
diff --git a/sig/openai/models/responses/response_function_call_arguments_done_event.rbs b/sig/openai/models/responses/response_function_call_arguments_done_event.rbs
index fc573721..acedda9d 100644
--- a/sig/openai/models/responses/response_function_call_arguments_done_event.rbs
+++ b/sig/openai/models/responses/response_function_call_arguments_done_event.rbs
@@ -6,31 +6,36 @@ module OpenAI
           arguments: String,
           item_id: String,
           output_index: Integer,
+          sequence_number: Integer,
           type: :"response.function_call_arguments.done"
         }

-      class ResponseFunctionCallArgumentsDoneEvent < OpenAI::BaseModel
+      class ResponseFunctionCallArgumentsDoneEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor arguments: String

         attr_accessor item_id: String

         attr_accessor output_index: Integer

+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.function_call_arguments.done"

-        def initialize:
-          (
-            arguments: String,
-            item_id: String,
-            output_index: Integer,
-            type: :"response.function_call_arguments.done"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_function_call_arguments_done_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          arguments: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.function_call_arguments.done"
+        ) -> void

-        def to_hash: -> OpenAI::Models::Responses::response_function_call_arguments_done_event
+        def to_hash: -> {
+          arguments: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.function_call_arguments.done"
+        }
       end
     end
   end
diff --git a/sig/openai/models/responses/response_function_tool_call.rbs b/sig/openai/models/responses/response_function_tool_call.rbs
index 1314a1f4..4da59dc0 100644
--- a/sig/openai/models/responses/response_function_tool_call.rbs
+++ b/sig/openai/models/responses/response_function_tool_call.rbs
@@ -3,17 +3,15 @@ module OpenAI
     module Responses
       type response_function_tool_call =
         {
-          id: String,
           arguments: String,
           call_id: String,
           name: String,
           type: :function_call,
+          id: String,
           status: OpenAI::Models::Responses::ResponseFunctionToolCall::status
         }

-      class ResponseFunctionToolCall < OpenAI::BaseModel
-        attr_accessor id: String
-
+      class ResponseFunctionToolCall < OpenAI::Internal::Type::BaseModel
         attr_accessor arguments: String

         attr_accessor call_id: String
@@ -22,36 +20,44 @@ module OpenAI

         attr_accessor type: :function_call

+        attr_reader id: String?
+
+        def id=: (String) -> String
+
         attr_reader status: OpenAI::Models::Responses::ResponseFunctionToolCall::status?

         def status=: (
           OpenAI::Models::Responses::ResponseFunctionToolCall::status
         ) -> OpenAI::Models::Responses::ResponseFunctionToolCall::status

-        def initialize:
-          (
-            id: String,
-            arguments: String,
-            call_id: String,
-            name: String,
-            status: OpenAI::Models::Responses::ResponseFunctionToolCall::status,
-            type: :function_call
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_function_tool_call
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Responses::response_function_tool_call
+        def initialize: (
+          arguments: String,
+          call_id: String,
+          name: String,
+          ?id: String,
+          ?status: OpenAI::Models::Responses::ResponseFunctionToolCall::status,
+          ?type: :function_call
+        ) -> void
+
+        def to_hash: -> {
+          arguments: String,
+          call_id: String,
+          name: String,
+          type: :function_call,
+          id: String,
+          status: OpenAI::Models::Responses::ResponseFunctionToolCall::status
+        }

         type status = :in_progress | :completed | :incomplete

-        class Status < OpenAI::Enum
+        module Status
+          extend OpenAI::Internal::Type::Enum
+
           IN_PROGRESS: :in_progress
           COMPLETED: :completed
           INCOMPLETE: :incomplete

-          def self.values: -> ::Array[OpenAI::Models::Responses::ResponseFunctionToolCall::status]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseFunctionToolCall::status]
         end
       end
     end
diff --git a/sig/openai/models/responses/response_function_tool_call_item.rbs b/sig/openai/models/responses/response_function_tool_call_item.rbs
new file mode 100644
index 00000000..38d76eb8
--- /dev/null
+++ b/sig/openai/models/responses/response_function_tool_call_item.rbs
@@ -0,0 +1,17 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_function_tool_call_item = { id: String }
+
+      class ResponseFunctionToolCallItem < OpenAI::Models::Responses::ResponseFunctionToolCall
+        def id: -> String
+
+        def id=: (String _) -> String
+
+        def initialize: (id: String) -> void
+
+        def to_hash: -> { id: String }
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/response_function_tool_call_output_item.rbs b/sig/openai/models/responses/response_function_tool_call_output_item.rbs
new file mode 100644
index 00000000..e9a67d83
--- /dev/null
+++ b/sig/openai/models/responses/response_function_tool_call_output_item.rbs
@@ -0,0 +1,58 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_function_tool_call_output_item =
+        {
+          id: String,
+          call_id: String,
+          output: String,
+          type: :function_call_output,
+          status: OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem::status
+        }
+
+      class ResponseFunctionToolCallOutputItem < OpenAI::Internal::Type::BaseModel
+        attr_accessor id: String
+
+        attr_accessor call_id: String
+
+        attr_accessor output: String
+
+        attr_accessor type: :function_call_output
+
+        attr_reader status: OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem::status?
+
+        def status=: (
+          OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem::status
+        ) -> OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem::status
+
+        def initialize: (
+          id: String,
+          call_id: String,
+          output: String,
+          ?status: OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem::status,
+          ?type: :function_call_output
+        ) -> void
+
+        def to_hash: -> {
+          id: String,
+          call_id: String,
+          output: String,
+          type: :function_call_output,
+          status: OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem::status
+        }
+
+        type status = :in_progress | :completed | :incomplete
+
+        module Status
+          extend OpenAI::Internal::Type::Enum
+
+          IN_PROGRESS: :in_progress
+          COMPLETED: :completed
+          INCOMPLETE: :incomplete
+
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem::status]
+        end
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/response_function_web_search.rbs b/sig/openai/models/responses/response_function_web_search.rbs
index 198d14e2..2aa9d146 100644
--- a/sig/openai/models/responses/response_function_web_search.rbs
+++ b/sig/openai/models/responses/response_function_web_search.rbs
@@ -4,39 +4,94 @@ module OpenAI
       type response_function_web_search =
         {
           id: String,
+          action: OpenAI::Models::Responses::ResponseFunctionWebSearch::action,
           status: OpenAI::Models::Responses::ResponseFunctionWebSearch::status,
           type: :web_search_call
         }

-      class ResponseFunctionWebSearch < OpenAI::BaseModel
+      class ResponseFunctionWebSearch < OpenAI::Internal::Type::BaseModel
         attr_accessor id: String

+        attr_accessor action: OpenAI::Models::Responses::ResponseFunctionWebSearch::action
+
         attr_accessor status: OpenAI::Models::Responses::ResponseFunctionWebSearch::status

         attr_accessor type: :web_search_call

-        def initialize:
-          (
-            id: String,
-            status: OpenAI::Models::Responses::ResponseFunctionWebSearch::status,
-            type: :web_search_call
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_function_web_search
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          id: String,
+          action: OpenAI::Models::Responses::ResponseFunctionWebSearch::action,
+          status: OpenAI::Models::Responses::ResponseFunctionWebSearch::status,
+          ?type: :web_search_call
+        ) -> void
+
+        def to_hash: -> {
+          id: String,
+          action: OpenAI::Models::Responses::ResponseFunctionWebSearch::action,
+          status: OpenAI::Models::Responses::ResponseFunctionWebSearch::status,
+          type: :web_search_call
+        }
+
+        type action =
OpenAI::Responses::ResponseFunctionWebSearch::Action::Search + | OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage + | OpenAI::Responses::ResponseFunctionWebSearch::Action::Find + + module Action + extend OpenAI::Internal::Type::Union + + type search = { query: String, type: :search } + + class Search < OpenAI::Internal::Type::BaseModel + attr_accessor query: String + + attr_accessor type: :search + + def initialize: (query: String, ?type: :search) -> void - def to_hash: -> OpenAI::Models::Responses::response_function_web_search + def to_hash: -> { query: String, type: :search } + end + + type open_page = { type: :open_page, url: String } + + class OpenPage < OpenAI::Internal::Type::BaseModel + attr_accessor type: :open_page + + attr_accessor url: String + + def initialize: (url: String, ?type: :open_page) -> void + + def to_hash: -> { type: :open_page, url: String } + end + + type find = { pattern: String, type: :find, url: String } + + class Find < OpenAI::Internal::Type::BaseModel + attr_accessor pattern: String + + attr_accessor type: :find + + attr_accessor url: String + + def initialize: (pattern: String, url: String, ?type: :find) -> void + + def to_hash: -> { pattern: String, type: :find, url: String } + end + + def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseFunctionWebSearch::action] + end type status = :in_progress | :searching | :completed | :failed - class Status < OpenAI::Enum + module Status + extend OpenAI::Internal::Type::Enum + IN_PROGRESS: :in_progress SEARCHING: :searching COMPLETED: :completed FAILED: :failed - def self.values: -> ::Array[OpenAI::Models::Responses::ResponseFunctionWebSearch::status] + def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseFunctionWebSearch::status] end end end diff --git a/sig/openai/models/responses/response_image_gen_call_completed_event.rbs b/sig/openai/models/responses/response_image_gen_call_completed_event.rbs new file mode 100644 index 00000000..aadbdaa5 --- /dev/null +++ b/sig/openai/models/responses/response_image_gen_call_completed_event.rbs @@ -0,0 +1,37 @@ +module OpenAI + module Models + module Responses + type response_image_gen_call_completed_event = + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.image_generation_call.completed" + } + + class ResponseImageGenCallCompletedEvent < OpenAI::Internal::Type::BaseModel + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor type: :"response.image_generation_call.completed" + + def initialize: ( + item_id: String, + output_index: Integer, + sequence_number: Integer, + ?type: :"response.image_generation_call.completed" + ) -> void + + def to_hash: -> { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.image_generation_call.completed" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_image_gen_call_generating_event.rbs b/sig/openai/models/responses/response_image_gen_call_generating_event.rbs new file mode 100644 index 00000000..a9e514e9 --- /dev/null +++ b/sig/openai/models/responses/response_image_gen_call_generating_event.rbs @@ -0,0 +1,37 @@ +module OpenAI + module Models + module Responses + type response_image_gen_call_generating_event = + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.image_generation_call.generating" + } + + class ResponseImageGenCallGeneratingEvent < 
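A minimal consumption sketch for the new web-search `action` union; `item` is assumed to be an `OpenAI::Responses::ResponseFunctionWebSearch` taken from a response elsewhere, and the variant classes and fields below are exactly those declared in the signatures above:

    case item.action
    when OpenAI::Responses::ResponseFunctionWebSearch::Action::Search
      puts "searched: #{item.action.query}"
    when OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage
      puts "opened: #{item.action.url}"
    when OpenAI::Responses::ResponseFunctionWebSearch::Action::Find
      puts "looked for #{item.action.pattern.inspect} on #{item.action.url}"
    end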
diff --git a/sig/openai/models/responses/response_image_gen_call_completed_event.rbs b/sig/openai/models/responses/response_image_gen_call_completed_event.rbs
new file mode 100644
index 00000000..aadbdaa5
--- /dev/null
+++ b/sig/openai/models/responses/response_image_gen_call_completed_event.rbs
@@ -0,0 +1,37 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_image_gen_call_completed_event =
+        {
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.image_generation_call.completed"
+        }
+
+      class ResponseImageGenCallCompletedEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor item_id: String
+
+        attr_accessor output_index: Integer
+
+        attr_accessor sequence_number: Integer
+
+        attr_accessor type: :"response.image_generation_call.completed"
+
+        def initialize: (
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.image_generation_call.completed"
+        ) -> void
+
+        def to_hash: -> {
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.image_generation_call.completed"
+        }
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/response_image_gen_call_generating_event.rbs b/sig/openai/models/responses/response_image_gen_call_generating_event.rbs
new file mode 100644
index 00000000..a9e514e9
--- /dev/null
+++ b/sig/openai/models/responses/response_image_gen_call_generating_event.rbs
@@ -0,0 +1,37 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_image_gen_call_generating_event =
+        {
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.image_generation_call.generating"
+        }
+
+      class ResponseImageGenCallGeneratingEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor item_id: String
+
+        attr_accessor output_index: Integer
+
+        attr_accessor sequence_number: Integer
+
+        attr_accessor type: :"response.image_generation_call.generating"
+
+        def initialize: (
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.image_generation_call.generating"
+        ) -> void
+
+        def to_hash: -> {
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.image_generation_call.generating"
+        }
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/response_image_gen_call_in_progress_event.rbs b/sig/openai/models/responses/response_image_gen_call_in_progress_event.rbs
new file mode 100644
index 00000000..70a36323
--- /dev/null
+++ b/sig/openai/models/responses/response_image_gen_call_in_progress_event.rbs
@@ -0,0 +1,37 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_image_gen_call_in_progress_event =
+        {
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.image_generation_call.in_progress"
+        }
+
+      class ResponseImageGenCallInProgressEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor item_id: String
+
+        attr_accessor output_index: Integer
+
+        attr_accessor sequence_number: Integer
+
+        attr_accessor type: :"response.image_generation_call.in_progress"
+
+        def initialize: (
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.image_generation_call.in_progress"
+        ) -> void
+
+        def to_hash: -> {
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.image_generation_call.in_progress"
+        }
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/response_image_gen_call_partial_image_event.rbs b/sig/openai/models/responses/response_image_gen_call_partial_image_event.rbs
new file mode 100644
index 00000000..72ebce9d
--- /dev/null
+++ b/sig/openai/models/responses/response_image_gen_call_partial_image_event.rbs
@@ -0,0 +1,47 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_image_gen_call_partial_image_event =
+        {
+          item_id: String,
+          output_index: Integer,
+          :partial_image_b64 => String,
+          partial_image_index: Integer,
+          sequence_number: Integer,
+          type: :"response.image_generation_call.partial_image"
+        }
+
+      class ResponseImageGenCallPartialImageEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor item_id: String
+
+        attr_accessor output_index: Integer
+
+        attr_accessor partial_image_b64: String
+
+        attr_accessor partial_image_index: Integer
+
+        attr_accessor sequence_number: Integer
+
+        attr_accessor type: :"response.image_generation_call.partial_image"
+
+        def initialize: (
+          item_id: String,
+          output_index: Integer,
+          partial_image_b64: String,
+          partial_image_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.image_generation_call.partial_image"
+        ) -> void
+
+        def to_hash: -> {
+          item_id: String,
+          output_index: Integer,
+          :partial_image_b64 => String,
+          partial_image_index: Integer,
+          sequence_number: Integer,
+          type: :"response.image_generation_call.partial_image"
+        }
+      end
+    end
+  end
+end
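A hedged sketch of reacting to the image-generation events above; `events` stands in for whatever enumerable of typed event models the streaming surface yields (not shown in this diff), and the file names are placeholders:

    require "base64"

    events.each do |event|
      case event.type
      when :"response.image_generation_call.partial_image"
        # Fields per the signature above: a base64 payload plus its index.
        File.binwrite("partial_#{event.partial_image_index}.png",
                      Base64.decode64(event.partial_image_b64))
      when :"response.image_generation_call.completed"
        puts "image generation #{event.item_id} done (sequence #{event.sequence_number})"
      end
    end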
diff --git a/sig/openai/models/responses/response_in_progress_event.rbs b/sig/openai/models/responses/response_in_progress_event.rbs
index 2649e5cb..f3d877eb 100644
--- a/sig/openai/models/responses/response_in_progress_event.rbs
+++ b/sig/openai/models/responses/response_in_progress_event.rbs
@@ -3,26 +3,29 @@ module OpenAI
     module Responses
       type response_in_progress_event =
         {
-          response: OpenAI::Models::Responses::Response,
+          response: OpenAI::Responses::Response,
+          sequence_number: Integer,
           type: :"response.in_progress"
         }
 
-      class ResponseInProgressEvent < OpenAI::BaseModel
-        attr_accessor response: OpenAI::Models::Responses::Response
+      class ResponseInProgressEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor response: OpenAI::Responses::Response
+
+        attr_accessor sequence_number: Integer
 
         attr_accessor type: :"response.in_progress"
 
-      def initialize:
-        (
-          response: OpenAI::Models::Responses::Response,
-          type: :"response.in_progress"
-        ) -> void
-        | (
-          ?OpenAI::Models::Responses::response_in_progress_event
-          | OpenAI::BaseModel data
-        ) -> void
+        def initialize: (
+          response: OpenAI::Responses::Response,
+          sequence_number: Integer,
+          ?type: :"response.in_progress"
+        ) -> void
 
-      def to_hash: -> OpenAI::Models::Responses::response_in_progress_event
+        def to_hash: -> {
+          response: OpenAI::Responses::Response,
+          sequence_number: Integer,
+          type: :"response.in_progress"
+        }
       end
     end
   end
diff --git a/sig/openai/models/responses/response_includable.rbs b/sig/openai/models/responses/response_includable.rbs
index b2a08af5..5d4fdc9c 100644
--- a/sig/openai/models/responses/response_includable.rbs
+++ b/sig/openai/models/responses/response_includable.rbs
@@ -2,16 +2,24 @@ module OpenAI
   module Models
     module Responses
       type response_includable =
-        :"file_search_call.results"
-        | :"message.input_image.image_url"
+        :"code_interpreter_call.outputs"
         | :"computer_call_output.output.image_url"
+        | :"file_search_call.results"
+        | :"message.input_image.image_url"
+        | :"message.output_text.logprobs"
+        | :"reasoning.encrypted_content"
+
+      module ResponseIncludable
+        extend OpenAI::Internal::Type::Enum
 
-      class ResponseIncludable < OpenAI::Enum
+        CODE_INTERPRETER_CALL_OUTPUTS: :"code_interpreter_call.outputs"
+        COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL: :"computer_call_output.output.image_url"
         FILE_SEARCH_CALL_RESULTS: :"file_search_call.results"
         MESSAGE_INPUT_IMAGE_IMAGE_URL: :"message.input_image.image_url"
-        COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL: :"computer_call_output.output.image_url"
+        MESSAGE_OUTPUT_TEXT_LOGPROBS: :"message.output_text.logprobs"
+        REASONING_ENCRYPTED_CONTENT: :"reasoning.encrypted_content"
 
-        def self.values: -> ::Array[OpenAI::Models::Responses::response_includable]
+        def self?.values: -> ::Array[OpenAI::Models::Responses::response_includable]
       end
     end
   end
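A sketch of requesting the newly added includables; it assumes the usual `client.responses.create` call shape and a placeholder model name:

    response = client.responses.create(
      model: "gpt-4.1", # placeholder
      input: "Summarize the attached report.",
      include: [
        :"reasoning.encrypted_content",  # new in this revision
        :"message.output_text.logprobs"  # new in this revision
      ]
    )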
diff --git a/sig/openai/models/responses/response_incomplete_event.rbs b/sig/openai/models/responses/response_incomplete_event.rbs
index 270ea93d..179657cd 100644
--- a/sig/openai/models/responses/response_incomplete_event.rbs
+++ b/sig/openai/models/responses/response_incomplete_event.rbs
@@ -3,26 +3,29 @@ module OpenAI
     module Responses
       type response_incomplete_event =
         {
-          response: OpenAI::Models::Responses::Response,
+          response: OpenAI::Responses::Response,
+          sequence_number: Integer,
           type: :"response.incomplete"
         }
 
-      class ResponseIncompleteEvent < OpenAI::BaseModel
-        attr_accessor response: OpenAI::Models::Responses::Response
+      class ResponseIncompleteEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor response: OpenAI::Responses::Response
+
+        attr_accessor sequence_number: Integer
 
         attr_accessor type: :"response.incomplete"
 
-      def initialize:
-        (
-          response: OpenAI::Models::Responses::Response,
-          type: :"response.incomplete"
-        ) -> void
-        | (
-          ?OpenAI::Models::Responses::response_incomplete_event
-          | OpenAI::BaseModel data
-        ) -> void
+        def initialize: (
+          response: OpenAI::Responses::Response,
+          sequence_number: Integer,
+          ?type: :"response.incomplete"
+        ) -> void
 
-      def to_hash: -> OpenAI::Models::Responses::response_incomplete_event
+        def to_hash: -> {
+          response: OpenAI::Responses::Response,
+          sequence_number: Integer,
+          type: :"response.incomplete"
+        }
       end
     end
   end
diff --git a/sig/openai/models/responses/response_input.rbs b/sig/openai/models/responses/response_input.rbs
index 2ca95b4d..494efa0c 100644
--- a/sig/openai/models/responses/response_input.rbs
+++ b/sig/openai/models/responses/response_input.rbs
@@ -4,7 +4,7 @@ module OpenAI
       type response_input =
         ::Array[OpenAI::Models::Responses::response_input_item]
 
-      ResponseInput: response_input
+      ResponseInput: OpenAI::Internal::Type::Converter
     end
   end
 end
diff --git a/sig/openai/models/responses/response_input_audio.rbs b/sig/openai/models/responses/response_input_audio.rbs
index a076443c..42b5a713 100644
--- a/sig/openai/models/responses/response_input_audio.rbs
+++ b/sig/openai/models/responses/response_input_audio.rbs
@@ -8,33 +8,34 @@ module OpenAI
           type: :input_audio
         }
 
-      class ResponseInputAudio < OpenAI::BaseModel
+      class ResponseInputAudio < OpenAI::Internal::Type::BaseModel
         attr_accessor data: String
 
         attr_accessor format_: OpenAI::Models::Responses::ResponseInputAudio::format_
 
         attr_accessor type: :input_audio
 
-      def initialize:
-        (
-          data: String,
-          format_: OpenAI::Models::Responses::ResponseInputAudio::format_,
-          type: :input_audio
-        ) -> void
-        | (
-          ?OpenAI::Models::Responses::response_input_audio
-          | OpenAI::BaseModel data
-        ) -> void
+        def initialize: (
+          data: String,
+          format_: OpenAI::Models::Responses::ResponseInputAudio::format_,
+          ?type: :input_audio
+        ) -> void
 
-      def to_hash: -> OpenAI::Models::Responses::response_input_audio
+        def to_hash: -> {
+          data: String,
+          format_: OpenAI::Models::Responses::ResponseInputAudio::format_,
+          type: :input_audio
+        }
 
         type format_ = :mp3 | :wav
 
-        class Format < OpenAI::Enum
+        module Format
+          extend OpenAI::Internal::Type::Enum
+
           MP3: :mp3
           WAV: :wav
 
-          def self.values: -> ::Array[OpenAI::Models::Responses::ResponseInputAudio::format_]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputAudio::format_]
         end
       end
     end
diff --git a/sig/openai/models/responses/response_input_content.rbs b/sig/openai/models/responses/response_input_content.rbs
index d12edcd5..81fa4a14 100644
--- a/sig/openai/models/responses/response_input_content.rbs
+++ b/sig/openai/models/responses/response_input_content.rbs
@@ -2,12 +2,14 @@ module OpenAI
   module Models
     module Responses
       type response_input_content =
-        OpenAI::Models::Responses::ResponseInputText
-        | OpenAI::Models::Responses::ResponseInputImage
-        | OpenAI::Models::Responses::ResponseInputFile
+        OpenAI::Responses::ResponseInputText
+        | OpenAI::Responses::ResponseInputImage
+        | OpenAI::Responses::ResponseInputFile
 
-      class ResponseInputContent < OpenAI::Union
-        private def self.variants: -> [[:input_text, OpenAI::Models::Responses::ResponseInputText], [:input_image, OpenAI::Models::Responses::ResponseInputImage], [:input_file, OpenAI::Models::Responses::ResponseInputFile]]
+      module ResponseInputContent
+        extend OpenAI::Internal::Type::Union
+
+        def self?.variants: -> ::Array[OpenAI::Models::Responses::response_input_content]
       end
     end
   end
diff --git a/sig/openai/models/responses/response_input_file.rbs b/sig/openai/models/responses/response_input_file.rbs
index 90aaf6d9..5241e013 100644
--- a/sig/openai/models/responses/response_input_file.rbs
+++ b/sig/openai/models/responses/response_input_file.rbs
@@ -5,38 +5,43 @@ module OpenAI
         {
           type: :input_file,
           file_data: String,
-          file_id: String,
+          file_id: String?,
+          file_url: String,
           filename: String
         }
 
-      class ResponseInputFile < OpenAI::BaseModel
+      class ResponseInputFile < OpenAI::Internal::Type::BaseModel
         attr_accessor type: :input_file
 
         attr_reader file_data: String?
 
         def file_data=: (String) -> String
 
-        attr_reader file_id: String?
+        attr_accessor file_id: String?
 
-        def file_id=: (String) -> String
+        attr_reader file_url: String?
+
+        def file_url=: (String) -> String
 
         attr_reader filename: String?
 
         def filename=: (String) -> String
 
-      def initialize:
-        (
-          file_data: String,
-          file_id: String,
-          filename: String,
-          type: :input_file
-        ) -> void
-        | (
-          ?OpenAI::Models::Responses::response_input_file
-          | OpenAI::BaseModel data
-        ) -> void
-
-      def to_hash: -> OpenAI::Models::Responses::response_input_file
+        def initialize: (
+          ?file_data: String,
+          ?file_id: String?,
+          ?file_url: String,
+          ?filename: String,
+          ?type: :input_file
+        ) -> void
+
+        def to_hash: -> {
+          type: :input_file,
+          file_data: String,
+          file_id: String?,
+          file_url: String,
+          filename: String
+        }
       end
     end
   end
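With `file_id` now nullable and `file_url` added, an input file can be referenced by URL rather than by an uploaded file ID. A sketch with placeholder values; a plain hash and the model class should be interchangeable here:

    file_part = {
      type: :input_file,
      file_url: "https://example.com/report.pdf", # new field
      filename: "report.pdf"
    }
    # or: OpenAI::Responses::ResponseInputFile.new(file_url: "...", filename: "report.pdf")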
diff --git a/sig/openai/models/responses/response_input_image.rbs b/sig/openai/models/responses/response_input_image.rbs
index 18ac35c5..6b9cf49e 100644
--- a/sig/openai/models/responses/response_input_image.rbs
+++ b/sig/openai/models/responses/response_input_image.rbs
@@ -9,7 +9,7 @@ module OpenAI
           image_url: String?
         }
 
-      class ResponseInputImage < OpenAI::BaseModel
+      class ResponseInputImage < OpenAI::Internal::Type::BaseModel
         attr_accessor detail: OpenAI::Models::Responses::ResponseInputImage::detail
 
         attr_accessor type: :input_image
@@ -18,28 +18,30 @@ module OpenAI
 
         attr_accessor image_url: String?
 
-      def initialize:
-        (
-          detail: OpenAI::Models::Responses::ResponseInputImage::detail,
-          file_id: String?,
-          image_url: String?,
-          type: :input_image
-        ) -> void
-        | (
-          ?OpenAI::Models::Responses::response_input_image
-          | OpenAI::BaseModel data
-        ) -> void
+        def initialize: (
+          detail: OpenAI::Models::Responses::ResponseInputImage::detail,
+          ?file_id: String?,
+          ?image_url: String?,
+          ?type: :input_image
+        ) -> void
 
-      def to_hash: -> OpenAI::Models::Responses::response_input_image
+        def to_hash: -> {
+          detail: OpenAI::Models::Responses::ResponseInputImage::detail,
+          type: :input_image,
+          file_id: String?,
+          image_url: String?
+        }
 
-        type detail = :high | :low | :auto
+        type detail = :low | :high | :auto
+
+        module Detail
+          extend OpenAI::Internal::Type::Enum
 
-        class Detail < OpenAI::Enum
-          HIGH: :high
           LOW: :low
+          HIGH: :high
           AUTO: :auto
 
-          def self.values: -> ::Array[OpenAI::Models::Responses::ResponseInputImage::detail]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputImage::detail]
         end
       end
     end
diff --git a/sig/openai/models/responses/response_input_item.rbs b/sig/openai/models/responses/response_input_item.rbs
index 6f23ae1b..08461d42 100644
--- a/sig/openai/models/responses/response_input_item.rbs
+++ b/sig/openai/models/responses/response_input_item.rbs
@@ -2,19 +2,31 @@ module OpenAI
   module Models
     module Responses
       type response_input_item =
-        OpenAI::Models::Responses::EasyInputMessage
-        | OpenAI::Models::Responses::ResponseInputItem::Message
-        | OpenAI::Models::Responses::ResponseOutputMessage
-        | OpenAI::Models::Responses::ResponseFileSearchToolCall
-        | OpenAI::Models::Responses::ResponseComputerToolCall
-        | OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput
-        | OpenAI::Models::Responses::ResponseFunctionWebSearch
-        | OpenAI::Models::Responses::ResponseFunctionToolCall
-        | OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput
-        | OpenAI::Models::Responses::ResponseReasoningItem
-        | OpenAI::Models::Responses::ResponseInputItem::ItemReference
-
-      class ResponseInputItem < OpenAI::Union
+        OpenAI::Responses::EasyInputMessage
+        | OpenAI::Responses::ResponseInputItem::Message
+        | OpenAI::Responses::ResponseOutputMessage
+        | OpenAI::Responses::ResponseFileSearchToolCall
+        | OpenAI::Responses::ResponseComputerToolCall
+        | OpenAI::Responses::ResponseInputItem::ComputerCallOutput
+        | OpenAI::Responses::ResponseFunctionWebSearch
+        | OpenAI::Responses::ResponseFunctionToolCall
+        | OpenAI::Responses::ResponseInputItem::FunctionCallOutput
+        | OpenAI::Responses::ResponseReasoningItem
+        | OpenAI::Responses::ResponseInputItem::ImageGenerationCall
+        | OpenAI::Responses::ResponseCodeInterpreterToolCall
+        | OpenAI::Responses::ResponseInputItem::LocalShellCall
+        | OpenAI::Responses::ResponseInputItem::LocalShellCallOutput
+        | OpenAI::Responses::ResponseInputItem::McpListTools
+        | OpenAI::Responses::ResponseInputItem::McpApprovalRequest
+        | OpenAI::Responses::ResponseInputItem::McpApprovalResponse
+        | OpenAI::Responses::ResponseInputItem::McpCall
+        | OpenAI::Responses::ResponseCustomToolCallOutput
+        | OpenAI::Responses::ResponseCustomToolCall
+        | OpenAI::Responses::ResponseInputItem::ItemReference
+
+      module ResponseInputItem
+        extend OpenAI::Internal::Type::Union
 
         type message =
           {
             content: OpenAI::Models::Responses::response_input_message_content_list,
@@ -23,7 +35,7 @@ module OpenAI
             type: OpenAI::Models::Responses::ResponseInputItem::Message::type_
           }
 
-        class Message < OpenAI::BaseModel
+        class Message < OpenAI::Internal::Type::BaseModel
           attr_accessor content: OpenAI::Models::Responses::response_input_message_content_list
 
           attr_accessor role: OpenAI::Models::Responses::ResponseInputItem::Message::role
@@ -40,227 +52,580 @@ module OpenAI
             OpenAI::Models::Responses::ResponseInputItem::Message::type_
           ) -> OpenAI::Models::Responses::ResponseInputItem::Message::type_
 
-        def initialize:
-          (
-            content: OpenAI::Models::Responses::response_input_message_content_list,
-            role: OpenAI::Models::Responses::ResponseInputItem::Message::role,
-            status: OpenAI::Models::Responses::ResponseInputItem::Message::status,
-            type: OpenAI::Models::Responses::ResponseInputItem::Message::type_
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::ResponseInputItem::message
-            | OpenAI::BaseModel data
-          ) -> void
+          def initialize: (
+            content: OpenAI::Models::Responses::response_input_message_content_list,
+            role: OpenAI::Models::Responses::ResponseInputItem::Message::role,
+            ?status: OpenAI::Models::Responses::ResponseInputItem::Message::status,
+            ?type: OpenAI::Models::Responses::ResponseInputItem::Message::type_
+          ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::ResponseInputItem::message
+          def to_hash: -> {
+            content: OpenAI::Models::Responses::response_input_message_content_list,
+            role: OpenAI::Models::Responses::ResponseInputItem::Message::role,
+            status: OpenAI::Models::Responses::ResponseInputItem::Message::status,
+            type: OpenAI::Models::Responses::ResponseInputItem::Message::type_
+          }
 
           type role = :user | :system | :developer
 
-          class Role < OpenAI::Enum
+          module Role
+            extend OpenAI::Internal::Type::Enum
+
             USER: :user
             SYSTEM: :system
             DEVELOPER: :developer
 
-            def self.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::Message::role]
+            def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::Message::role]
           end
 
           type status = :in_progress | :completed | :incomplete
 
-          class Status < OpenAI::Enum
+          module Status
+            extend OpenAI::Internal::Type::Enum
+
             IN_PROGRESS: :in_progress
             COMPLETED: :completed
             INCOMPLETE: :incomplete
 
-            def self.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::Message::status]
+            def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::Message::status]
           end
 
           type type_ = :message
 
-          class Type < OpenAI::Enum
+          module Type
+            extend OpenAI::Internal::Type::Enum
+
             MESSAGE: :message
 
-            def self.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::Message::type_]
+            def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::Message::type_]
           end
         end
 
         type computer_call_output =
           {
             call_id: String,
-            output: OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Output,
+            output: OpenAI::Responses::ResponseComputerToolCallOutputScreenshot,
             type: :computer_call_output,
-            id: String,
-            acknowledged_safety_checks: ::Array[OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck],
-            status: OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::status
+            id: String?,
+            acknowledged_safety_checks: ::Array[OpenAI::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck]?,
+            status: OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::status?
           }
 
-        class ComputerCallOutput < OpenAI::BaseModel
+        class ComputerCallOutput < OpenAI::Internal::Type::BaseModel
           attr_accessor call_id: String
 
-          attr_accessor output: OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Output
+          attr_accessor output: OpenAI::Responses::ResponseComputerToolCallOutputScreenshot
 
           attr_accessor type: :computer_call_output
 
-          attr_reader id: String?
+          attr_accessor id: String?
 
-          def id=: (String) -> String
+          attr_accessor acknowledged_safety_checks: ::Array[OpenAI::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck]?
 
-          attr_reader acknowledged_safety_checks: ::Array[OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck]?
+          attr_accessor status: OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::status?
 
-          def acknowledged_safety_checks=: (
-            ::Array[OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck]
-          ) -> ::Array[OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck]
+          def initialize: (
+            call_id: String,
+            output: OpenAI::Responses::ResponseComputerToolCallOutputScreenshot,
+            ?id: String?,
+            ?acknowledged_safety_checks: ::Array[OpenAI::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck]?,
+            ?status: OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::status?,
+            ?type: :computer_call_output
+          ) -> void
+
+          def to_hash: -> {
+            call_id: String,
+            output: OpenAI::Responses::ResponseComputerToolCallOutputScreenshot,
+            type: :computer_call_output,
+            id: String?,
+            acknowledged_safety_checks: ::Array[OpenAI::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck]?,
+            status: OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::status?
+          }
 
-          attr_reader status: OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::status?
+          type acknowledged_safety_check =
+            { id: String, code: String?, message: String? }
 
-          def status=: (
-            OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::status
-          ) -> OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::status
+          class AcknowledgedSafetyCheck < OpenAI::Internal::Type::BaseModel
+            attr_accessor id: String
+
+            attr_accessor code: String?
+
+            attr_accessor message: String?
 
-          def initialize:
-            (
-              call_id: String,
-              output: OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Output,
+            def initialize: (
               id: String,
-              acknowledged_safety_checks: ::Array[OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck],
-              status: OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::status,
-              type: :computer_call_output
-            ) -> void
-            | (
-              ?OpenAI::Models::Responses::ResponseInputItem::computer_call_output
-              | OpenAI::BaseModel data
+              ?code: String?,
+              ?message: String?
             ) -> void
 
-          def to_hash: -> OpenAI::Models::Responses::ResponseInputItem::computer_call_output
+            def to_hash: -> { id: String, code: String?, message: String? }
+          end
+
+          type status = :in_progress | :completed | :incomplete
 
-          type output =
-            { type: :computer_screenshot, file_id: String, image_url: String }
+          module Status
+            extend OpenAI::Internal::Type::Enum
 
-          class Output < OpenAI::BaseModel
-            attr_accessor type: :computer_screenshot
+            IN_PROGRESS: :in_progress
+            COMPLETED: :completed
+            INCOMPLETE: :incomplete
 
-            attr_reader file_id: String?
+            def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::status]
+          end
+        end
 
-            def file_id=: (String) -> String
+        type function_call_output =
+          {
+            call_id: String,
+            output: String,
+            type: :function_call_output,
+            id: String?,
+            status: OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::status?
+          }
 
-            attr_reader image_url: String?
+        class FunctionCallOutput < OpenAI::Internal::Type::BaseModel
+          attr_accessor call_id: String
 
-            def image_url=: (String) -> String
+          attr_accessor output: String
 
-            def initialize:
-              (
-                file_id: String,
-                image_url: String,
-                type: :computer_screenshot
-              ) -> void
-              | (
-                ?OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::output
-                | OpenAI::BaseModel data
-              ) -> void
+          attr_accessor type: :function_call_output
 
-            def to_hash: -> OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::output
-          end
+          attr_accessor id: String?
 
-          type acknowledged_safety_check =
-            { id: String, code: String, message: String }
+          attr_accessor status: OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::status?
 
-          class AcknowledgedSafetyCheck < OpenAI::BaseModel
-            attr_accessor id: String
+          def initialize: (
+            call_id: String,
+            output: String,
+            ?id: String?,
+            ?status: OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::status?,
+            ?type: :function_call_output
+          ) -> void
 
-            attr_accessor code: String
+          def to_hash: -> {
+            call_id: String,
+            output: String,
+            type: :function_call_output,
+            id: String?,
+            status: OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::status?
+          }
 
-            attr_accessor message: String
+          type status = :in_progress | :completed | :incomplete
 
-            def initialize:
-              (id: String, code: String, message: String) -> void
-              | (
-                ?OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::acknowledged_safety_check
-                | OpenAI::BaseModel data
-              ) -> void
+          module Status
+            extend OpenAI::Internal::Type::Enum
 
-            def to_hash: -> OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::acknowledged_safety_check
+            IN_PROGRESS: :in_progress
+            COMPLETED: :completed
+            INCOMPLETE: :incomplete
+
+            def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::status]
           end
+        end
 
-          type status = :in_progress | :completed | :incomplete
+        type image_generation_call =
+          {
+            id: String,
+            result: String?,
+            status: OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall::status,
+            type: :image_generation_call
+          }
+
+        class ImageGenerationCall < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          attr_accessor result: String?
+
+          attr_accessor status: OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall::status
+
+          attr_accessor type: :image_generation_call
+
+          def initialize: (
+            id: String,
+            result: String?,
+            status: OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall::status,
+            ?type: :image_generation_call
+          ) -> void
+
+          def to_hash: -> {
+            id: String,
+            result: String?,
+            status: OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall::status,
+            type: :image_generation_call
+          }
+
+          type status = :in_progress | :completed | :generating | :failed
+
+          module Status
+            extend OpenAI::Internal::Type::Enum
 
-          class Status < OpenAI::Enum
             IN_PROGRESS: :in_progress
             COMPLETED: :completed
-            INCOMPLETE: :incomplete
+            GENERATING: :generating
+            FAILED: :failed
 
-            def self.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::status]
+            def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall::status]
           end
         end
 
-        type function_call_output =
+        type local_shell_call =
           {
-            call_id: String,
-            output: String,
-            type: :function_call_output,
             id: String,
-            status: OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::status
+            action: OpenAI::Responses::ResponseInputItem::LocalShellCall::Action,
+            call_id: String,
+            status: OpenAI::Models::Responses::ResponseInputItem::LocalShellCall::status,
+            type: :local_shell_call
           }
 
-        class FunctionCallOutput < OpenAI::BaseModel
+        class LocalShellCall < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          attr_accessor action: OpenAI::Responses::ResponseInputItem::LocalShellCall::Action
+
           attr_accessor call_id: String
 
-          attr_accessor output: String
+          attr_accessor status: OpenAI::Models::Responses::ResponseInputItem::LocalShellCall::status
 
-          attr_accessor type: :function_call_output
+          attr_accessor type: :local_shell_call
 
-          attr_reader id: String?
+          def initialize: (
+            id: String,
+            action: OpenAI::Responses::ResponseInputItem::LocalShellCall::Action,
+            call_id: String,
+            status: OpenAI::Models::Responses::ResponseInputItem::LocalShellCall::status,
+            ?type: :local_shell_call
+          ) -> void
 
-          def id=: (String) -> String
+          def to_hash: -> {
+            id: String,
+            action: OpenAI::Responses::ResponseInputItem::LocalShellCall::Action,
+            call_id: String,
+            status: OpenAI::Models::Responses::ResponseInputItem::LocalShellCall::status,
+            type: :local_shell_call
+          }
 
-          attr_reader status: OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::status?
+          type action =
+            {
+              command: ::Array[String],
+              env: ::Hash[Symbol, String],
+              type: :exec,
+              timeout_ms: Integer?,
+              user: String?,
+              working_directory: String?
+            }
 
-          def status=: (
-            OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::status
-          ) -> OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::status
+          class Action < OpenAI::Internal::Type::BaseModel
+            attr_accessor command: ::Array[String]
 
-          def initialize:
-            (
-              call_id: String,
-              output: String,
-              id: String,
-              status: OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::status,
-              type: :function_call_output
-            ) -> void
-            | (
-              ?OpenAI::Models::Responses::ResponseInputItem::function_call_output
-              | OpenAI::BaseModel data
+            attr_accessor env: ::Hash[Symbol, String]
+
+            attr_accessor type: :exec
+
+            attr_accessor timeout_ms: Integer?
+
+            attr_accessor user: String?
+
+            attr_accessor working_directory: String?
+
+            def initialize: (
+              command: ::Array[String],
+              env: ::Hash[Symbol, String],
+              ?timeout_ms: Integer?,
+              ?user: String?,
+              ?working_directory: String?,
+              ?type: :exec
             ) -> void
 
-          def to_hash: -> OpenAI::Models::Responses::ResponseInputItem::function_call_output
+            def to_hash: -> {
+              command: ::Array[String],
+              env: ::Hash[Symbol, String],
+              type: :exec,
+              timeout_ms: Integer?,
+              user: String?,
+              working_directory: String?
+            }
+          end
+
+          type status = :in_progress | :completed | :incomplete
+
+          module Status
+            extend OpenAI::Internal::Type::Enum
+
+            IN_PROGRESS: :in_progress
+            COMPLETED: :completed
+            INCOMPLETE: :incomplete
+
+            def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::LocalShellCall::status]
+          end
+        end
+
+        type local_shell_call_output =
+          {
+            id: String,
+            output: String,
+            type: :local_shell_call_output,
+            status: OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput::status?
+          }
+
+        class LocalShellCallOutput < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          attr_accessor output: String
+
+          attr_accessor type: :local_shell_call_output
+
+          attr_accessor status: OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput::status?
+
+          def initialize: (
+            id: String,
+            output: String,
+            ?status: OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput::status?,
+            ?type: :local_shell_call_output
+          ) -> void
+
+          def to_hash: -> {
+            id: String,
+            output: String,
+            type: :local_shell_call_output,
+            status: OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput::status?
+          }
 
           type status = :in_progress | :completed | :incomplete
 
-          class Status < OpenAI::Enum
+          module Status
+            extend OpenAI::Internal::Type::Enum
+
             IN_PROGRESS: :in_progress
             COMPLETED: :completed
             INCOMPLETE: :incomplete
 
-            def self.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::status]
+            def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput::status]
           end
         end
 
-        type item_reference = { id: String, type: :item_reference }
+        type mcp_list_tools =
+          {
+            id: String,
+            server_label: String,
+            tools: ::Array[OpenAI::Responses::ResponseInputItem::McpListTools::Tool],
+            type: :mcp_list_tools,
+            error: String?
+          }
 
-        class ItemReference < OpenAI::BaseModel
+        class McpListTools < OpenAI::Internal::Type::BaseModel
           attr_accessor id: String
 
-          attr_accessor type: :item_reference
+          attr_accessor server_label: String
 
-          def initialize:
-            (id: String, type: :item_reference) -> void
-            | (
-              ?OpenAI::Models::Responses::ResponseInputItem::item_reference
-              | OpenAI::BaseModel data
+          attr_accessor tools: ::Array[OpenAI::Responses::ResponseInputItem::McpListTools::Tool]
+
+          attr_accessor type: :mcp_list_tools
+
+          attr_accessor error: String?
+
+          def initialize: (
+            id: String,
+            server_label: String,
+            tools: ::Array[OpenAI::Responses::ResponseInputItem::McpListTools::Tool],
+            ?error: String?,
+            ?type: :mcp_list_tools
+          ) -> void
+
+          def to_hash: -> {
+            id: String,
+            server_label: String,
+            tools: ::Array[OpenAI::Responses::ResponseInputItem::McpListTools::Tool],
+            type: :mcp_list_tools,
+            error: String?
+          }
+
+          type tool =
+            {
+              input_schema: top,
+              name: String,
+              annotations: top?,
+              description: String?
+            }
+
+          class Tool < OpenAI::Internal::Type::BaseModel
+            attr_accessor input_schema: top
+
+            attr_accessor name: String
+
+            attr_accessor annotations: top?
+
+            attr_accessor description: String?
+
+            def initialize: (
+              input_schema: top,
+              name: String,
+              ?annotations: top?,
+              ?description: String?
             ) -> void
 
-          def to_hash: -> OpenAI::Models::Responses::ResponseInputItem::item_reference
+            def to_hash: -> {
+              input_schema: top,
+              name: String,
+              annotations: top?,
+              description: String?
+            }
+          end
+        end
+
+        type mcp_approval_request =
+          {
+            id: String,
+            arguments: String,
+            name: String,
+            server_label: String,
+            type: :mcp_approval_request
+          }
+
+        class McpApprovalRequest < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          attr_accessor arguments: String
+
+          attr_accessor name: String
+
+          attr_accessor server_label: String
+
+          attr_accessor type: :mcp_approval_request
+
+          def initialize: (
+            id: String,
+            arguments: String,
+            name: String,
+            server_label: String,
+            ?type: :mcp_approval_request
+          ) -> void
+
+          def to_hash: -> {
+            id: String,
+            arguments: String,
+            name: String,
+            server_label: String,
+            type: :mcp_approval_request
+          }
+        end
+
+        type mcp_approval_response =
+          {
+            approval_request_id: String,
+            approve: bool,
+            type: :mcp_approval_response,
+            id: String?,
+            reason: String?
+          }
+
+        class McpApprovalResponse < OpenAI::Internal::Type::BaseModel
+          attr_accessor approval_request_id: String
+
+          attr_accessor approve: bool
+
+          attr_accessor type: :mcp_approval_response
+
+          attr_accessor id: String?
+
+          attr_accessor reason: String?
+
+          def initialize: (
+            approval_request_id: String,
+            approve: bool,
+            ?id: String?,
+            ?reason: String?,
+            ?type: :mcp_approval_response
+          ) -> void
+
+          def to_hash: -> {
+            approval_request_id: String,
+            approve: bool,
+            type: :mcp_approval_response,
+            id: String?,
+            reason: String?
+          }
+        end
+
+        type mcp_call =
+          {
+            id: String,
+            arguments: String,
+            name: String,
+            server_label: String,
+            type: :mcp_call,
+            error: String?,
+            output: String?
+          }
+
+        class McpCall < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          attr_accessor arguments: String
+
+          attr_accessor name: String
+
+          attr_accessor server_label: String
+
+          attr_accessor type: :mcp_call
+
+          attr_accessor error: String?
+
+          attr_accessor output: String?
+
+          def initialize: (
+            id: String,
+            arguments: String,
+            name: String,
+            server_label: String,
+            ?error: String?,
+            ?output: String?,
+            ?type: :mcp_call
+          ) -> void
+
+          def to_hash: -> {
+            id: String,
+            arguments: String,
+            name: String,
+            server_label: String,
+            type: :mcp_call,
+            error: String?,
+            output: String?
+          }
+        end
+
+        type item_reference =
+          {
+            id: String,
+            type: OpenAI::Models::Responses::ResponseInputItem::ItemReference::type_?
+          }
+
+        class ItemReference < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          attr_accessor type: OpenAI::Models::Responses::ResponseInputItem::ItemReference::type_?
+
+          def initialize: (
+            id: String,
+            ?type: OpenAI::Models::Responses::ResponseInputItem::ItemReference::type_?
+          ) -> void
+
+          def to_hash: -> {
+            id: String,
+            type: OpenAI::Models::Responses::ResponseInputItem::ItemReference::type_?
+          }
+
+          type type_ = :item_reference
+
+          module Type
+            extend OpenAI::Internal::Type::Enum
+
+            ITEM_REFERENCE: :item_reference
+
+            def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputItem::ItemReference::type_]
+          end
         end
 
-        private def self.variants: -> [[:message, OpenAI::Models::Responses::EasyInputMessage], [:message, OpenAI::Models::Responses::ResponseInputItem::Message], [:message, OpenAI::Models::Responses::ResponseOutputMessage], [:file_search_call, OpenAI::Models::Responses::ResponseFileSearchToolCall], [:computer_call, OpenAI::Models::Responses::ResponseComputerToolCall], [:computer_call_output, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput], [:web_search_call, OpenAI::Models::Responses::ResponseFunctionWebSearch], [:function_call, OpenAI::Models::Responses::ResponseFunctionToolCall], [:function_call_output, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput], [:reasoning, OpenAI::Models::Responses::ResponseReasoningItem], [:item_reference, OpenAI::Models::Responses::ResponseInputItem::ItemReference]]
+        def self?.variants: -> ::Array[OpenAI::Models::Responses::response_input_item]
       end
     end
   end
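A sketch of replying to a function call with the reworked `function_call_output` input item (whose `id` and `status` are now optional and nullable); `client`, `previous`, and `call` are assumed from an earlier turn, and the model name is a placeholder:

    follow_up = client.responses.create(
      model: "gpt-4.1", # placeholder
      previous_response_id: previous.id,
      input: [
        {
          type: :function_call_output,
          call_id: call.call_id,
          output: {temperature: 21, unit: "C"}.to_json
        }
      ]
    )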
diff --git a/sig/openai/models/responses/response_input_message_content_list.rbs b/sig/openai/models/responses/response_input_message_content_list.rbs
index d269f8ed..38dd2e85 100644
--- a/sig/openai/models/responses/response_input_message_content_list.rbs
+++ b/sig/openai/models/responses/response_input_message_content_list.rbs
@@ -4,7 +4,7 @@ module OpenAI
       type response_input_message_content_list =
         ::Array[OpenAI::Models::Responses::response_input_content]
 
-      ResponseInputMessageContentList: response_input_message_content_list
+      ResponseInputMessageContentList: OpenAI::Internal::Type::Converter
     end
   end
 end
diff --git a/sig/openai/models/responses/response_input_message_item.rbs b/sig/openai/models/responses/response_input_message_item.rbs
new file mode 100644
index 00000000..ff6874a7
--- /dev/null
+++ b/sig/openai/models/responses/response_input_message_item.rbs
@@ -0,0 +1,84 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_input_message_item =
+        {
+          id: String,
+          content: OpenAI::Models::Responses::response_input_message_content_list,
+          role: OpenAI::Models::Responses::ResponseInputMessageItem::role,
+          status: OpenAI::Models::Responses::ResponseInputMessageItem::status,
+          type: OpenAI::Models::Responses::ResponseInputMessageItem::type_
+        }
+
+      class ResponseInputMessageItem < OpenAI::Internal::Type::BaseModel
+        attr_accessor id: String
+
+        attr_accessor content: OpenAI::Models::Responses::response_input_message_content_list
+
+        attr_accessor role: OpenAI::Models::Responses::ResponseInputMessageItem::role
+
+        attr_reader status: OpenAI::Models::Responses::ResponseInputMessageItem::status?
+
+        def status=: (
+          OpenAI::Models::Responses::ResponseInputMessageItem::status
+        ) -> OpenAI::Models::Responses::ResponseInputMessageItem::status
+
+        attr_reader type: OpenAI::Models::Responses::ResponseInputMessageItem::type_?
+
+        def type=: (
+          OpenAI::Models::Responses::ResponseInputMessageItem::type_
+        ) -> OpenAI::Models::Responses::ResponseInputMessageItem::type_
+
+        def initialize: (
+          id: String,
+          content: OpenAI::Models::Responses::response_input_message_content_list,
+          role: OpenAI::Models::Responses::ResponseInputMessageItem::role,
+          ?status: OpenAI::Models::Responses::ResponseInputMessageItem::status,
+          ?type: OpenAI::Models::Responses::ResponseInputMessageItem::type_
+        ) -> void
+
+        def to_hash: -> {
+          id: String,
+          content: OpenAI::Models::Responses::response_input_message_content_list,
+          role: OpenAI::Models::Responses::ResponseInputMessageItem::role,
+          status: OpenAI::Models::Responses::ResponseInputMessageItem::status,
+          type: OpenAI::Models::Responses::ResponseInputMessageItem::type_
+        }
+
+        type role = :user | :system | :developer
+
+        module Role
+          extend OpenAI::Internal::Type::Enum
+
+          USER: :user
+          SYSTEM: :system
+          DEVELOPER: :developer
+
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputMessageItem::role]
+        end
+
+        type status = :in_progress | :completed | :incomplete
+
+        module Status
+          extend OpenAI::Internal::Type::Enum
+
+          IN_PROGRESS: :in_progress
+          COMPLETED: :completed
+          INCOMPLETE: :incomplete
+
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputMessageItem::status]
+        end
+
+        type type_ = :message
+
+        module Type
+          extend OpenAI::Internal::Type::Enum
+
+          MESSAGE: :message
+
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseInputMessageItem::type_]
+        end
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/response_input_text.rbs b/sig/openai/models/responses/response_input_text.rbs
index df1540c3..76771a1f 100644
--- a/sig/openai/models/responses/response_input_text.rbs
+++ b/sig/openai/models/responses/response_input_text.rbs
@@ -3,19 +3,14 @@ module OpenAI
   module Responses
     type response_input_text = { text: String, type: :input_text }
 
-    class ResponseInputText < OpenAI::BaseModel
+    class ResponseInputText < OpenAI::Internal::Type::BaseModel
       attr_accessor text: String
 
       attr_accessor type: :input_text
 
-      def initialize:
-        (text: String, type: :input_text) -> void
-        | (
-          ?OpenAI::Models::Responses::response_input_text
-          | OpenAI::BaseModel data
-        ) -> void
+      def initialize: (text: String, ?type: :input_text) -> void
 
-      def to_hash: -> OpenAI::Models::Responses::response_input_text
+      def to_hash: -> { text: String, type: :input_text }
     end
   end
 end
diff --git a/sig/openai/models/responses/response_item.rbs b/sig/openai/models/responses/response_item.rbs
new file mode 100644
index 00000000..f8f79e06
--- /dev/null
+++ b/sig/openai/models/responses/response_item.rbs
@@ -0,0 +1,399 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_item =
+        OpenAI::Responses::ResponseInputMessageItem
+        | OpenAI::Responses::ResponseOutputMessage
+        | OpenAI::Responses::ResponseFileSearchToolCall
+        | OpenAI::Responses::ResponseComputerToolCall
+        | OpenAI::Responses::ResponseComputerToolCallOutputItem
+        | OpenAI::Responses::ResponseFunctionWebSearch
+        | OpenAI::Responses::ResponseFunctionToolCallItem
+        | OpenAI::Responses::ResponseFunctionToolCallOutputItem
+        | OpenAI::Responses::ResponseItem::ImageGenerationCall
+        | OpenAI::Responses::ResponseCodeInterpreterToolCall
+        | OpenAI::Responses::ResponseItem::LocalShellCall
+        | OpenAI::Responses::ResponseItem::LocalShellCallOutput
+        | OpenAI::Responses::ResponseItem::McpListTools
+        | OpenAI::Responses::ResponseItem::McpApprovalRequest
+        | OpenAI::Responses::ResponseItem::McpApprovalResponse
+        | OpenAI::Responses::ResponseItem::McpCall
+
+      module ResponseItem
+        extend OpenAI::Internal::Type::Union
+
+        type image_generation_call =
+          {
+            id: String,
+            result: String?,
+            status: OpenAI::Models::Responses::ResponseItem::ImageGenerationCall::status,
+            type: :image_generation_call
+          }
+
+        class ImageGenerationCall < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          attr_accessor result: String?
+
+          attr_accessor status: OpenAI::Models::Responses::ResponseItem::ImageGenerationCall::status
+
+          attr_accessor type: :image_generation_call
+
+          def initialize: (
+            id: String,
+            result: String?,
+            status: OpenAI::Models::Responses::ResponseItem::ImageGenerationCall::status,
+            ?type: :image_generation_call
+          ) -> void
+
+          def to_hash: -> {
+            id: String,
+            result: String?,
+            status: OpenAI::Models::Responses::ResponseItem::ImageGenerationCall::status,
+            type: :image_generation_call
+          }
+
+          type status = :in_progress | :completed | :generating | :failed
+
+          module Status
+            extend OpenAI::Internal::Type::Enum
+
+            IN_PROGRESS: :in_progress
+            COMPLETED: :completed
+            GENERATING: :generating
+            FAILED: :failed
+
+            def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseItem::ImageGenerationCall::status]
+          end
+        end
+
+        type local_shell_call =
+          {
+            id: String,
+            action: OpenAI::Responses::ResponseItem::LocalShellCall::Action,
+            call_id: String,
+            status: OpenAI::Models::Responses::ResponseItem::LocalShellCall::status,
+            type: :local_shell_call
+          }
+
+        class LocalShellCall < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          attr_accessor action: OpenAI::Responses::ResponseItem::LocalShellCall::Action
+
+          attr_accessor call_id: String
+
+          attr_accessor status: OpenAI::Models::Responses::ResponseItem::LocalShellCall::status
+
+          attr_accessor type: :local_shell_call
+
+          def initialize: (
+            id: String,
+            action: OpenAI::Responses::ResponseItem::LocalShellCall::Action,
+            call_id: String,
+            status: OpenAI::Models::Responses::ResponseItem::LocalShellCall::status,
+            ?type: :local_shell_call
+          ) -> void
+
+          def to_hash: -> {
+            id: String,
+            action: OpenAI::Responses::ResponseItem::LocalShellCall::Action,
+            call_id: String,
+            status: OpenAI::Models::Responses::ResponseItem::LocalShellCall::status,
+            type: :local_shell_call
+          }
+
+          type action =
+            {
+              command: ::Array[String],
+              env: ::Hash[Symbol, String],
+              type: :exec,
+              timeout_ms: Integer?,
+              user: String?,
+              working_directory: String?
+            }
+
+          class Action < OpenAI::Internal::Type::BaseModel
+            attr_accessor command: ::Array[String]
+
+            attr_accessor env: ::Hash[Symbol, String]
+
+            attr_accessor type: :exec
+
+            attr_accessor timeout_ms: Integer?
+
+            attr_accessor user: String?
+
+            attr_accessor working_directory: String?
+
+            def initialize: (
+              command: ::Array[String],
+              env: ::Hash[Symbol, String],
+              ?timeout_ms: Integer?,
+              ?user: String?,
+              ?working_directory: String?,
+              ?type: :exec
+            ) -> void
+
+            def to_hash: -> {
+              command: ::Array[String],
+              env: ::Hash[Symbol, String],
+              type: :exec,
+              timeout_ms: Integer?,
+              user: String?,
+              working_directory: String?
+            }
+          end
+
+          type status = :in_progress | :completed | :incomplete
+
+          module Status
+            extend OpenAI::Internal::Type::Enum
+
+            IN_PROGRESS: :in_progress
+            COMPLETED: :completed
+            INCOMPLETE: :incomplete
+
+            def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseItem::LocalShellCall::status]
+          end
+        end
+
+        type local_shell_call_output =
+          {
+            id: String,
+            output: String,
+            type: :local_shell_call_output,
+            status: OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput::status?
+          }
+
+        class LocalShellCallOutput < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          attr_accessor output: String
+
+          attr_accessor type: :local_shell_call_output
+
+          attr_accessor status: OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput::status?
+
+          def initialize: (
+            id: String,
+            output: String,
+            ?status: OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput::status?,
+            ?type: :local_shell_call_output
+          ) -> void
+
+          def to_hash: -> {
+            id: String,
+            output: String,
+            type: :local_shell_call_output,
+            status: OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput::status?
+          }
+
+          type status = :in_progress | :completed | :incomplete
+
+          module Status
+            extend OpenAI::Internal::Type::Enum
+
+            IN_PROGRESS: :in_progress
+            COMPLETED: :completed
+            INCOMPLETE: :incomplete
+
+            def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput::status]
+          end
+        end
+
+        type mcp_list_tools =
+          {
+            id: String,
+            server_label: String,
+            tools: ::Array[OpenAI::Responses::ResponseItem::McpListTools::Tool],
+            type: :mcp_list_tools,
+            error: String?
+          }
+
+        class McpListTools < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          attr_accessor server_label: String
+
+          attr_accessor tools: ::Array[OpenAI::Responses::ResponseItem::McpListTools::Tool]
+
+          attr_accessor type: :mcp_list_tools
+
+          attr_accessor error: String?
+
+          def initialize: (
+            id: String,
+            server_label: String,
+            tools: ::Array[OpenAI::Responses::ResponseItem::McpListTools::Tool],
+            ?error: String?,
+            ?type: :mcp_list_tools
+          ) -> void
+
+          def to_hash: -> {
+            id: String,
+            server_label: String,
+            tools: ::Array[OpenAI::Responses::ResponseItem::McpListTools::Tool],
+            type: :mcp_list_tools,
+            error: String?
+          }
+
+          type tool =
+            {
+              input_schema: top,
+              name: String,
+              annotations: top?,
+              description: String?
+            }
+
+          class Tool < OpenAI::Internal::Type::BaseModel
+            attr_accessor input_schema: top
+
+            attr_accessor name: String
+
+            attr_accessor annotations: top?
+
+            attr_accessor description: String?
+
+            def initialize: (
+              input_schema: top,
+              name: String,
+              ?annotations: top?,
+              ?description: String?
+            ) -> void
+
+            def to_hash: -> {
+              input_schema: top,
+              name: String,
+              annotations: top?,
+              description: String?
+            }
+          end
+        end
+
+        type mcp_approval_request =
+          {
+            id: String,
+            arguments: String,
+            name: String,
+            server_label: String,
+            type: :mcp_approval_request
+          }
+
+        class McpApprovalRequest < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          attr_accessor arguments: String
+
+          attr_accessor name: String
+
+          attr_accessor server_label: String
+
+          attr_accessor type: :mcp_approval_request
+
+          def initialize: (
+            id: String,
+            arguments: String,
+            name: String,
+            server_label: String,
+            ?type: :mcp_approval_request
+          ) -> void
+
+          def to_hash: -> {
+            id: String,
+            arguments: String,
+            name: String,
+            server_label: String,
+            type: :mcp_approval_request
+          }
+        end
+
+        type mcp_approval_response =
+          {
+            id: String,
+            approval_request_id: String,
+            approve: bool,
+            type: :mcp_approval_response,
+            reason: String?
+          }
+
+        class McpApprovalResponse < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          attr_accessor approval_request_id: String
+
+          attr_accessor approve: bool
+
+          attr_accessor type: :mcp_approval_response
+
+          attr_accessor reason: String?
+
+          def initialize: (
+            id: String,
+            approval_request_id: String,
+            approve: bool,
+            ?reason: String?,
+            ?type: :mcp_approval_response
+          ) -> void
+
+          def to_hash: -> {
+            id: String,
+            approval_request_id: String,
+            approve: bool,
+            type: :mcp_approval_response,
+            reason: String?
+          }
+        end
+
+        type mcp_call =
+          {
+            id: String,
+            arguments: String,
+            name: String,
+            server_label: String,
+            type: :mcp_call,
+            error: String?,
+            output: String?
+          }
+
+        class McpCall < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          attr_accessor arguments: String
+
+          attr_accessor name: String
+
+          attr_accessor server_label: String
+
+          attr_accessor type: :mcp_call
+
+          attr_accessor error: String?
+
+          attr_accessor output: String?
+
+          def initialize: (
+            id: String,
+            arguments: String,
+            name: String,
+            server_label: String,
+            ?error: String?,
+            ?output: String?,
+            ?type: :mcp_call
+          ) -> void
+
+          def to_hash: -> {
+            id: String,
+            arguments: String,
+            name: String,
+            server_label: String,
+            type: :mcp_call,
+            error: String?,
+            output: String?
+          }
+        end
+
+        def self?.variants: -> ::Array[OpenAI::Models::Responses::response_item]
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/response_item_list.rbs b/sig/openai/models/responses/response_item_list.rbs
index faf8e1a5..7614060c 100644
--- a/sig/openai/models/responses/response_item_list.rbs
+++ b/sig/openai/models/responses/response_item_list.rbs
@@ -1,20 +1,19 @@
 module OpenAI
   module Models
-    class ResponseItemList = Responses::ResponseItemList
 
     module Responses
       type response_item_list =
         {
-          data: ::Array[OpenAI::Models::Responses::ResponseItemList::data],
+          data: ::Array[OpenAI::Models::Responses::response_item],
           first_id: String,
           has_more: bool,
           last_id: String,
           object: :list
         }
 
-      class ResponseItemList < OpenAI::BaseModel
-        attr_accessor data: ::Array[OpenAI::Models::Responses::ResponseItemList::data]
+      class ResponseItemList < OpenAI::Internal::Type::BaseModel
+        attr_accessor data: ::Array[OpenAI::Models::Responses::response_item]
 
         attr_accessor first_id: String
 
@@ -24,262 +23,21 @@ module OpenAI
 
         attr_accessor object: :list
 
-      def initialize:
-        (
-          data: ::Array[OpenAI::Models::Responses::ResponseItemList::data],
-          first_id: String,
-          has_more: bool,
-          last_id: String,
-          object: :list
-        ) -> void
-        | (
-          ?OpenAI::Models::Responses::response_item_list
-          | OpenAI::BaseModel data
-        ) -> void
-
-      def to_hash: -> OpenAI::Models::Responses::response_item_list
-
-      type data =
-        OpenAI::Models::Responses::ResponseItemList::Data::Message
-        | OpenAI::Models::Responses::ResponseOutputMessage
-        | OpenAI::Models::Responses::ResponseFileSearchToolCall
-        | OpenAI::Models::Responses::ResponseComputerToolCall
-        | OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput
-        | OpenAI::Models::Responses::ResponseFunctionWebSearch
-        | OpenAI::Models::Responses::ResponseFunctionToolCall
-        | OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput
-
-      class Data < OpenAI::Union
-        type message =
-          {
-            id: String,
-            content: OpenAI::Models::Responses::response_input_message_content_list,
-            role: OpenAI::Models::Responses::ResponseItemList::Data::Message::role,
-            status: OpenAI::Models::Responses::ResponseItemList::Data::Message::status,
-            type: OpenAI::Models::Responses::ResponseItemList::Data::Message::type_
-          }
-
-        class Message < OpenAI::BaseModel
-          attr_accessor id: String
-
-          attr_accessor content: OpenAI::Models::Responses::response_input_message_content_list
-
-          attr_accessor role: OpenAI::Models::Responses::ResponseItemList::Data::Message::role
-
-          attr_reader status: OpenAI::Models::Responses::ResponseItemList::Data::Message::status?
-
-          def status=: (
-            OpenAI::Models::Responses::ResponseItemList::Data::Message::status
-          ) -> OpenAI::Models::Responses::ResponseItemList::Data::Message::status
-
-          attr_reader type: OpenAI::Models::Responses::ResponseItemList::Data::Message::type_?
-
-          def type=: (
-            OpenAI::Models::Responses::ResponseItemList::Data::Message::type_
-          ) -> OpenAI::Models::Responses::ResponseItemList::Data::Message::type_
-
-          def initialize:
-            (
-              id: String,
-              content: OpenAI::Models::Responses::response_input_message_content_list,
-              role: OpenAI::Models::Responses::ResponseItemList::Data::Message::role,
-              status: OpenAI::Models::Responses::ResponseItemList::Data::Message::status,
-              type: OpenAI::Models::Responses::ResponseItemList::Data::Message::type_
-            ) -> void
-            | (
-              ?OpenAI::Models::Responses::ResponseItemList::Data::message
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Responses::ResponseItemList::Data::message
-
-          type role = :user | :system | :developer
-
-          class Role < OpenAI::Enum
-            USER: :user
-            SYSTEM: :system
-            DEVELOPER: :developer
-
-            def self.values: -> ::Array[OpenAI::Models::Responses::ResponseItemList::Data::Message::role]
-          end
-
-          type status = :in_progress | :completed | :incomplete
-
-          class Status < OpenAI::Enum
-            IN_PROGRESS: :in_progress
-            COMPLETED: :completed
-            INCOMPLETE: :incomplete
-
-            def self.values: -> ::Array[OpenAI::Models::Responses::ResponseItemList::Data::Message::status]
-          end
-
-          type type_ = :message
-
-          class Type < OpenAI::Enum
-            MESSAGE: :message
-
-            def self.values: -> ::Array[OpenAI::Models::Responses::ResponseItemList::Data::Message::type_]
-          end
-        end
-
-        type computer_call_output =
-          {
-            id: String,
-            call_id: String,
-            output: OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Output,
-            type: :computer_call_output,
-            acknowledged_safety_checks: ::Array[OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::AcknowledgedSafetyCheck],
-            status: OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::status
-          }
-
-        class ComputerCallOutput < OpenAI::BaseModel
-          attr_accessor id: String
-
-          attr_accessor call_id: String
-
-          attr_accessor output: OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Output
-
-          attr_accessor type: :computer_call_output
-
-          attr_reader acknowledged_safety_checks: ::Array[OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::AcknowledgedSafetyCheck]?
-
-          def acknowledged_safety_checks=: (
-            ::Array[OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::AcknowledgedSafetyCheck]
-          ) -> ::Array[OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::AcknowledgedSafetyCheck]
-
-          attr_reader status: OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::status?
-
-          def status=: (
-            OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::status
-          ) -> OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::status
-
-          def initialize:
-            (
-              id: String,
-              call_id: String,
-              output: OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Output,
-              acknowledged_safety_checks: ::Array[OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::AcknowledgedSafetyCheck],
-              status: OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::status,
-              type: :computer_call_output
-            ) -> void
-            | (
-              ?OpenAI::Models::Responses::ResponseItemList::Data::computer_call_output
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Responses::ResponseItemList::Data::computer_call_output
-
-          type output =
-            { type: :computer_screenshot, file_id: String, image_url: String }
-
-          class Output < OpenAI::BaseModel
-            attr_accessor type: :computer_screenshot
-
-            attr_reader file_id: String?
-
-            def file_id=: (String) -> String
-
-            attr_reader image_url: String?
-
-            def image_url=: (String) -> String
-
-            def initialize:
-              (
-                file_id: String,
-                image_url: String,
-                type: :computer_screenshot
-              ) -> void
-              | (
-                ?OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::output
-                | OpenAI::BaseModel data
-              ) -> void
-
-            def to_hash: -> OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::output
-          end
-
-          type acknowledged_safety_check =
-            { id: String, code: String, message: String }
-
-          class AcknowledgedSafetyCheck < OpenAI::BaseModel
-            attr_accessor id: String
-
-            attr_accessor code: String
-
-            attr_accessor message: String
-
-            def initialize:
-              (id: String, code: String, message: String) -> void
-              | (
-                ?OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::acknowledged_safety_check
-                | OpenAI::BaseModel data
-              ) -> void
-
-            def to_hash: -> OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::acknowledged_safety_check
-          end
-
-          type status = :in_progress | :completed | :incomplete
-
-          class Status < OpenAI::Enum
-            IN_PROGRESS: :in_progress
-            COMPLETED: :completed
-            INCOMPLETE: :incomplete
-
-            def self.values: -> ::Array[OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::status]
-          end
-        end
-
-        type function_call_output =
-          {
-            id: String,
-            call_id: String,
-            output: String,
-            type: :function_call_output,
-            status: OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput::status
-          }
-
-        class FunctionCallOutput < OpenAI::BaseModel
-          attr_accessor id: String
-
-          attr_accessor call_id: String
-
-          attr_accessor output: String
-
-          attr_accessor type: :function_call_output
-
-          attr_reader status: OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput::status?
-
-          def status=: (
-            OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput::status
-          ) -> OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput::status
-
-          def initialize:
-            (
-              id: String,
-              call_id: String,
-              output: String,
-              status: OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput::status,
-              type: :function_call_output
-            ) -> void
-            | (
-              ?OpenAI::Models::Responses::ResponseItemList::Data::function_call_output
-              | OpenAI::BaseModel data
-            ) -> void
-
-          def to_hash: -> OpenAI::Models::Responses::ResponseItemList::Data::function_call_output
-
-          type status = :in_progress | :completed | :incomplete
-
-          class Status < OpenAI::Enum
-            IN_PROGRESS: :in_progress
-            COMPLETED: :completed
-            INCOMPLETE: :incomplete
-
-            def self.values: -> ::Array[OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput::status]
-          end
-        end
+        def initialize: (
+          data: ::Array[OpenAI::Models::Responses::response_item],
+          first_id: String,
+          has_more: bool,
+          last_id: String,
+          ?object: :list
+        ) -> void
 
-        private def self.variants: -> [[:message, OpenAI::Models::Responses::ResponseItemList::Data::Message], [:message, OpenAI::Models::Responses::ResponseOutputMessage], [:file_search_call, OpenAI::Models::Responses::ResponseFileSearchToolCall], [:computer_call, OpenAI::Models::Responses::ResponseComputerToolCall], [:computer_call_output, OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput], [:web_search_call, OpenAI::Models::Responses::ResponseFunctionWebSearch], [:function_call, OpenAI::Models::Responses::ResponseFunctionToolCall], [:function_call_output, OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput]]
-      end
+        def to_hash: -> {
+          data: ::Array[OpenAI::Models::Responses::response_item],
+          first_id: String,
+          has_more: bool,
+          last_id: String,
+          object: :list
+        }
       end
     end
   end
- - def status=: ( - OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput::status - ) -> OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput::status - - def initialize: - ( - id: String, - call_id: String, - output: String, - status: OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput::status, - type: :function_call_output - ) -> void - | ( - ?OpenAI::Models::Responses::ResponseItemList::Data::function_call_output - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Responses::ResponseItemList::Data::function_call_output - - type status = :in_progress | :completed | :incomplete - - class Status < OpenAI::Enum - IN_PROGRESS: :in_progress - COMPLETED: :completed - INCOMPLETE: :incomplete - - def self.values: -> ::Array[OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput::status] - end - end + def initialize: ( + data: ::Array[OpenAI::Models::Responses::response_item], + first_id: String, + has_more: bool, + last_id: String, + ?object: :list + ) -> void - private def self.variants: -> [[:message, OpenAI::Models::Responses::ResponseItemList::Data::Message], [:message, OpenAI::Models::Responses::ResponseOutputMessage], [:file_search_call, OpenAI::Models::Responses::ResponseFileSearchToolCall], [:computer_call, OpenAI::Models::Responses::ResponseComputerToolCall], [:computer_call_output, OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput], [:web_search_call, OpenAI::Models::Responses::ResponseFunctionWebSearch], [:function_call, OpenAI::Models::Responses::ResponseFunctionToolCall], [:function_call_output, OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput]] - end + def to_hash: -> { + data: ::Array[OpenAI::Models::Responses::response_item], + first_id: String, + has_more: bool, + last_id: String, + object: :list + } end end end diff --git a/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs b/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs new file mode 100644 index 00000000..09b2df87 --- /dev/null +++ b/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs @@ -0,0 +1,42 @@ +module OpenAI + module Models + module Responses + type response_mcp_call_arguments_delta_event = + { + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.mcp_call_arguments.delta" + } + + class ResponseMcpCallArgumentsDeltaEvent < OpenAI::Internal::Type::BaseModel + attr_accessor delta: String + + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor type: :"response.mcp_call_arguments.delta" + + def initialize: ( + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + ?type: :"response.mcp_call_arguments.delta" + ) -> void + + def to_hash: -> { + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.mcp_call_arguments.delta" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs b/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs new file mode 100644 index 00000000..b997774e --- /dev/null +++ b/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs @@ -0,0 +1,42 @@ +module OpenAI + module Models + module Responses + type response_mcp_call_arguments_done_event = + { + arguments: String, + item_id: String, + output_index: 
Integer, + sequence_number: Integer, + type: :"response.mcp_call_arguments.done" + } + + class ResponseMcpCallArgumentsDoneEvent < OpenAI::Internal::Type::BaseModel + attr_accessor arguments: String + + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor type: :"response.mcp_call_arguments.done" + + def initialize: ( + arguments: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + ?type: :"response.mcp_call_arguments.done" + ) -> void + + def to_hash: -> { + arguments: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.mcp_call_arguments.done" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_mcp_call_completed_event.rbs b/sig/openai/models/responses/response_mcp_call_completed_event.rbs new file mode 100644 index 00000000..85f13ce9 --- /dev/null +++ b/sig/openai/models/responses/response_mcp_call_completed_event.rbs @@ -0,0 +1,37 @@ +module OpenAI + module Models + module Responses + type response_mcp_call_completed_event = + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.mcp_call.completed" + } + + class ResponseMcpCallCompletedEvent < OpenAI::Internal::Type::BaseModel + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor type: :"response.mcp_call.completed" + + def initialize: ( + item_id: String, + output_index: Integer, + sequence_number: Integer, + ?type: :"response.mcp_call.completed" + ) -> void + + def to_hash: -> { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.mcp_call.completed" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_mcp_call_failed_event.rbs b/sig/openai/models/responses/response_mcp_call_failed_event.rbs new file mode 100644 index 00000000..a38a0c39 --- /dev/null +++ b/sig/openai/models/responses/response_mcp_call_failed_event.rbs @@ -0,0 +1,37 @@ +module OpenAI + module Models + module Responses + type response_mcp_call_failed_event = + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.mcp_call.failed" + } + + class ResponseMcpCallFailedEvent < OpenAI::Internal::Type::BaseModel + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor type: :"response.mcp_call.failed" + + def initialize: ( + item_id: String, + output_index: Integer, + sequence_number: Integer, + ?type: :"response.mcp_call.failed" + ) -> void + + def to_hash: -> { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.mcp_call.failed" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_mcp_call_in_progress_event.rbs b/sig/openai/models/responses/response_mcp_call_in_progress_event.rbs new file mode 100644 index 00000000..7dc8afd2 --- /dev/null +++ b/sig/openai/models/responses/response_mcp_call_in_progress_event.rbs @@ -0,0 +1,37 @@ +module OpenAI + module Models + module Responses + type response_mcp_call_in_progress_event = + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.mcp_call.in_progress" + } + + class ResponseMcpCallInProgressEvent < OpenAI::Internal::Type::BaseModel + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor 
type: :"response.mcp_call.in_progress" + + def initialize: ( + item_id: String, + output_index: Integer, + sequence_number: Integer, + ?type: :"response.mcp_call.in_progress" + ) -> void + + def to_hash: -> { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.mcp_call.in_progress" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_mcp_list_tools_completed_event.rbs b/sig/openai/models/responses/response_mcp_list_tools_completed_event.rbs new file mode 100644 index 00000000..2e6aa85a --- /dev/null +++ b/sig/openai/models/responses/response_mcp_list_tools_completed_event.rbs @@ -0,0 +1,37 @@ +module OpenAI + module Models + module Responses + type response_mcp_list_tools_completed_event = + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.mcp_list_tools.completed" + } + + class ResponseMcpListToolsCompletedEvent < OpenAI::Internal::Type::BaseModel + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor type: :"response.mcp_list_tools.completed" + + def initialize: ( + item_id: String, + output_index: Integer, + sequence_number: Integer, + ?type: :"response.mcp_list_tools.completed" + ) -> void + + def to_hash: -> { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.mcp_list_tools.completed" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_mcp_list_tools_failed_event.rbs b/sig/openai/models/responses/response_mcp_list_tools_failed_event.rbs new file mode 100644 index 00000000..55e8ac00 --- /dev/null +++ b/sig/openai/models/responses/response_mcp_list_tools_failed_event.rbs @@ -0,0 +1,37 @@ +module OpenAI + module Models + module Responses + type response_mcp_list_tools_failed_event = + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.mcp_list_tools.failed" + } + + class ResponseMcpListToolsFailedEvent < OpenAI::Internal::Type::BaseModel + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor type: :"response.mcp_list_tools.failed" + + def initialize: ( + item_id: String, + output_index: Integer, + sequence_number: Integer, + ?type: :"response.mcp_list_tools.failed" + ) -> void + + def to_hash: -> { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.mcp_list_tools.failed" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_mcp_list_tools_in_progress_event.rbs b/sig/openai/models/responses/response_mcp_list_tools_in_progress_event.rbs new file mode 100644 index 00000000..2f8c7b30 --- /dev/null +++ b/sig/openai/models/responses/response_mcp_list_tools_in_progress_event.rbs @@ -0,0 +1,37 @@ +module OpenAI + module Models + module Responses + type response_mcp_list_tools_in_progress_event = + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.mcp_list_tools.in_progress" + } + + class ResponseMcpListToolsInProgressEvent < OpenAI::Internal::Type::BaseModel + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor type: :"response.mcp_list_tools.in_progress" + + def initialize: ( + item_id: String, + output_index: Integer, + sequence_number: Integer, + ?type: :"response.mcp_list_tools.in_progress" + ) -> void + + def to_hash: -> { + item_id: String, + 
output_index: Integer, + sequence_number: Integer, + type: :"response.mcp_list_tools.in_progress" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_output_audio.rbs b/sig/openai/models/responses/response_output_audio.rbs index 79e1d98c..0a8d8e3a 100644 --- a/sig/openai/models/responses/response_output_audio.rbs +++ b/sig/openai/models/responses/response_output_audio.rbs @@ -4,21 +4,24 @@ module OpenAI type response_output_audio = { data: String, transcript: String, type: :output_audio } - class ResponseOutputAudio < OpenAI::BaseModel + class ResponseOutputAudio < OpenAI::Internal::Type::BaseModel attr_accessor data: String attr_accessor transcript: String attr_accessor type: :output_audio - def initialize: - (data: String, transcript: String, type: :output_audio) -> void - | ( - ?OpenAI::Models::Responses::response_output_audio - | OpenAI::BaseModel data - ) -> void + def initialize: ( + data: String, + transcript: String, + ?type: :output_audio + ) -> void - def to_hash: -> OpenAI::Models::Responses::response_output_audio + def to_hash: -> { + data: String, + transcript: String, + type: :output_audio + } end end end diff --git a/sig/openai/models/responses/response_output_item.rbs b/sig/openai/models/responses/response_output_item.rbs index cb441e32..13b7bedc 100644 --- a/sig/openai/models/responses/response_output_item.rbs +++ b/sig/openai/models/responses/response_output_item.rbs @@ -2,15 +2,313 @@ module OpenAI module Models module Responses type response_output_item = - OpenAI::Models::Responses::ResponseOutputMessage - | OpenAI::Models::Responses::ResponseFileSearchToolCall - | OpenAI::Models::Responses::ResponseFunctionToolCall - | OpenAI::Models::Responses::ResponseFunctionWebSearch - | OpenAI::Models::Responses::ResponseComputerToolCall - | OpenAI::Models::Responses::ResponseReasoningItem - - class ResponseOutputItem < OpenAI::Union - private def self.variants: -> [[:message, OpenAI::Models::Responses::ResponseOutputMessage], [:file_search_call, OpenAI::Models::Responses::ResponseFileSearchToolCall], [:function_call, OpenAI::Models::Responses::ResponseFunctionToolCall], [:web_search_call, OpenAI::Models::Responses::ResponseFunctionWebSearch], [:computer_call, OpenAI::Models::Responses::ResponseComputerToolCall], [:reasoning, OpenAI::Models::Responses::ResponseReasoningItem]] + OpenAI::Responses::ResponseOutputMessage + | OpenAI::Responses::ResponseFileSearchToolCall + | OpenAI::Responses::ResponseFunctionToolCall + | OpenAI::Responses::ResponseFunctionWebSearch + | OpenAI::Responses::ResponseComputerToolCall + | OpenAI::Responses::ResponseReasoningItem + | OpenAI::Responses::ResponseOutputItem::ImageGenerationCall + | OpenAI::Responses::ResponseCodeInterpreterToolCall + | OpenAI::Responses::ResponseOutputItem::LocalShellCall + | OpenAI::Responses::ResponseOutputItem::McpCall + | OpenAI::Responses::ResponseOutputItem::McpListTools + | OpenAI::Responses::ResponseOutputItem::McpApprovalRequest + | OpenAI::Responses::ResponseCustomToolCall + + module ResponseOutputItem + extend OpenAI::Internal::Type::Union + + type image_generation_call = + { + id: String, + result: String?, + status: OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall::status, + type: :image_generation_call + } + + class ImageGenerationCall < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor result: String? 
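# --- Illustrative sketch; not part of the diff above ---
# The response_output_item union declared above now spans MCP, local-shell,
# and image-generation variants. A minimal consumer, assuming a configured
# `client` (OpenAI::Client); the model name is an assumption:
response = client.responses.create(model: "gpt-4.1", input: "Draw a cat")
response.output.each do |item|
  case item
  when OpenAI::Responses::ResponseOutputMessage
    item.content.each { |part| puts part.text if part.respond_to?(:text) }
  when OpenAI::Responses::ResponseOutputItem::ImageGenerationCall
    # `result` is String? (base64 image data); it can be nil while :generating
    puts "image #{item.id}: #{item.status}"
  end
end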
+ + attr_accessor status: OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall::status + + attr_accessor type: :image_generation_call + + def initialize: ( + id: String, + result: String?, + status: OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall::status, + ?type: :image_generation_call + ) -> void + + def to_hash: -> { + id: String, + result: String?, + status: OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall::status, + type: :image_generation_call + } + + type status = :in_progress | :completed | :generating | :failed + + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS: :in_progress + COMPLETED: :completed + GENERATING: :generating + FAILED: :failed + + def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall::status] + end + end + + type local_shell_call = + { + id: String, + action: OpenAI::Responses::ResponseOutputItem::LocalShellCall::Action, + call_id: String, + status: OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall::status, + type: :local_shell_call + } + + class LocalShellCall < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor action: OpenAI::Responses::ResponseOutputItem::LocalShellCall::Action + + attr_accessor call_id: String + + attr_accessor status: OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall::status + + attr_accessor type: :local_shell_call + + def initialize: ( + id: String, + action: OpenAI::Responses::ResponseOutputItem::LocalShellCall::Action, + call_id: String, + status: OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall::status, + ?type: :local_shell_call + ) -> void + + def to_hash: -> { + id: String, + action: OpenAI::Responses::ResponseOutputItem::LocalShellCall::Action, + call_id: String, + status: OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall::status, + type: :local_shell_call + } + + type action = + { + command: ::Array[String], + env: ::Hash[Symbol, String], + type: :exec, + timeout_ms: Integer?, + user: String?, + working_directory: String? + } + + class Action < OpenAI::Internal::Type::BaseModel + attr_accessor command: ::Array[String] + + attr_accessor env: ::Hash[Symbol, String] + + attr_accessor type: :exec + + attr_accessor timeout_ms: Integer? + + attr_accessor user: String? + + attr_accessor working_directory: String? + + def initialize: ( + command: ::Array[String], + env: ::Hash[Symbol, String], + ?timeout_ms: Integer?, + ?user: String?, + ?working_directory: String?, + ?type: :exec + ) -> void + + def to_hash: -> { + command: ::Array[String], + env: ::Hash[Symbol, String], + type: :exec, + timeout_ms: Integer?, + user: String?, + working_directory: String? + } + end + + type status = :in_progress | :completed | :incomplete + + module Status + extend OpenAI::Internal::Type::Enum + + IN_PROGRESS: :in_progress + COMPLETED: :completed + INCOMPLETE: :incomplete + + def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall::status] + end + end + + type mcp_call = + { + id: String, + arguments: String, + name: String, + server_label: String, + type: :mcp_call, + error: String?, + output: String? + } + + class McpCall < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor arguments: String + + attr_accessor name: String + + attr_accessor server_label: String + + attr_accessor type: :mcp_call + + attr_accessor error: String? + + attr_accessor output: String? 
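# --- Illustrative sketch; not part of the diff above ---
# Consuming the new MCP streaming events (response_mcp_call_* signatures
# earlier in this diff). Assumes a configured `client`; the model name is
# an assumption. `stream_raw` yields ResponseStreamEvent variants:
stream = client.responses.stream_raw(model: "gpt-4.1", input: "Use the MCP tool")
stream.each do |event|
  case event
  when OpenAI::Responses::ResponseMcpCallArgumentsDeltaEvent
    print event.delta # incremental JSON arguments for item `event.item_id`
  when OpenAI::Responses::ResponseMcpCallCompletedEvent
    puts "\nMCP call #{event.item_id} completed"
  when OpenAI::Responses::ResponseMcpCallFailedEvent
    warn "MCP call failed (sequence #{event.sequence_number})"
  end
end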
+ + def initialize: ( + id: String, + arguments: String, + name: String, + server_label: String, + ?error: String?, + ?output: String?, + ?type: :mcp_call + ) -> void + + def to_hash: -> { + id: String, + arguments: String, + name: String, + server_label: String, + type: :mcp_call, + error: String?, + output: String? + } + end + + type mcp_list_tools = + { + id: String, + server_label: String, + tools: ::Array[OpenAI::Responses::ResponseOutputItem::McpListTools::Tool], + type: :mcp_list_tools, + error: String? + } + + class McpListTools < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor server_label: String + + attr_accessor tools: ::Array[OpenAI::Responses::ResponseOutputItem::McpListTools::Tool] + + attr_accessor type: :mcp_list_tools + + attr_accessor error: String? + + def initialize: ( + id: String, + server_label: String, + tools: ::Array[OpenAI::Responses::ResponseOutputItem::McpListTools::Tool], + ?error: String?, + ?type: :mcp_list_tools + ) -> void + + def to_hash: -> { + id: String, + server_label: String, + tools: ::Array[OpenAI::Responses::ResponseOutputItem::McpListTools::Tool], + type: :mcp_list_tools, + error: String? + } + + type tool = + { + input_schema: top, + name: String, + annotations: top?, + description: String? + } + + class Tool < OpenAI::Internal::Type::BaseModel + attr_accessor input_schema: top + + attr_accessor name: String + + attr_accessor annotations: top? + + attr_accessor description: String? + + def initialize: ( + input_schema: top, + name: String, + ?annotations: top?, + ?description: String? + ) -> void + + def to_hash: -> { + input_schema: top, + name: String, + annotations: top?, + description: String? + } + end + end + + type mcp_approval_request = + { + id: String, + arguments: String, + name: String, + server_label: String, + type: :mcp_approval_request + } + + class McpApprovalRequest < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor arguments: String + + attr_accessor name: String + + attr_accessor server_label: String + + attr_accessor type: :mcp_approval_request + + def initialize: ( + id: String, + arguments: String, + name: String, + server_label: String, + ?type: :mcp_approval_request + ) -> void + + def to_hash: -> { + id: String, + arguments: String, + name: String, + server_label: String, + type: :mcp_approval_request + } + end + + def self?.variants: -> ::Array[OpenAI::Models::Responses::response_output_item] end end end diff --git a/sig/openai/models/responses/response_output_item_added_event.rbs b/sig/openai/models/responses/response_output_item_added_event.rbs index a8576913..25781321 100644 --- a/sig/openai/models/responses/response_output_item_added_event.rbs +++ b/sig/openai/models/responses/response_output_item_added_event.rbs @@ -5,28 +5,32 @@ module OpenAI { item: OpenAI::Models::Responses::response_output_item, output_index: Integer, + sequence_number: Integer, type: :"response.output_item.added" } - class ResponseOutputItemAddedEvent < OpenAI::BaseModel + class ResponseOutputItemAddedEvent < OpenAI::Internal::Type::BaseModel attr_accessor item: OpenAI::Models::Responses::response_output_item attr_accessor output_index: Integer + attr_accessor sequence_number: Integer + attr_accessor type: :"response.output_item.added" - def initialize: - ( - item: OpenAI::Models::Responses::response_output_item, - output_index: Integer, - type: :"response.output_item.added" - ) -> void - | ( - ?OpenAI::Models::Responses::response_output_item_added_event - | OpenAI::BaseModel data 
- ) -> void + def initialize: ( + item: OpenAI::Models::Responses::response_output_item, + output_index: Integer, + sequence_number: Integer, + ?type: :"response.output_item.added" + ) -> void - def to_hash: -> OpenAI::Models::Responses::response_output_item_added_event + def to_hash: -> { + item: OpenAI::Models::Responses::response_output_item, + output_index: Integer, + sequence_number: Integer, + type: :"response.output_item.added" + } end end end diff --git a/sig/openai/models/responses/response_output_item_done_event.rbs b/sig/openai/models/responses/response_output_item_done_event.rbs index 9f91df23..3012b2ab 100644 --- a/sig/openai/models/responses/response_output_item_done_event.rbs +++ b/sig/openai/models/responses/response_output_item_done_event.rbs @@ -5,28 +5,32 @@ module OpenAI { item: OpenAI::Models::Responses::response_output_item, output_index: Integer, + sequence_number: Integer, type: :"response.output_item.done" } - class ResponseOutputItemDoneEvent < OpenAI::BaseModel + class ResponseOutputItemDoneEvent < OpenAI::Internal::Type::BaseModel attr_accessor item: OpenAI::Models::Responses::response_output_item attr_accessor output_index: Integer + attr_accessor sequence_number: Integer + attr_accessor type: :"response.output_item.done" - def initialize: - ( - item: OpenAI::Models::Responses::response_output_item, - output_index: Integer, - type: :"response.output_item.done" - ) -> void - | ( - ?OpenAI::Models::Responses::response_output_item_done_event - | OpenAI::BaseModel data - ) -> void + def initialize: ( + item: OpenAI::Models::Responses::response_output_item, + output_index: Integer, + sequence_number: Integer, + ?type: :"response.output_item.done" + ) -> void - def to_hash: -> OpenAI::Models::Responses::response_output_item_done_event + def to_hash: -> { + item: OpenAI::Models::Responses::response_output_item, + output_index: Integer, + sequence_number: Integer, + type: :"response.output_item.done" + } end end end diff --git a/sig/openai/models/responses/response_output_message.rbs b/sig/openai/models/responses/response_output_message.rbs index dc20c3b5..223256b8 100644 --- a/sig/openai/models/responses/response_output_message.rbs +++ b/sig/openai/models/responses/response_output_message.rbs @@ -10,7 +10,7 @@ module OpenAI type: :message } - class ResponseOutputMessage < OpenAI::BaseModel + class ResponseOutputMessage < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor content: ::Array[OpenAI::Models::Responses::ResponseOutputMessage::content] @@ -21,37 +21,42 @@ module OpenAI attr_accessor type: :message - def initialize: - ( - id: String, - content: ::Array[OpenAI::Models::Responses::ResponseOutputMessage::content], - status: OpenAI::Models::Responses::ResponseOutputMessage::status, - role: :assistant, - type: :message - ) -> void - | ( - ?OpenAI::Models::Responses::response_output_message - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Responses::response_output_message + def initialize: ( + id: String, + content: ::Array[OpenAI::Models::Responses::ResponseOutputMessage::content], + status: OpenAI::Models::Responses::ResponseOutputMessage::status, + ?role: :assistant, + ?type: :message + ) -> void + + def to_hash: -> { + id: String, + content: ::Array[OpenAI::Models::Responses::ResponseOutputMessage::content], + role: :assistant, + status: OpenAI::Models::Responses::ResponseOutputMessage::status, + type: :message + } type content = - OpenAI::Models::Responses::ResponseOutputText - | 
OpenAI::Models::Responses::ResponseOutputRefusal + OpenAI::Responses::ResponseOutputText + | OpenAI::Responses::ResponseOutputRefusal + + module Content + extend OpenAI::Internal::Type::Union - class Content < OpenAI::Union - private def self.variants: -> [[:output_text, OpenAI::Models::Responses::ResponseOutputText], [:refusal, OpenAI::Models::Responses::ResponseOutputRefusal]] + def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseOutputMessage::content] end type status = :in_progress | :completed | :incomplete - class Status < OpenAI::Enum + module Status + extend OpenAI::Internal::Type::Enum + IN_PROGRESS: :in_progress COMPLETED: :completed INCOMPLETE: :incomplete - def self.values: -> ::Array[OpenAI::Models::Responses::ResponseOutputMessage::status] + def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseOutputMessage::status] end end end diff --git a/sig/openai/models/responses/response_output_refusal.rbs b/sig/openai/models/responses/response_output_refusal.rbs index adbcb084..e2347cc1 100644 --- a/sig/openai/models/responses/response_output_refusal.rbs +++ b/sig/openai/models/responses/response_output_refusal.rbs @@ -3,19 +3,14 @@ module OpenAI module Responses type response_output_refusal = { refusal: String, type: :refusal } - class ResponseOutputRefusal < OpenAI::BaseModel + class ResponseOutputRefusal < OpenAI::Internal::Type::BaseModel attr_accessor refusal: String attr_accessor type: :refusal - def initialize: - (refusal: String, type: :refusal) -> void - | ( - ?OpenAI::Models::Responses::response_output_refusal - | OpenAI::BaseModel data - ) -> void + def initialize: (refusal: String, ?type: :refusal) -> void - def to_hash: -> OpenAI::Models::Responses::response_output_refusal + def to_hash: -> { refusal: String, type: :refusal } end end end diff --git a/sig/openai/models/responses/response_output_text.rbs b/sig/openai/models/responses/response_output_text.rbs index 6b128276..c1ad5888 100644 --- a/sig/openai/models/responses/response_output_text.rbs +++ b/sig/openai/models/responses/response_output_text.rbs @@ -5,53 +5,76 @@ module OpenAI { annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation], text: String, - type: :output_text + type: :output_text, + logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob] } - class ResponseOutputText < OpenAI::BaseModel + class ResponseOutputText < OpenAI::Internal::Type::BaseModel attr_accessor annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation] attr_accessor text: String attr_accessor type: :output_text - def initialize: - ( - annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation], - text: String, - type: :output_text - ) -> void - | ( - ?OpenAI::Models::Responses::response_output_text - | OpenAI::BaseModel data - ) -> void + attr_reader logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob]? 
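# --- Illustrative sketch; not part of the diff above ---
# Reading the new optional `logprobs` on ResponseOutputText (see the Logprob
# signatures that follow). It is an attr_reader typed `::Array[...]?` and is
# populated only when logprobs were requested, so nil-check before use.
# Assumes `response` came from a prior `client.responses.create` call:
response.output.each do |item|
  next unless item.is_a?(OpenAI::Responses::ResponseOutputMessage)
  item.content.each do |part|
    next unless part.is_a?(OpenAI::Responses::ResponseOutputText)
    (part.logprobs || []).each do |lp|
      tops = lp.top_logprobs.map { |t| "#{t.token}=#{t.logprob.round(3)}" }
      puts "#{lp.token}: #{lp.logprob.round(3)} (top: #{tops.join(', ')})"
    end
  end
end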
+ + def logprobs=: ( + ::Array[OpenAI::Responses::ResponseOutputText::Logprob] + ) -> ::Array[OpenAI::Responses::ResponseOutputText::Logprob] - def to_hash: -> OpenAI::Models::Responses::response_output_text + def initialize: ( + annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation], + text: String, + ?logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob], + ?type: :output_text + ) -> void + + def to_hash: -> { + annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation], + text: String, + type: :output_text, + logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob] + } type annotation = - OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation - | OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation - | OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath + OpenAI::Responses::ResponseOutputText::Annotation::FileCitation + | OpenAI::Responses::ResponseOutputText::Annotation::URLCitation + | OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation + | OpenAI::Responses::ResponseOutputText::Annotation::FilePath + + module Annotation + extend OpenAI::Internal::Type::Union - class Annotation < OpenAI::Union type file_citation = - { file_id: String, index: Integer, type: :file_citation } + { + file_id: String, + filename: String, + index: Integer, + type: :file_citation + } - class FileCitation < OpenAI::BaseModel + class FileCitation < OpenAI::Internal::Type::BaseModel attr_accessor file_id: String + attr_accessor filename: String + attr_accessor index: Integer attr_accessor type: :file_citation - def initialize: - (file_id: String, index: Integer, type: :file_citation) -> void - | ( - ?OpenAI::Models::Responses::ResponseOutputText::Annotation::file_citation - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Responses::ResponseOutputText::Annotation::file_citation + def initialize: ( + file_id: String, + filename: String, + index: Integer, + ?type: :file_citation + ) -> void + + def to_hash: -> { + file_id: String, + filename: String, + index: Integer, + type: :file_citation + } end type url_citation = @@ -63,7 +86,7 @@ module OpenAI url: String } - class URLCitation < OpenAI::BaseModel + class URLCitation < OpenAI::Internal::Type::BaseModel attr_accessor end_index: Integer attr_accessor start_index: Integer @@ -74,42 +97,143 @@ module OpenAI attr_accessor url: String - def initialize: - ( - end_index: Integer, - start_index: Integer, - title: String, - url: String, - type: :url_citation - ) -> void - | ( - ?OpenAI::Models::Responses::ResponseOutputText::Annotation::url_citation - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Responses::ResponseOutputText::Annotation::url_citation + def initialize: ( + end_index: Integer, + start_index: Integer, + title: String, + url: String, + ?type: :url_citation + ) -> void + + def to_hash: -> { + end_index: Integer, + start_index: Integer, + title: String, + type: :url_citation, + url: String + } + end + + type container_file_citation = + { + container_id: String, + end_index: Integer, + file_id: String, + filename: String, + start_index: Integer, + type: :container_file_citation + } + + class ContainerFileCitation < OpenAI::Internal::Type::BaseModel + attr_accessor container_id: String + + attr_accessor end_index: Integer + + attr_accessor file_id: String + + attr_accessor filename: String + + attr_accessor start_index: Integer + + attr_accessor type: 
:container_file_citation + + def initialize: ( + container_id: String, + end_index: Integer, + file_id: String, + filename: String, + start_index: Integer, + ?type: :container_file_citation + ) -> void + + def to_hash: -> { + container_id: String, + end_index: Integer, + file_id: String, + filename: String, + start_index: Integer, + type: :container_file_citation + } end type file_path = { file_id: String, index: Integer, type: :file_path } - class FilePath < OpenAI::BaseModel + class FilePath < OpenAI::Internal::Type::BaseModel attr_accessor file_id: String attr_accessor index: Integer attr_accessor type: :file_path - def initialize: - (file_id: String, index: Integer, type: :file_path) -> void - | ( - ?OpenAI::Models::Responses::ResponseOutputText::Annotation::file_path - | OpenAI::BaseModel data - ) -> void + def initialize: ( + file_id: String, + index: Integer, + ?type: :file_path + ) -> void - def to_hash: -> OpenAI::Models::Responses::ResponseOutputText::Annotation::file_path + def to_hash: -> { + file_id: String, + index: Integer, + type: :file_path + } end - private def self.variants: -> [[:file_citation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation], [:url_citation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation], [:file_path, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath]] + def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation] + end + + type logprob = + { + token: String, + bytes: ::Array[Integer], + logprob: Float, + top_logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob] + } + + class Logprob < OpenAI::Internal::Type::BaseModel + attr_accessor token: String + + attr_accessor bytes: ::Array[Integer] + + attr_accessor logprob: Float + + attr_accessor top_logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob] + + def initialize: ( + token: String, + bytes: ::Array[Integer], + logprob: Float, + top_logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob] + ) -> void + + def to_hash: -> { + token: String, + bytes: ::Array[Integer], + logprob: Float, + top_logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob] + } + + type top_logprob = + { token: String, bytes: ::Array[Integer], logprob: Float } + + class TopLogprob < OpenAI::Internal::Type::BaseModel + attr_accessor token: String + + attr_accessor bytes: ::Array[Integer] + + attr_accessor logprob: Float + + def initialize: ( + token: String, + bytes: ::Array[Integer], + logprob: Float + ) -> void + + def to_hash: -> { + token: String, + bytes: ::Array[Integer], + logprob: Float + } + end end end end diff --git a/sig/openai/models/responses/response_output_text_annotation_added_event.rbs b/sig/openai/models/responses/response_output_text_annotation_added_event.rbs new file mode 100644 index 00000000..177650c3 --- /dev/null +++ b/sig/openai/models/responses/response_output_text_annotation_added_event.rbs @@ -0,0 +1,52 @@ +module OpenAI + module Models + module Responses + type response_output_text_annotation_added_event = + { + annotation: top, + annotation_index: Integer, + content_index: Integer, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.output_text.annotation.added" + } + + class ResponseOutputTextAnnotationAddedEvent < OpenAI::Internal::Type::BaseModel + attr_accessor annotation: top + + attr_accessor annotation_index: Integer + + attr_accessor content_index: Integer + + 
attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor type: :"response.output_text.annotation.added" + + def initialize: ( + annotation: top, + annotation_index: Integer, + content_index: Integer, + item_id: String, + output_index: Integer, + sequence_number: Integer, + ?type: :"response.output_text.annotation.added" + ) -> void + + def to_hash: -> { + annotation: top, + annotation_index: Integer, + content_index: Integer, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.output_text.annotation.added" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_prompt.rbs b/sig/openai/models/responses/response_prompt.rbs new file mode 100644 index 00000000..0ccc20f8 --- /dev/null +++ b/sig/openai/models/responses/response_prompt.rbs @@ -0,0 +1,44 @@ +module OpenAI + module Models + module Responses + type response_prompt = + { + id: String, + variables: ::Hash[Symbol, OpenAI::Models::Responses::ResponsePrompt::variable]?, + version: String? + } + + class ResponsePrompt < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor variables: ::Hash[Symbol, OpenAI::Models::Responses::ResponsePrompt::variable]? + + attr_accessor version: String? + + def initialize: ( + id: String, + ?variables: ::Hash[Symbol, OpenAI::Models::Responses::ResponsePrompt::variable]?, + ?version: String? + ) -> void + + def to_hash: -> { + id: String, + variables: ::Hash[Symbol, OpenAI::Models::Responses::ResponsePrompt::variable]?, + version: String? + } + + type variable = + String + | OpenAI::Responses::ResponseInputText + | OpenAI::Responses::ResponseInputImage + | OpenAI::Responses::ResponseInputFile + + module Variable + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponsePrompt::variable] + end + end + end + end +end diff --git a/sig/openai/models/responses/response_queued_event.rbs b/sig/openai/models/responses/response_queued_event.rbs new file mode 100644 index 00000000..4db7787a --- /dev/null +++ b/sig/openai/models/responses/response_queued_event.rbs @@ -0,0 +1,32 @@ +module OpenAI + module Models + module Responses + type response_queued_event = + { + response: OpenAI::Responses::Response, + sequence_number: Integer, + type: :"response.queued" + } + + class ResponseQueuedEvent < OpenAI::Internal::Type::BaseModel + attr_accessor response: OpenAI::Responses::Response + + attr_accessor sequence_number: Integer + + attr_accessor type: :"response.queued" + + def initialize: ( + response: OpenAI::Responses::Response, + sequence_number: Integer, + ?type: :"response.queued" + ) -> void + + def to_hash: -> { + response: OpenAI::Responses::Response, + sequence_number: Integer, + type: :"response.queued" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_reasoning_item.rbs b/sig/openai/models/responses/response_reasoning_item.rbs index b9b01efa..e4e79c73 100644 --- a/sig/openai/models/responses/response_reasoning_item.rbs +++ b/sig/openai/models/responses/response_reasoning_item.rbs @@ -4,63 +4,86 @@ module OpenAI type response_reasoning_item = { id: String, - summary: ::Array[OpenAI::Models::Responses::ResponseReasoningItem::Summary], + summary: ::Array[OpenAI::Responses::ResponseReasoningItem::Summary], type: :reasoning, + content: ::Array[OpenAI::Responses::ResponseReasoningItem::Content], + encrypted_content: String?, status: 
OpenAI::Models::Responses::ResponseReasoningItem::status } - class ResponseReasoningItem < OpenAI::BaseModel + class ResponseReasoningItem < OpenAI::Internal::Type::BaseModel attr_accessor id: String - attr_accessor summary: ::Array[OpenAI::Models::Responses::ResponseReasoningItem::Summary] + attr_accessor summary: ::Array[OpenAI::Responses::ResponseReasoningItem::Summary] attr_accessor type: :reasoning + attr_reader content: ::Array[OpenAI::Responses::ResponseReasoningItem::Content]? + + def content=: ( + ::Array[OpenAI::Responses::ResponseReasoningItem::Content] + ) -> ::Array[OpenAI::Responses::ResponseReasoningItem::Content] + + attr_accessor encrypted_content: String? + attr_reader status: OpenAI::Models::Responses::ResponseReasoningItem::status? def status=: ( OpenAI::Models::Responses::ResponseReasoningItem::status ) -> OpenAI::Models::Responses::ResponseReasoningItem::status - def initialize: - ( - id: String, - summary: ::Array[OpenAI::Models::Responses::ResponseReasoningItem::Summary], - status: OpenAI::Models::Responses::ResponseReasoningItem::status, - type: :reasoning - ) -> void - | ( - ?OpenAI::Models::Responses::response_reasoning_item - | OpenAI::BaseModel data - ) -> void + def initialize: ( + id: String, + summary: ::Array[OpenAI::Responses::ResponseReasoningItem::Summary], + ?content: ::Array[OpenAI::Responses::ResponseReasoningItem::Content], + ?encrypted_content: String?, + ?status: OpenAI::Models::Responses::ResponseReasoningItem::status, + ?type: :reasoning + ) -> void - def to_hash: -> OpenAI::Models::Responses::response_reasoning_item + def to_hash: -> { + id: String, + summary: ::Array[OpenAI::Responses::ResponseReasoningItem::Summary], + type: :reasoning, + content: ::Array[OpenAI::Responses::ResponseReasoningItem::Content], + encrypted_content: String?, + status: OpenAI::Models::Responses::ResponseReasoningItem::status + } type summary = { text: String, type: :summary_text } - class Summary < OpenAI::BaseModel + class Summary < OpenAI::Internal::Type::BaseModel attr_accessor text: String attr_accessor type: :summary_text - def initialize: - (text: String, type: :summary_text) -> void - | ( - ?OpenAI::Models::Responses::ResponseReasoningItem::summary - | OpenAI::BaseModel data - ) -> void + def initialize: (text: String, ?type: :summary_text) -> void - def to_hash: -> OpenAI::Models::Responses::ResponseReasoningItem::summary + def to_hash: -> { text: String, type: :summary_text } + end + + type content = { text: String, type: :reasoning_text } + + class Content < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :reasoning_text + + def initialize: (text: String, ?type: :reasoning_text) -> void + + def to_hash: -> { text: String, type: :reasoning_text } end type status = :in_progress | :completed | :incomplete - class Status < OpenAI::Enum + module Status + extend OpenAI::Internal::Type::Enum + IN_PROGRESS: :in_progress COMPLETED: :completed INCOMPLETE: :incomplete - def self.values: -> ::Array[OpenAI::Models::Responses::ResponseReasoningItem::status] + def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseReasoningItem::status] end end end diff --git a/sig/openai/models/responses/response_reasoning_summary_part_added_event.rbs b/sig/openai/models/responses/response_reasoning_summary_part_added_event.rbs new file mode 100644 index 00000000..597bae6f --- /dev/null +++ b/sig/openai/models/responses/response_reasoning_summary_part_added_event.rbs @@ -0,0 +1,59 @@ +module OpenAI + module Models + module Responses + type 
response_reasoning_summary_part_added_event = + { + item_id: String, + output_index: Integer, + part: OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent::Part, + sequence_number: Integer, + summary_index: Integer, + type: :"response.reasoning_summary_part.added" + } + + class ResponseReasoningSummaryPartAddedEvent < OpenAI::Internal::Type::BaseModel + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor part: OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent::Part + + attr_accessor sequence_number: Integer + + attr_accessor summary_index: Integer + + attr_accessor type: :"response.reasoning_summary_part.added" + + def initialize: ( + item_id: String, + output_index: Integer, + part: OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent::Part, + sequence_number: Integer, + summary_index: Integer, + ?type: :"response.reasoning_summary_part.added" + ) -> void + + def to_hash: -> { + item_id: String, + output_index: Integer, + part: OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent::Part, + sequence_number: Integer, + summary_index: Integer, + type: :"response.reasoning_summary_part.added" + } + + type part = { text: String, type: :summary_text } + + class Part < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :summary_text + + def initialize: (text: String, ?type: :summary_text) -> void + + def to_hash: -> { text: String, type: :summary_text } + end + end + end + end +end diff --git a/sig/openai/models/responses/response_reasoning_summary_part_done_event.rbs b/sig/openai/models/responses/response_reasoning_summary_part_done_event.rbs new file mode 100644 index 00000000..79c25aa5 --- /dev/null +++ b/sig/openai/models/responses/response_reasoning_summary_part_done_event.rbs @@ -0,0 +1,59 @@ +module OpenAI + module Models + module Responses + type response_reasoning_summary_part_done_event = + { + item_id: String, + output_index: Integer, + part: OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent::Part, + sequence_number: Integer, + summary_index: Integer, + type: :"response.reasoning_summary_part.done" + } + + class ResponseReasoningSummaryPartDoneEvent < OpenAI::Internal::Type::BaseModel + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor part: OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent::Part + + attr_accessor sequence_number: Integer + + attr_accessor summary_index: Integer + + attr_accessor type: :"response.reasoning_summary_part.done" + + def initialize: ( + item_id: String, + output_index: Integer, + part: OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent::Part, + sequence_number: Integer, + summary_index: Integer, + ?type: :"response.reasoning_summary_part.done" + ) -> void + + def to_hash: -> { + item_id: String, + output_index: Integer, + part: OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent::Part, + sequence_number: Integer, + summary_index: Integer, + type: :"response.reasoning_summary_part.done" + } + + type part = { text: String, type: :summary_text } + + class Part < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :summary_text + + def initialize: (text: String, ?type: :summary_text) -> void + + def to_hash: -> { text: String, type: :summary_text } + end + end + end + end +end diff --git a/sig/openai/models/responses/response_reasoning_summary_text_delta_event.rbs b/sig/openai/models/responses/response_reasoning_summary_text_delta_event.rbs new file mode 100644 index 
00000000..8d39bef7 --- /dev/null +++ b/sig/openai/models/responses/response_reasoning_summary_text_delta_event.rbs @@ -0,0 +1,47 @@ +module OpenAI + module Models + module Responses + type response_reasoning_summary_text_delta_event = + { + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + summary_index: Integer, + type: :"response.reasoning_summary_text.delta" + } + + class ResponseReasoningSummaryTextDeltaEvent < OpenAI::Internal::Type::BaseModel + attr_accessor delta: String + + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor summary_index: Integer + + attr_accessor type: :"response.reasoning_summary_text.delta" + + def initialize: ( + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + summary_index: Integer, + ?type: :"response.reasoning_summary_text.delta" + ) -> void + + def to_hash: -> { + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + summary_index: Integer, + type: :"response.reasoning_summary_text.delta" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_reasoning_summary_text_done_event.rbs b/sig/openai/models/responses/response_reasoning_summary_text_done_event.rbs new file mode 100644 index 00000000..50cffece --- /dev/null +++ b/sig/openai/models/responses/response_reasoning_summary_text_done_event.rbs @@ -0,0 +1,47 @@ +module OpenAI + module Models + module Responses + type response_reasoning_summary_text_done_event = + { + item_id: String, + output_index: Integer, + sequence_number: Integer, + summary_index: Integer, + text: String, + type: :"response.reasoning_summary_text.done" + } + + class ResponseReasoningSummaryTextDoneEvent < OpenAI::Internal::Type::BaseModel + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor summary_index: Integer + + attr_accessor text: String + + attr_accessor type: :"response.reasoning_summary_text.done" + + def initialize: ( + item_id: String, + output_index: Integer, + sequence_number: Integer, + summary_index: Integer, + text: String, + ?type: :"response.reasoning_summary_text.done" + ) -> void + + def to_hash: -> { + item_id: String, + output_index: Integer, + sequence_number: Integer, + summary_index: Integer, + text: String, + type: :"response.reasoning_summary_text.done" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_reasoning_text_delta_event.rbs b/sig/openai/models/responses/response_reasoning_text_delta_event.rbs new file mode 100644 index 00000000..a8d33a4f --- /dev/null +++ b/sig/openai/models/responses/response_reasoning_text_delta_event.rbs @@ -0,0 +1,47 @@ +module OpenAI + module Models + module Responses + type response_reasoning_text_delta_event = + { + content_index: Integer, + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.reasoning_text.delta" + } + + class ResponseReasoningTextDeltaEvent < OpenAI::Internal::Type::BaseModel + attr_accessor content_index: Integer + + attr_accessor delta: String + + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor type: :"response.reasoning_text.delta" + + def initialize: ( + content_index: Integer, + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + ?type: 
:"response.reasoning_text.delta" + ) -> void + + def to_hash: -> { + content_index: Integer, + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.reasoning_text.delta" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_reasoning_text_done_event.rbs b/sig/openai/models/responses/response_reasoning_text_done_event.rbs new file mode 100644 index 00000000..9e3712b0 --- /dev/null +++ b/sig/openai/models/responses/response_reasoning_text_done_event.rbs @@ -0,0 +1,47 @@ +module OpenAI + module Models + module Responses + type response_reasoning_text_done_event = + { + content_index: Integer, + item_id: String, + output_index: Integer, + sequence_number: Integer, + text: String, + type: :"response.reasoning_text.done" + } + + class ResponseReasoningTextDoneEvent < OpenAI::Internal::Type::BaseModel + attr_accessor content_index: Integer + + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor text: String + + attr_accessor type: :"response.reasoning_text.done" + + def initialize: ( + content_index: Integer, + item_id: String, + output_index: Integer, + sequence_number: Integer, + text: String, + ?type: :"response.reasoning_text.done" + ) -> void + + def to_hash: -> { + content_index: Integer, + item_id: String, + output_index: Integer, + sequence_number: Integer, + text: String, + type: :"response.reasoning_text.done" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_refusal_delta_event.rbs b/sig/openai/models/responses/response_refusal_delta_event.rbs index eb50a0ed..4985d6bd 100644 --- a/sig/openai/models/responses/response_refusal_delta_event.rbs +++ b/sig/openai/models/responses/response_refusal_delta_event.rbs @@ -7,10 +7,11 @@ module OpenAI delta: String, item_id: String, output_index: Integer, + sequence_number: Integer, type: :"response.refusal.delta" } - class ResponseRefusalDeltaEvent < OpenAI::BaseModel + class ResponseRefusalDeltaEvent < OpenAI::Internal::Type::BaseModel attr_accessor content_index: Integer attr_accessor delta: String @@ -19,22 +20,27 @@ module OpenAI attr_accessor output_index: Integer + attr_accessor sequence_number: Integer + attr_accessor type: :"response.refusal.delta" - def initialize: - ( - content_index: Integer, - delta: String, - item_id: String, - output_index: Integer, - type: :"response.refusal.delta" - ) -> void - | ( - ?OpenAI::Models::Responses::response_refusal_delta_event - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Responses::response_refusal_delta_event + def initialize: ( + content_index: Integer, + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + ?type: :"response.refusal.delta" + ) -> void + + def to_hash: -> { + content_index: Integer, + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.refusal.delta" + } end end end diff --git a/sig/openai/models/responses/response_refusal_done_event.rbs b/sig/openai/models/responses/response_refusal_done_event.rbs index 0fbfff6e..ce78512b 100644 --- a/sig/openai/models/responses/response_refusal_done_event.rbs +++ b/sig/openai/models/responses/response_refusal_done_event.rbs @@ -7,10 +7,11 @@ module OpenAI item_id: String, output_index: Integer, refusal: String, + sequence_number: Integer, type: :"response.refusal.done" } - class ResponseRefusalDoneEvent < OpenAI::BaseModel + class 
ResponseRefusalDoneEvent < OpenAI::Internal::Type::BaseModel attr_accessor content_index: Integer attr_accessor item_id: String @@ -19,22 +20,27 @@ module OpenAI attr_accessor refusal: String + attr_accessor sequence_number: Integer + attr_accessor type: :"response.refusal.done" - def initialize: - ( - content_index: Integer, - item_id: String, - output_index: Integer, - refusal: String, - type: :"response.refusal.done" - ) -> void - | ( - ?OpenAI::Models::Responses::response_refusal_done_event - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Responses::response_refusal_done_event + def initialize: ( + content_index: Integer, + item_id: String, + output_index: Integer, + refusal: String, + sequence_number: Integer, + ?type: :"response.refusal.done" + ) -> void + + def to_hash: -> { + content_index: Integer, + item_id: String, + output_index: Integer, + refusal: String, + sequence_number: Integer, + type: :"response.refusal.done" + } end end end diff --git a/sig/openai/models/responses/response_retrieve_params.rbs b/sig/openai/models/responses/response_retrieve_params.rbs index 23530635..66b490f5 100644 --- a/sig/openai/models/responses/response_retrieve_params.rbs +++ b/sig/openai/models/responses/response_retrieve_params.rbs @@ -2,12 +2,16 @@ module OpenAI module Models module Responses type response_retrieve_params = - { include: ::Array[OpenAI::Models::Responses::response_includable] } - & OpenAI::request_parameters + { + include: ::Array[OpenAI::Models::Responses::response_includable], + include_obfuscation: bool, + starting_after: Integer + } + & OpenAI::Internal::Type::request_parameters - class ResponseRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class ResponseRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_reader include: ::Array[OpenAI::Models::Responses::response_includable]? @@ -15,17 +19,27 @@ module OpenAI ::Array[OpenAI::Models::Responses::response_includable] ) -> ::Array[OpenAI::Models::Responses::response_includable] - def initialize: - ( - include: ::Array[OpenAI::Models::Responses::response_includable], - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::Responses::response_retrieve_params - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::Responses::response_retrieve_params + attr_reader include_obfuscation: bool? + + def include_obfuscation=: (bool) -> bool + + attr_reader starting_after: Integer? 
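# --- Illustrative sketch; not part of the diff above ---
# ResponseRetrieveParams gains `include_obfuscation` and `starting_after`
# (both optional, per the `?`-prefixed initialize keys). They chiefly matter
# for streamed retrievals. The response id and include value are
# assumptions, as is the streaming variant name (`retrieve_streaming`):
response = client.responses.retrieve(
  "resp_123",
  include: [:"message.output_text.logprobs"]
)
# Resume a streamed retrieval after sequence number 42, with the
# obfuscation padding disabled:
stream = client.responses.retrieve_streaming(
  "resp_123",
  starting_after: 42,
  include_obfuscation: false
)
stream.each { |event| print event.delta if event.respond_to?(:delta) }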
+
+        def starting_after=: (Integer) -> Integer
+
+        def initialize: (
+          ?include: ::Array[OpenAI::Models::Responses::response_includable],
+          ?include_obfuscation: bool,
+          ?starting_after: Integer,
+          ?request_options: OpenAI::request_opts
+        ) -> void
+
+        def to_hash: -> {
+          include: ::Array[OpenAI::Models::Responses::response_includable],
+          include_obfuscation: bool,
+          starting_after: Integer,
+          request_options: OpenAI::RequestOptions
+        }
       end
     end
   end
diff --git a/sig/openai/models/responses/response_status.rbs b/sig/openai/models/responses/response_status.rbs
index 17349a0c..e54cd85f 100644
--- a/sig/openai/models/responses/response_status.rbs
+++ b/sig/openai/models/responses/response_status.rbs
@@ -1,15 +1,20 @@
 module OpenAI
   module Models
     module Responses
-      type response_status = :completed | :failed | :in_progress | :incomplete
+      type response_status =
+        :completed | :failed | :in_progress | :cancelled | :queued | :incomplete
+
+      module ResponseStatus
+        extend OpenAI::Internal::Type::Enum
 
-      class ResponseStatus < OpenAI::Enum
         COMPLETED: :completed
         FAILED: :failed
         IN_PROGRESS: :in_progress
+        CANCELLED: :cancelled
+        QUEUED: :queued
         INCOMPLETE: :incomplete
 
-        def self.values: -> ::Array[OpenAI::Models::Responses::response_status]
+        def self?.values: -> ::Array[OpenAI::Models::Responses::response_status]
       end
     end
   end
diff --git a/sig/openai/models/responses/response_stream_event.rbs b/sig/openai/models/responses/response_stream_event.rbs
index 10d54108..b9a88468 100644
--- a/sig/openai/models/responses/response_stream_event.rbs
+++ b/sig/openai/models/responses/response_stream_event.rbs
@@ -2,41 +2,64 @@ module OpenAI
   module Models
     module Responses
       type response_stream_event =
-        OpenAI::Models::Responses::ResponseAudioDeltaEvent
-        | OpenAI::Models::Responses::ResponseAudioDoneEvent
-        | OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent
-        | OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent
-        | OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent
-        | OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent
-        | OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent
-        | OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent
-        | OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent
-        | OpenAI::Models::Responses::ResponseCompletedEvent
-        | OpenAI::Models::Responses::ResponseContentPartAddedEvent
-        | OpenAI::Models::Responses::ResponseContentPartDoneEvent
-        | OpenAI::Models::Responses::ResponseCreatedEvent
-        | OpenAI::Models::Responses::ResponseErrorEvent
-        | OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent
-        | OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent
-        | OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent
-        | OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent
-        | OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent
-        | OpenAI::Models::Responses::ResponseInProgressEvent
-        | OpenAI::Models::Responses::ResponseFailedEvent
-        | OpenAI::Models::Responses::ResponseIncompleteEvent
-        | OpenAI::Models::Responses::ResponseOutputItemAddedEvent
-        | OpenAI::Models::Responses::ResponseOutputItemDoneEvent
-        | OpenAI::Models::Responses::ResponseRefusalDeltaEvent
-        | OpenAI::Models::Responses::ResponseRefusalDoneEvent
-        | OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent
-        | OpenAI::Models::Responses::ResponseTextDeltaEvent
-        | OpenAI::Models::Responses::ResponseTextDoneEvent
-        | OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent
-        | OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent
-        | OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent
+        OpenAI::Responses::ResponseAudioDeltaEvent
+        | OpenAI::Responses::ResponseAudioDoneEvent
+        | OpenAI::Responses::ResponseAudioTranscriptDeltaEvent
+        | OpenAI::Responses::ResponseAudioTranscriptDoneEvent
+        | OpenAI::Responses::ResponseCodeInterpreterCallCodeDeltaEvent
+        | OpenAI::Responses::ResponseCodeInterpreterCallCodeDoneEvent
+        | OpenAI::Responses::ResponseCodeInterpreterCallCompletedEvent
+        | OpenAI::Responses::ResponseCodeInterpreterCallInProgressEvent
+        | OpenAI::Responses::ResponseCodeInterpreterCallInterpretingEvent
+        | OpenAI::Responses::ResponseCompletedEvent
+        | OpenAI::Responses::ResponseContentPartAddedEvent
+        | OpenAI::Responses::ResponseContentPartDoneEvent
+        | OpenAI::Responses::ResponseCreatedEvent
+        | OpenAI::Responses::ResponseErrorEvent
+        | OpenAI::Responses::ResponseFileSearchCallCompletedEvent
+        | OpenAI::Responses::ResponseFileSearchCallInProgressEvent
+        | OpenAI::Responses::ResponseFileSearchCallSearchingEvent
+        | OpenAI::Responses::ResponseFunctionCallArgumentsDeltaEvent
+        | OpenAI::Responses::ResponseFunctionCallArgumentsDoneEvent
+        | OpenAI::Responses::ResponseInProgressEvent
+        | OpenAI::Responses::ResponseFailedEvent
+        | OpenAI::Responses::ResponseIncompleteEvent
+        | OpenAI::Responses::ResponseOutputItemAddedEvent
+        | OpenAI::Responses::ResponseOutputItemDoneEvent
+        | OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent
+        | OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent
+        | OpenAI::Responses::ResponseReasoningSummaryTextDeltaEvent
+        | OpenAI::Responses::ResponseReasoningSummaryTextDoneEvent
+        | OpenAI::Responses::ResponseReasoningTextDeltaEvent
+        | OpenAI::Responses::ResponseReasoningTextDoneEvent
+        | OpenAI::Responses::ResponseRefusalDeltaEvent
+        | OpenAI::Responses::ResponseRefusalDoneEvent
+        | OpenAI::Responses::ResponseTextDeltaEvent
+        | OpenAI::Responses::ResponseTextDoneEvent
+        | OpenAI::Responses::ResponseWebSearchCallCompletedEvent
+        | OpenAI::Responses::ResponseWebSearchCallInProgressEvent
+        | OpenAI::Responses::ResponseWebSearchCallSearchingEvent
+        | OpenAI::Responses::ResponseImageGenCallCompletedEvent
+        | OpenAI::Responses::ResponseImageGenCallGeneratingEvent
+        | OpenAI::Responses::ResponseImageGenCallInProgressEvent
+        | OpenAI::Responses::ResponseImageGenCallPartialImageEvent
+        | OpenAI::Responses::ResponseMcpCallArgumentsDeltaEvent
+        | OpenAI::Responses::ResponseMcpCallArgumentsDoneEvent
+        | OpenAI::Responses::ResponseMcpCallCompletedEvent
+        | OpenAI::Responses::ResponseMcpCallFailedEvent
+        | OpenAI::Responses::ResponseMcpCallInProgressEvent
+        | OpenAI::Responses::ResponseMcpListToolsCompletedEvent
+        | OpenAI::Responses::ResponseMcpListToolsFailedEvent
+        | OpenAI::Responses::ResponseMcpListToolsInProgressEvent
+        | OpenAI::Responses::ResponseOutputTextAnnotationAddedEvent
+        | OpenAI::Responses::ResponseQueuedEvent
+        | OpenAI::Responses::ResponseCustomToolCallInputDeltaEvent
+        | OpenAI::Responses::ResponseCustomToolCallInputDoneEvent
 
-      class ResponseStreamEvent < OpenAI::Union
-        private def self.variants: -> [[:"response.audio.delta", OpenAI::Models::Responses::ResponseAudioDeltaEvent], [:"response.audio.done", OpenAI::Models::Responses::ResponseAudioDoneEvent], [:"response.audio.transcript.delta", OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent], [:"response.audio.transcript.done", OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent], [:"response.code_interpreter_call.code.delta", OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent], [:"response.code_interpreter_call.code.done", OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent], [:"response.code_interpreter_call.completed", OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent], [:"response.code_interpreter_call.in_progress", OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent], [:"response.code_interpreter_call.interpreting", OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent], [:"response.completed", OpenAI::Models::Responses::ResponseCompletedEvent], [:"response.content_part.added", OpenAI::Models::Responses::ResponseContentPartAddedEvent], [:"response.content_part.done", OpenAI::Models::Responses::ResponseContentPartDoneEvent], [:"response.created", OpenAI::Models::Responses::ResponseCreatedEvent], [:error, OpenAI::Models::Responses::ResponseErrorEvent], [:"response.file_search_call.completed", OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent], [:"response.file_search_call.in_progress", OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent], [:"response.file_search_call.searching", OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent], [:"response.function_call_arguments.delta", OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent], [:"response.function_call_arguments.done", OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent], [:"response.in_progress", OpenAI::Models::Responses::ResponseInProgressEvent], [:"response.failed", OpenAI::Models::Responses::ResponseFailedEvent], [:"response.incomplete", OpenAI::Models::Responses::ResponseIncompleteEvent], [:"response.output_item.added", OpenAI::Models::Responses::ResponseOutputItemAddedEvent], [:"response.output_item.done", OpenAI::Models::Responses::ResponseOutputItemDoneEvent], [:"response.refusal.delta", OpenAI::Models::Responses::ResponseRefusalDeltaEvent], [:"response.refusal.done", OpenAI::Models::Responses::ResponseRefusalDoneEvent], [:"response.output_text.annotation.added", OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent], [:"response.output_text.delta", OpenAI::Models::Responses::ResponseTextDeltaEvent], [:"response.output_text.done", OpenAI::Models::Responses::ResponseTextDoneEvent], [:"response.web_search_call.completed", OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent], [:"response.web_search_call.in_progress", OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent], [:"response.web_search_call.searching", OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent]]
+      module ResponseStreamEvent
+        extend OpenAI::Internal::Type::Union
+
+        def self?.variants: -> ::Array[OpenAI::Models::Responses::response_stream_event]
       end
     end
   end
diff --git a/sig/openai/models/responses/response_text_annotation_delta_event.rbs b/sig/openai/models/responses/response_text_annotation_delta_event.rbs
deleted file mode 100644
index f3f165b9..00000000
--- a/sig/openai/models/responses/response_text_annotation_delta_event.rbs
+++ /dev/null
@@ -1,129 +0,0 @@
-module OpenAI
-  module Models
-    module Responses
-      type response_text_annotation_delta_event =
-        {
-          annotation: OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::annotation,
-          annotation_index: Integer,
-          content_index: Integer,
-          item_id: String,
-          output_index: Integer,
-          type: :"response.output_text.annotation.added"
-        }
-
-      class ResponseTextAnnotationDeltaEvent < OpenAI::BaseModel
-        attr_accessor annotation: OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::annotation
-
-        attr_accessor annotation_index: Integer
-
-        attr_accessor content_index: Integer
-
-        attr_accessor item_id: String
-
-        attr_accessor output_index: Integer
-
-        attr_accessor type: :"response.output_text.annotation.added"
-
-        def initialize:
-          (
-            annotation: OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::annotation,
-            annotation_index: Integer,
-            content_index: Integer,
-            item_id: String,
-            output_index: Integer,
-            type: :"response.output_text.annotation.added"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_text_annotation_delta_event
-            | OpenAI::BaseModel data
-          ) -> void
-
-        def to_hash: -> OpenAI::Models::Responses::response_text_annotation_delta_event
-
-        type annotation =
-          OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation
-          | OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation
-          | OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath
-
-        class Annotation < OpenAI::Union
-          type file_citation =
-            { file_id: String, index: Integer, type: :file_citation }
-
-          class FileCitation < OpenAI::BaseModel
-            attr_accessor file_id: String
-
-            attr_accessor index: Integer
-
-            attr_accessor type: :file_citation
-
-            def initialize:
-              (file_id: String, index: Integer, type: :file_citation) -> void
-              | (
-                ?OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::file_citation
-                | OpenAI::BaseModel data
-              ) -> void
-
-            def to_hash: -> OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::file_citation
-          end
-
-          type url_citation =
-            {
-              end_index: Integer,
-              start_index: Integer,
-              title: String,
-              type: :url_citation,
-              url: String
-            }
-
-          class URLCitation < OpenAI::BaseModel
-            attr_accessor end_index: Integer
-
-            attr_accessor start_index: Integer
-
-            attr_accessor title: String
-
-            attr_accessor type: :url_citation
-
-            attr_accessor url: String
-
-            def initialize:
-              (
-                end_index: Integer,
-                start_index: Integer,
-                title: String,
-                url: String,
-                type: :url_citation
-              ) -> void
-              | (
-                ?OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::url_citation
-                | OpenAI::BaseModel data
-              ) -> void
-
-            def to_hash: -> OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::url_citation
-          end
-
-          type file_path = { file_id: String, index: Integer, type: :file_path }
-
-          class FilePath < OpenAI::BaseModel
-            attr_accessor file_id: String
-
-            attr_accessor index: Integer
-
-            attr_accessor type: :file_path
-
-            def initialize:
-              (file_id: String, index: Integer, type: :file_path) -> void
-              | (
-                ?OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::file_path
-                | OpenAI::BaseModel data
-              ) -> void
-
-            def to_hash: -> OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::file_path
-          end
-
-          private def self.variants: -> [[:file_citation, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FileCitation], [:url_citation, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::URLCitation], [:file_path, OpenAI::Models::Responses::ResponseTextAnnotationDeltaEvent::Annotation::FilePath]]
-        end
-      end
-    end
-  end
-end
diff --git a/sig/openai/models/responses/response_text_config.rbs b/sig/openai/models/responses/response_text_config.rbs
index 0957217a..3f0a6608 100644
--- a/sig/openai/models/responses/response_text_config.rbs
+++ b/sig/openai/models/responses/response_text_config.rbs
@@ -2,25 +2,41 @@ module OpenAI
   module Models
     module Responses
       type response_text_config =
-        { format_: OpenAI::Models::Responses::response_format_text_config }
+        {
+          format_: OpenAI::Models::Responses::response_format_text_config,
+          verbosity: OpenAI::Models::Responses::ResponseTextConfig::verbosity?
+        }
 
-      class ResponseTextConfig < OpenAI::BaseModel
+      class ResponseTextConfig < OpenAI::Internal::Type::BaseModel
         attr_reader format_: OpenAI::Models::Responses::response_format_text_config?
 
         def format_=: (
           OpenAI::Models::Responses::response_format_text_config
         ) -> OpenAI::Models::Responses::response_format_text_config
 
-        def initialize:
-          (
-            format_: OpenAI::Models::Responses::response_format_text_config
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_text_config
-            | OpenAI::BaseModel data
-          ) -> void
+        attr_accessor verbosity: OpenAI::Models::Responses::ResponseTextConfig::verbosity?
 
-        def to_hash: -> OpenAI::Models::Responses::response_text_config
+        def initialize: (
+          ?format_: OpenAI::Models::Responses::response_format_text_config,
+          ?verbosity: OpenAI::Models::Responses::ResponseTextConfig::verbosity?
+        ) -> void
+
+        def to_hash: -> {
+          format_: OpenAI::Models::Responses::response_format_text_config,
+          verbosity: OpenAI::Models::Responses::ResponseTextConfig::verbosity?
+        }
+
+        type verbosity = :low | :medium | :high
+
+        module Verbosity
+          extend OpenAI::Internal::Type::Enum
+
+          LOW: :low
+          MEDIUM: :medium
+          HIGH: :high
+
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseTextConfig::verbosity]
+        end
       end
     end
   end
diff --git a/sig/openai/models/responses/response_text_delta_event.rbs b/sig/openai/models/responses/response_text_delta_event.rbs
index 5a692585..1069f71d 100644
--- a/sig/openai/models/responses/response_text_delta_event.rbs
+++ b/sig/openai/models/responses/response_text_delta_event.rbs
@@ -6,35 +6,93 @@ module OpenAI
           content_index: Integer,
           delta: String,
           item_id: String,
+          logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob],
           output_index: Integer,
+          sequence_number: Integer,
           type: :"response.output_text.delta"
         }
 
-      class ResponseTextDeltaEvent < OpenAI::BaseModel
+      class ResponseTextDeltaEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor content_index: Integer
 
         attr_accessor delta: String
 
         attr_accessor item_id: String
 
+        attr_accessor logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob]
+
         attr_accessor output_index: Integer
 
+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.output_text.delta"
 
-        def initialize:
-          (
-            content_index: Integer,
-            delta: String,
-            item_id: String,
-            output_index: Integer,
-            type: :"response.output_text.delta"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_text_delta_event
-            | OpenAI::BaseModel data
+        def initialize: (
+          content_index: Integer,
+          delta: String,
+          item_id: String,
+          logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob],
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.output_text.delta"
+        ) -> void
+
+        def to_hash: -> {
+          content_index: Integer,
+          delta: String,
+          item_id: String,
+          logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob],
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.output_text.delta"
+        }
+
+        type logprob =
+          {
+            token: String,
+            logprob: Float,
+            top_logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]
+          }
+
+        class Logprob < OpenAI::Internal::Type::BaseModel
+          attr_accessor token: String
+
+          attr_accessor logprob: Float
+
+          attr_reader top_logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]?
+
+          def top_logprobs=: (
+            ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]
+          ) -> ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]
+
+          def initialize: (
+            token: String,
+            logprob: Float,
+            ?top_logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]
           ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::response_text_delta_event
+          def to_hash: -> {
+            token: String,
+            logprob: Float,
+            top_logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]
+          }
+
+          type top_logprob = { token: String, logprob: Float }
+
+          class TopLogprob < OpenAI::Internal::Type::BaseModel
+            attr_reader token: String?
+
+            def token=: (String) -> String
+
+            attr_reader logprob: Float?
+
+            def logprob=: (Float) -> Float
+
+            def initialize: (?token: String, ?logprob: Float) -> void
+
+            def to_hash: -> { token: String, logprob: Float }
+          end
+        end
       end
     end
   end
diff --git a/sig/openai/models/responses/response_text_done_event.rbs b/sig/openai/models/responses/response_text_done_event.rbs
index 8cda3224..7e42d6a5 100644
--- a/sig/openai/models/responses/response_text_done_event.rbs
+++ b/sig/openai/models/responses/response_text_done_event.rbs
@@ -5,36 +5,94 @@ module OpenAI
         {
           content_index: Integer,
           item_id: String,
+          logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob],
           output_index: Integer,
+          sequence_number: Integer,
           text: String,
           type: :"response.output_text.done"
         }
 
-      class ResponseTextDoneEvent < OpenAI::BaseModel
+      class ResponseTextDoneEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor content_index: Integer
 
         attr_accessor item_id: String
 
+        attr_accessor logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob]
+
         attr_accessor output_index: Integer
 
+        attr_accessor sequence_number: Integer
+
         attr_accessor text: String
 
         attr_accessor type: :"response.output_text.done"
 
-        def initialize:
-          (
-            content_index: Integer,
-            item_id: String,
-            output_index: Integer,
-            text: String,
-            type: :"response.output_text.done"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_text_done_event
-            | OpenAI::BaseModel data
+        def initialize: (
+          content_index: Integer,
+          item_id: String,
+          logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob],
+          output_index: Integer,
+          sequence_number: Integer,
+          text: String,
+          ?type: :"response.output_text.done"
+        ) -> void
+
+        def to_hash: -> {
+          content_index: Integer,
+          item_id: String,
+          logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob],
+          output_index: Integer,
+          sequence_number: Integer,
+          text: String,
+          type: :"response.output_text.done"
+        }
+
+        type logprob =
+          {
+            token: String,
+            logprob: Float,
+            top_logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]
+          }
+
+        class Logprob < OpenAI::Internal::Type::BaseModel
+          attr_accessor token: String
+
+          attr_accessor logprob: Float
+
+          attr_reader top_logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]?
+
+          def top_logprobs=: (
+            ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]
+          ) -> ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]
+
+          def initialize: (
+            token: String,
+            logprob: Float,
+            ?top_logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]
          ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::response_text_done_event
+          def to_hash: -> {
+            token: String,
+            logprob: Float,
+            top_logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]
+          }
+
+          type top_logprob = { token: String, logprob: Float }
+
+          class TopLogprob < OpenAI::Internal::Type::BaseModel
+            attr_reader token: String?
+
+            def token=: (String) -> String
+
+            attr_reader logprob: Float?
+
+            def logprob=: (Float) -> Float
+
+            def initialize: (?token: String, ?logprob: Float) -> void
+
+            def to_hash: -> { token: String, logprob: Float }
+          end
+        end
       end
     end
   end
diff --git a/sig/openai/models/responses/response_usage.rbs b/sig/openai/models/responses/response_usage.rbs
index cca39c67..2245372b 100644
--- a/sig/openai/models/responses/response_usage.rbs
+++ b/sig/openai/models/responses/response_usage.rbs
@@ -4,46 +4,57 @@ module OpenAI
       type response_usage =
         {
           input_tokens: Integer,
+          input_tokens_details: OpenAI::Responses::ResponseUsage::InputTokensDetails,
           output_tokens: Integer,
-          output_tokens_details: OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails,
+          output_tokens_details: OpenAI::Responses::ResponseUsage::OutputTokensDetails,
           total_tokens: Integer
         }
 
-      class ResponseUsage < OpenAI::BaseModel
+      class ResponseUsage < OpenAI::Internal::Type::BaseModel
         attr_accessor input_tokens: Integer
 
+        attr_accessor input_tokens_details: OpenAI::Responses::ResponseUsage::InputTokensDetails
+
         attr_accessor output_tokens: Integer
 
-        attr_accessor output_tokens_details: OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails
+        attr_accessor output_tokens_details: OpenAI::Responses::ResponseUsage::OutputTokensDetails
 
         attr_accessor total_tokens: Integer
 
-        def initialize:
-          (
-            input_tokens: Integer,
-            output_tokens: Integer,
-            output_tokens_details: OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails,
-            total_tokens: Integer
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_usage | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          input_tokens: Integer,
+          input_tokens_details: OpenAI::Responses::ResponseUsage::InputTokensDetails,
+          output_tokens: Integer,
+          output_tokens_details: OpenAI::Responses::ResponseUsage::OutputTokensDetails,
+          total_tokens: Integer
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::response_usage
+        def to_hash: -> {
+          input_tokens: Integer,
+          input_tokens_details: OpenAI::Responses::ResponseUsage::InputTokensDetails,
+          output_tokens: Integer,
+          output_tokens_details: OpenAI::Responses::ResponseUsage::OutputTokensDetails,
+          total_tokens: Integer
+        }
+
+        type input_tokens_details = { cached_tokens: Integer }
+
+        class InputTokensDetails < OpenAI::Internal::Type::BaseModel
+          attr_accessor cached_tokens: Integer
+
+          def initialize: (cached_tokens: Integer) -> void
+
+          def to_hash: -> { cached_tokens: Integer }
+        end
 
         type output_tokens_details = { reasoning_tokens: Integer }
 
-        class OutputTokensDetails < OpenAI::BaseModel
+        class OutputTokensDetails < OpenAI::Internal::Type::BaseModel
           attr_accessor reasoning_tokens: Integer
 
-          def initialize:
-            (reasoning_tokens: Integer) -> void
-            | (
-              ?OpenAI::Models::Responses::ResponseUsage::output_tokens_details
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (reasoning_tokens: Integer) -> void
 
-          def to_hash: -> OpenAI::Models::Responses::ResponseUsage::output_tokens_details
+          def to_hash: -> { reasoning_tokens: Integer }
         end
       end
     end
diff --git a/sig/openai/models/responses/response_web_search_call_completed_event.rbs b/sig/openai/models/responses/response_web_search_call_completed_event.rbs
index 3be8bb79..66882df9 100644
--- a/sig/openai/models/responses/response_web_search_call_completed_event.rbs
+++ b/sig/openai/models/responses/response_web_search_call_completed_event.rbs
@@ -5,28 +5,32 @@ module OpenAI
         {
           item_id: String,
           output_index: Integer,
+          sequence_number: Integer,
           type: :"response.web_search_call.completed"
         }
 
-      class ResponseWebSearchCallCompletedEvent < OpenAI::BaseModel
+      class ResponseWebSearchCallCompletedEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor item_id: String
 
         attr_accessor output_index: Integer
 
+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.web_search_call.completed"
 
-        def initialize:
-          (
-            item_id: String,
-            output_index: Integer,
-            type: :"response.web_search_call.completed"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_web_search_call_completed_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.web_search_call.completed"
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::response_web_search_call_completed_event
+        def to_hash: -> {
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.web_search_call.completed"
+        }
       end
     end
   end
diff --git a/sig/openai/models/responses/response_web_search_call_in_progress_event.rbs b/sig/openai/models/responses/response_web_search_call_in_progress_event.rbs
index f8d56aa4..b2928e82 100644
--- a/sig/openai/models/responses/response_web_search_call_in_progress_event.rbs
+++ b/sig/openai/models/responses/response_web_search_call_in_progress_event.rbs
@@ -5,28 +5,32 @@ module OpenAI
         {
           item_id: String,
           output_index: Integer,
+          sequence_number: Integer,
           type: :"response.web_search_call.in_progress"
         }
 
-      class ResponseWebSearchCallInProgressEvent < OpenAI::BaseModel
+      class ResponseWebSearchCallInProgressEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor item_id: String
 
         attr_accessor output_index: Integer
 
+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.web_search_call.in_progress"
 
-        def initialize:
-          (
-            item_id: String,
-            output_index: Integer,
-            type: :"response.web_search_call.in_progress"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_web_search_call_in_progress_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.web_search_call.in_progress"
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::response_web_search_call_in_progress_event
+        def to_hash: -> {
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.web_search_call.in_progress"
+        }
       end
     end
   end
diff --git a/sig/openai/models/responses/response_web_search_call_searching_event.rbs b/sig/openai/models/responses/response_web_search_call_searching_event.rbs
index 0e2a2c23..4c3a659f 100644
--- a/sig/openai/models/responses/response_web_search_call_searching_event.rbs
+++ b/sig/openai/models/responses/response_web_search_call_searching_event.rbs
@@ -5,28 +5,32 @@ module OpenAI
         {
           item_id: String,
           output_index: Integer,
+          sequence_number: Integer,
           type: :"response.web_search_call.searching"
         }
 
-      class ResponseWebSearchCallSearchingEvent < OpenAI::BaseModel
+      class ResponseWebSearchCallSearchingEvent < OpenAI::Internal::Type::BaseModel
         attr_accessor item_id: String
 
         attr_accessor output_index: Integer
 
+        attr_accessor sequence_number: Integer
+
         attr_accessor type: :"response.web_search_call.searching"
 
-        def initialize:
-          (
-            item_id: String,
-            output_index: Integer,
-            type: :"response.web_search_call.searching"
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::response_web_search_call_searching_event
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.web_search_call.searching"
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::response_web_search_call_searching_event
+        def to_hash: -> {
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.web_search_call.searching"
+        }
       end
     end
   end
diff --git a/sig/openai/models/responses/tool.rbs b/sig/openai/models/responses/tool.rbs
index 62a0dd21..130dbb9f 100644
--- a/sig/openai/models/responses/tool.rbs
+++ b/sig/openai/models/responses/tool.rbs
@@ -2,13 +2,487 @@ module OpenAI
   module Models
     module Responses
       type tool =
-        OpenAI::Models::Responses::FileSearchTool
-        | OpenAI::Models::Responses::FunctionTool
-        | OpenAI::Models::Responses::ComputerTool
-        | OpenAI::Models::Responses::WebSearchTool
+        OpenAI::Responses::FunctionTool
+        | OpenAI::Responses::FileSearchTool
+        | OpenAI::Responses::ComputerTool
+        | OpenAI::Responses::Tool::Mcp
+        | OpenAI::Responses::Tool::CodeInterpreter
+        | OpenAI::Responses::Tool::ImageGeneration
+        | OpenAI::Responses::Tool::LocalShell
+        | OpenAI::Responses::CustomTool
+        | OpenAI::Responses::WebSearchTool
 
-      class Tool < OpenAI::Union
-        private def self.variants: -> [[:file_search, OpenAI::Models::Responses::FileSearchTool], [:function, OpenAI::Models::Responses::FunctionTool], [:computer_use_preview, OpenAI::Models::Responses::ComputerTool], [nil, OpenAI::Models::Responses::WebSearchTool]]
+      module Tool
+        extend OpenAI::Internal::Type::Union
+
+        type mcp =
+          {
+            server_label: String,
+            type: :mcp,
+            allowed_tools: OpenAI::Models::Responses::Tool::Mcp::allowed_tools?,
+            authorization: String,
+            connector_id: OpenAI::Models::Responses::Tool::Mcp::connector_id,
+            headers: ::Hash[Symbol, String]?,
+            require_approval: OpenAI::Models::Responses::Tool::Mcp::require_approval?,
+            server_description: String,
+            server_url: String
+          }
+
+        class Mcp < OpenAI::Internal::Type::BaseModel
+          attr_accessor server_label: String
+
+          attr_accessor type: :mcp
+
+          attr_accessor allowed_tools: OpenAI::Models::Responses::Tool::Mcp::allowed_tools?
+
+          attr_reader authorization: String?
+
+          def authorization=: (String) -> String
+
+          attr_reader connector_id: OpenAI::Models::Responses::Tool::Mcp::connector_id?
+
+          def connector_id=: (
+            OpenAI::Models::Responses::Tool::Mcp::connector_id
+          ) -> OpenAI::Models::Responses::Tool::Mcp::connector_id
+
+          attr_accessor headers: ::Hash[Symbol, String]?
+
+          attr_accessor require_approval: OpenAI::Models::Responses::Tool::Mcp::require_approval?
+
+          attr_reader server_description: String?
+
+          def server_description=: (String) -> String
+
+          attr_reader server_url: String?
+
+          def server_url=: (String) -> String
+
+          def initialize: (
+            server_label: String,
+            ?allowed_tools: OpenAI::Models::Responses::Tool::Mcp::allowed_tools?,
+            ?authorization: String,
+            ?connector_id: OpenAI::Models::Responses::Tool::Mcp::connector_id,
+            ?headers: ::Hash[Symbol, String]?,
+            ?require_approval: OpenAI::Models::Responses::Tool::Mcp::require_approval?,
+            ?server_description: String,
+            ?server_url: String,
+            ?type: :mcp
+          ) -> void
+
+          def to_hash: -> {
+            server_label: String,
+            type: :mcp,
+            allowed_tools: OpenAI::Models::Responses::Tool::Mcp::allowed_tools?,
+            authorization: String,
+            connector_id: OpenAI::Models::Responses::Tool::Mcp::connector_id,
+            headers: ::Hash[Symbol, String]?,
+            require_approval: OpenAI::Models::Responses::Tool::Mcp::require_approval?,
+            server_description: String,
+            server_url: String
+          }
+
+          type allowed_tools =
+            ::Array[String]
+            | OpenAI::Responses::Tool::Mcp::AllowedTools::McpToolFilter
+
+          module AllowedTools
+            extend OpenAI::Internal::Type::Union
+
+            type mcp_tool_filter =
+              { read_only: bool, tool_names: ::Array[String] }
+
+            class McpToolFilter < OpenAI::Internal::Type::BaseModel
+              attr_reader read_only: bool?
+
+              def read_only=: (bool) -> bool
+
+              attr_reader tool_names: ::Array[String]?
+
+              def tool_names=: (::Array[String]) -> ::Array[String]
+
+              def initialize: (
+                ?read_only: bool,
+                ?tool_names: ::Array[String]
+              ) -> void
+
+              def to_hash: -> { read_only: bool, tool_names: ::Array[String] }
+            end
+
+            def self?.variants: -> ::Array[OpenAI::Models::Responses::Tool::Mcp::allowed_tools]
+
+            StringArray: OpenAI::Internal::Type::Converter
+          end
+
+          type connector_id =
+            :connector_dropbox
+            | :connector_gmail
+            | :connector_googlecalendar
+            | :connector_googledrive
+            | :connector_microsoftteams
+            | :connector_outlookcalendar
+            | :connector_outlookemail
+            | :connector_sharepoint
+
+          module ConnectorID
+            extend OpenAI::Internal::Type::Enum
+
+            CONNECTOR_DROPBOX: :connector_dropbox
+            CONNECTOR_GMAIL: :connector_gmail
+            CONNECTOR_GOOGLECALENDAR: :connector_googlecalendar
+            CONNECTOR_GOOGLEDRIVE: :connector_googledrive
+            CONNECTOR_MICROSOFTTEAMS: :connector_microsoftteams
+            CONNECTOR_OUTLOOKCALENDAR: :connector_outlookcalendar
+            CONNECTOR_OUTLOOKEMAIL: :connector_outlookemail
+            CONNECTOR_SHAREPOINT: :connector_sharepoint
+
+            def self?.values: -> ::Array[OpenAI::Models::Responses::Tool::Mcp::connector_id]
+          end
+
+          type require_approval =
+            OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter
+            | OpenAI::Models::Responses::Tool::Mcp::RequireApproval::mcp_tool_approval_setting
+
+          module RequireApproval
+            extend OpenAI::Internal::Type::Union
+
+            type mcp_tool_approval_filter =
+              {
+                always: OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always,
+                never: OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never
+              }
+
+            class McpToolApprovalFilter < OpenAI::Internal::Type::BaseModel
+              attr_reader always: OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always?
+
+              def always=: (
+                OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always
+              ) -> OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always
+
+              attr_reader never: OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never?
+
+              def never=: (
+                OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never
+              ) -> OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never
+
+              def initialize: (
+                ?always: OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always,
+                ?never: OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never
+              ) -> void
+
+              def to_hash: -> {
+                always: OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always,
+                never: OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never
+              }
+
+              type always = { read_only: bool, tool_names: ::Array[String] }
+
+              class Always < OpenAI::Internal::Type::BaseModel
+                attr_reader read_only: bool?
+
+                def read_only=: (bool) -> bool
+
+                attr_reader tool_names: ::Array[String]?
+
+                def tool_names=: (::Array[String]) -> ::Array[String]
+
+                def initialize: (
+                  ?read_only: bool,
+                  ?tool_names: ::Array[String]
+                ) -> void
+
+                def to_hash: -> { read_only: bool, tool_names: ::Array[String] }
+              end
+
+              type never = { read_only: bool, tool_names: ::Array[String] }
+
+              class Never < OpenAI::Internal::Type::BaseModel
+                attr_reader read_only: bool?
+
+                def read_only=: (bool) -> bool
+
+                attr_reader tool_names: ::Array[String]?
+
+                def tool_names=: (::Array[String]) -> ::Array[String]
+
+                def initialize: (
+                  ?read_only: bool,
+                  ?tool_names: ::Array[String]
+                ) -> void
+
+                def to_hash: -> { read_only: bool, tool_names: ::Array[String] }
+              end
+            end
+
+            type mcp_tool_approval_setting = :always | :never
+
+            module McpToolApprovalSetting
+              extend OpenAI::Internal::Type::Enum
+
+              ALWAYS: :always
+              NEVER: :never
+
+              def self?.values: -> ::Array[OpenAI::Models::Responses::Tool::Mcp::RequireApproval::mcp_tool_approval_setting]
+            end
+
+            def self?.variants: -> ::Array[OpenAI::Models::Responses::Tool::Mcp::require_approval]
+          end
+        end
+
+        type code_interpreter =
+          {
+            container: OpenAI::Models::Responses::Tool::CodeInterpreter::container,
+            type: :code_interpreter
+          }
+
+        class CodeInterpreter < OpenAI::Internal::Type::BaseModel
+          attr_accessor container: OpenAI::Models::Responses::Tool::CodeInterpreter::container
+
+          attr_accessor type: :code_interpreter
+
+          def initialize: (
+            container: OpenAI::Models::Responses::Tool::CodeInterpreter::container,
+            ?type: :code_interpreter
+          ) -> void
+
+          def to_hash: -> {
+            container: OpenAI::Models::Responses::Tool::CodeInterpreter::container,
+            type: :code_interpreter
+          }
+
+          type container =
+            String
+            | OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto
+
+          module Container
+            extend OpenAI::Internal::Type::Union
+
+            type code_interpreter_tool_auto =
+              { type: :auto, file_ids: ::Array[String] }
+
+            class CodeInterpreterToolAuto < OpenAI::Internal::Type::BaseModel
+              attr_accessor type: :auto
+
+              attr_reader file_ids: ::Array[String]?
+
+              def file_ids=: (::Array[String]) -> ::Array[String]
+
+              def initialize: (?file_ids: ::Array[String], ?type: :auto) -> void
+
+              def to_hash: -> { type: :auto, file_ids: ::Array[String] }
+            end
+
+            def self?.variants: -> ::Array[OpenAI::Models::Responses::Tool::CodeInterpreter::container]
+          end
+        end
+
+        type image_generation =
+          {
+            type: :image_generation,
+            background: OpenAI::Models::Responses::Tool::ImageGeneration::background,
+            input_fidelity: OpenAI::Models::Responses::Tool::ImageGeneration::input_fidelity?,
+            input_image_mask: OpenAI::Responses::Tool::ImageGeneration::InputImageMask,
+            model: OpenAI::Models::Responses::Tool::ImageGeneration::model,
+            moderation: OpenAI::Models::Responses::Tool::ImageGeneration::moderation,
+            output_compression: Integer,
+            output_format: OpenAI::Models::Responses::Tool::ImageGeneration::output_format,
+            partial_images: Integer,
+            quality: OpenAI::Models::Responses::Tool::ImageGeneration::quality,
+            size: OpenAI::Models::Responses::Tool::ImageGeneration::size
+          }
+
+        class ImageGeneration < OpenAI::Internal::Type::BaseModel
+          attr_accessor type: :image_generation
+
+          attr_reader background: OpenAI::Models::Responses::Tool::ImageGeneration::background?
+
+          def background=: (
+            OpenAI::Models::Responses::Tool::ImageGeneration::background
+          ) -> OpenAI::Models::Responses::Tool::ImageGeneration::background
+
+          attr_accessor input_fidelity: OpenAI::Models::Responses::Tool::ImageGeneration::input_fidelity?
+
+          attr_reader input_image_mask: OpenAI::Responses::Tool::ImageGeneration::InputImageMask?
+
+          def input_image_mask=: (
+            OpenAI::Responses::Tool::ImageGeneration::InputImageMask
+          ) -> OpenAI::Responses::Tool::ImageGeneration::InputImageMask
+
+          attr_reader model: OpenAI::Models::Responses::Tool::ImageGeneration::model?
+
+          def model=: (
+            OpenAI::Models::Responses::Tool::ImageGeneration::model
+          ) -> OpenAI::Models::Responses::Tool::ImageGeneration::model
+
+          attr_reader moderation: OpenAI::Models::Responses::Tool::ImageGeneration::moderation?
+
+          def moderation=: (
+            OpenAI::Models::Responses::Tool::ImageGeneration::moderation
+          ) -> OpenAI::Models::Responses::Tool::ImageGeneration::moderation
+
+          attr_reader output_compression: Integer?
+
+          def output_compression=: (Integer) -> Integer
+
+          attr_reader output_format: OpenAI::Models::Responses::Tool::ImageGeneration::output_format?
+
+          def output_format=: (
+            OpenAI::Models::Responses::Tool::ImageGeneration::output_format
+          ) -> OpenAI::Models::Responses::Tool::ImageGeneration::output_format
+
+          attr_reader partial_images: Integer?
+
+          def partial_images=: (Integer) -> Integer
+
+          attr_reader quality: OpenAI::Models::Responses::Tool::ImageGeneration::quality?
+
+          def quality=: (
+            OpenAI::Models::Responses::Tool::ImageGeneration::quality
+          ) -> OpenAI::Models::Responses::Tool::ImageGeneration::quality
+
+          attr_reader size: OpenAI::Models::Responses::Tool::ImageGeneration::size?
+
+          def size=: (
+            OpenAI::Models::Responses::Tool::ImageGeneration::size
+          ) -> OpenAI::Models::Responses::Tool::ImageGeneration::size
+
+          def initialize: (
+            ?background: OpenAI::Models::Responses::Tool::ImageGeneration::background,
+            ?input_fidelity: OpenAI::Models::Responses::Tool::ImageGeneration::input_fidelity?,
+            ?input_image_mask: OpenAI::Responses::Tool::ImageGeneration::InputImageMask,
+            ?model: OpenAI::Models::Responses::Tool::ImageGeneration::model,
+            ?moderation: OpenAI::Models::Responses::Tool::ImageGeneration::moderation,
+            ?output_compression: Integer,
+            ?output_format: OpenAI::Models::Responses::Tool::ImageGeneration::output_format,
+            ?partial_images: Integer,
+            ?quality: OpenAI::Models::Responses::Tool::ImageGeneration::quality,
+            ?size: OpenAI::Models::Responses::Tool::ImageGeneration::size,
+            ?type: :image_generation
+          ) -> void
+
+          def to_hash: -> {
+            type: :image_generation,
+            background: OpenAI::Models::Responses::Tool::ImageGeneration::background,
+            input_fidelity: OpenAI::Models::Responses::Tool::ImageGeneration::input_fidelity?,
+            input_image_mask: OpenAI::Responses::Tool::ImageGeneration::InputImageMask,
+            model: OpenAI::Models::Responses::Tool::ImageGeneration::model,
+            moderation: OpenAI::Models::Responses::Tool::ImageGeneration::moderation,
+            output_compression: Integer,
+            output_format: OpenAI::Models::Responses::Tool::ImageGeneration::output_format,
+            partial_images: Integer,
+            quality: OpenAI::Models::Responses::Tool::ImageGeneration::quality,
+            size: OpenAI::Models::Responses::Tool::ImageGeneration::size
+          }
+
+          type background = :transparent | :opaque | :auto
+
+          module Background
+            extend OpenAI::Internal::Type::Enum
+
+            TRANSPARENT: :transparent
+            OPAQUE: :opaque
+            AUTO: :auto
+
+            def self?.values: -> ::Array[OpenAI::Models::Responses::Tool::ImageGeneration::background]
+          end
+
+          type input_fidelity = :high | :low
+
+          module InputFidelity
+            extend OpenAI::Internal::Type::Enum
+
+            HIGH: :high
+            LOW: :low
+
+            def self?.values: -> ::Array[OpenAI::Models::Responses::Tool::ImageGeneration::input_fidelity]
+          end
+
+          type input_image_mask = { file_id: String, image_url: String }
+
+          class InputImageMask < OpenAI::Internal::Type::BaseModel
+            attr_reader file_id: String?
+
+            def file_id=: (String) -> String
+
+            attr_reader image_url: String?
+
+            def image_url=: (String) -> String
+
+            def initialize: (?file_id: String, ?image_url: String) -> void
+
+            def to_hash: -> { file_id: String, image_url: String }
+          end
+
+          type model = :"gpt-image-1"
+
+          module Model
+            extend OpenAI::Internal::Type::Enum
+
+            GPT_IMAGE_1: :"gpt-image-1"
+
+            def self?.values: -> ::Array[OpenAI::Models::Responses::Tool::ImageGeneration::model]
+          end
+
+          type moderation = :auto | :low
+
+          module Moderation
+            extend OpenAI::Internal::Type::Enum
+
+            AUTO: :auto
+            LOW: :low
+
+            def self?.values: -> ::Array[OpenAI::Models::Responses::Tool::ImageGeneration::moderation]
+          end
+
+          type output_format = :png | :webp | :jpeg
+
+          module OutputFormat
+            extend OpenAI::Internal::Type::Enum
+
+            PNG: :png
+            WEBP: :webp
+            JPEG: :jpeg
+
+            def self?.values: -> ::Array[OpenAI::Models::Responses::Tool::ImageGeneration::output_format]
+          end
+
+          type quality = :low | :medium | :high | :auto
+
+          module Quality
+            extend OpenAI::Internal::Type::Enum
+
+            LOW: :low
+            MEDIUM: :medium
+            HIGH: :high
+            AUTO: :auto
+
+            def self?.values: -> ::Array[OpenAI::Models::Responses::Tool::ImageGeneration::quality]
+          end
+
+          type size = :"1024x1024" | :"1024x1536" | :"1536x1024" | :auto
+
+          module Size
+            extend OpenAI::Internal::Type::Enum
+
+            SIZE_1024X1024: :"1024x1024"
+            SIZE_1024X1536: :"1024x1536"
+            SIZE_1536X1024: :"1536x1024"
+            AUTO: :auto
+
+            def self?.values: -> ::Array[OpenAI::Models::Responses::Tool::ImageGeneration::size]
+          end
+        end
+
+        type local_shell = { type: :local_shell }
+
+        class LocalShell < OpenAI::Internal::Type::BaseModel
+          attr_accessor type: :local_shell
+
+          def initialize: (?type: :local_shell) -> void
+
+          def to_hash: -> { type: :local_shell }
+        end
+
+        def self?.variants: -> ::Array[OpenAI::Models::Responses::tool]
       end
     end
   end
diff --git a/sig/openai/models/responses/tool_choice_allowed.rbs b/sig/openai/models/responses/tool_choice_allowed.rbs
new file mode 100644
index 00000000..add7a8ce
--- /dev/null
+++ b/sig/openai/models/responses/tool_choice_allowed.rbs
@@ -0,0 +1,43 @@
+module OpenAI
+  module Models
+    module Responses
+      type tool_choice_allowed =
+        {
+          mode: OpenAI::Models::Responses::ToolChoiceAllowed::mode,
+          tools: ::Array[::Hash[Symbol, top]],
+          type: :allowed_tools
+        }
+
+      class ToolChoiceAllowed < OpenAI::Internal::Type::BaseModel
+        attr_accessor mode: OpenAI::Models::Responses::ToolChoiceAllowed::mode
+
+        attr_accessor tools: ::Array[::Hash[Symbol, top]]
+
+        attr_accessor type: :allowed_tools
+
+        def initialize: (
+          mode: OpenAI::Models::Responses::ToolChoiceAllowed::mode,
+          tools: ::Array[::Hash[Symbol, top]],
+          ?type: :allowed_tools
+        ) -> void
+
+        def to_hash: -> {
+          mode: OpenAI::Models::Responses::ToolChoiceAllowed::mode,
+          tools: ::Array[::Hash[Symbol, top]],
+          type: :allowed_tools
+        }
+
+        type mode = :auto | :required
+
+        module Mode
+          extend OpenAI::Internal::Type::Enum
+
+          AUTO: :auto
+          REQUIRED: :required
+
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ToolChoiceAllowed::mode]
+        end
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/tool_choice_custom.rbs b/sig/openai/models/responses/tool_choice_custom.rbs
new file mode 100644
index 00000000..9848fb4f
--- /dev/null
+++ b/sig/openai/models/responses/tool_choice_custom.rbs
@@ -0,0 +1,17 @@
+module OpenAI
+  module Models
+    module Responses
+      type tool_choice_custom = { name: String, type: :custom }
+
+      class ToolChoiceCustom < OpenAI::Internal::Type::BaseModel
+        attr_accessor name: String
+
+        attr_accessor type: :custom
+
+        def initialize: (name: String, ?type: :custom) -> void
+
+        def to_hash: -> { name: String, type: :custom }
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/tool_choice_function.rbs b/sig/openai/models/responses/tool_choice_function.rbs
index 7dbf4708..10aa7372 100644
--- a/sig/openai/models/responses/tool_choice_function.rbs
+++ b/sig/openai/models/responses/tool_choice_function.rbs
@@ -3,19 +3,14 @@ module OpenAI
     module Responses
       type tool_choice_function = { name: String, type: :function }
 
-      class ToolChoiceFunction < OpenAI::BaseModel
+      class ToolChoiceFunction < OpenAI::Internal::Type::BaseModel
         attr_accessor name: String
 
         attr_accessor type: :function
 
-        def initialize:
-          (name: String, type: :function) -> void
-          | (
-            ?OpenAI::Models::Responses::tool_choice_function
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (name: String, ?type: :function) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::tool_choice_function
+        def to_hash: -> { name: String, type: :function }
       end
     end
   end
diff --git a/sig/openai/models/responses/tool_choice_mcp.rbs b/sig/openai/models/responses/tool_choice_mcp.rbs
new file mode 100644
index 00000000..1a5f20a6
--- /dev/null
+++ b/sig/openai/models/responses/tool_choice_mcp.rbs
@@ -0,0 +1,23 @@
+module OpenAI
+  module Models
+    module Responses
+      type tool_choice_mcp = { server_label: String, type: :mcp, name: String? }
+
+      class ToolChoiceMcp < OpenAI::Internal::Type::BaseModel
+        attr_accessor server_label: String
+
+        attr_accessor type: :mcp
+
+        attr_accessor name: String?
+
+        def initialize: (
+          server_label: String,
+          ?name: String?,
+          ?type: :mcp
+        ) -> void
+
+        def to_hash: -> { server_label: String, type: :mcp, name: String? }
+      end
+    end
+  end
+end
diff --git a/sig/openai/models/responses/tool_choice_options.rbs b/sig/openai/models/responses/tool_choice_options.rbs
index e902ea2c..f07ff227 100644
--- a/sig/openai/models/responses/tool_choice_options.rbs
+++ b/sig/openai/models/responses/tool_choice_options.rbs
@@ -3,12 +3,14 @@ module OpenAI
     module Responses
       type tool_choice_options = :none | :auto | :required
 
-      class ToolChoiceOptions < OpenAI::Enum
+      module ToolChoiceOptions
+        extend OpenAI::Internal::Type::Enum
+
         NONE: :none
         AUTO: :auto
         REQUIRED: :required
 
-        def self.values: -> ::Array[OpenAI::Models::Responses::tool_choice_options]
+        def self?.values: -> ::Array[OpenAI::Models::Responses::tool_choice_options]
       end
     end
   end
diff --git a/sig/openai/models/responses/tool_choice_types.rbs b/sig/openai/models/responses/tool_choice_types.rbs
index 66e763a9..cae0cf70 100644
--- a/sig/openai/models/responses/tool_choice_types.rbs
+++ b/sig/openai/models/responses/tool_choice_types.rbs
@@ -4,31 +4,36 @@ module OpenAI
       type tool_choice_types =
         { type: OpenAI::Models::Responses::ToolChoiceTypes::type_ }
 
-      class ToolChoiceTypes < OpenAI::BaseModel
+      class ToolChoiceTypes < OpenAI::Internal::Type::BaseModel
         attr_accessor type: OpenAI::Models::Responses::ToolChoiceTypes::type_
 
-        def initialize:
-          (type: OpenAI::Models::Responses::ToolChoiceTypes::type_) -> void
-          | (
-            ?OpenAI::Models::Responses::tool_choice_types
-            | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          type: OpenAI::Models::Responses::ToolChoiceTypes::type_
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::tool_choice_types
+        def to_hash: -> {
+          type: OpenAI::Models::Responses::ToolChoiceTypes::type_
+        }
 
         type type_ =
           :file_search
           | :web_search_preview
           | :computer_use_preview
           | :web_search_preview_2025_03_11
+          | :image_generation
+          | :code_interpreter
+
+        module Type
+          extend OpenAI::Internal::Type::Enum
 
-        class Type < OpenAI::Enum
           FILE_SEARCH: :file_search
           WEB_SEARCH_PREVIEW: :web_search_preview
           COMPUTER_USE_PREVIEW: :computer_use_preview
           WEB_SEARCH_PREVIEW_2025_03_11: :web_search_preview_2025_03_11
+          IMAGE_GENERATION: :image_generation
+          CODE_INTERPRETER: :code_interpreter
 
-          def self.values: -> ::Array[OpenAI::Models::Responses::ToolChoiceTypes::type_]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::ToolChoiceTypes::type_]
         end
       end
     end
diff --git a/sig/openai/models/responses/web_search_tool.rbs b/sig/openai/models/responses/web_search_tool.rbs
index e815a5e3..3812d3bb 100644
--- a/sig/openai/models/responses/web_search_tool.rbs
+++ b/sig/openai/models/responses/web_search_tool.rbs
@@ -5,10 +5,10 @@ module OpenAI
         {
           type: OpenAI::Models::Responses::WebSearchTool::type_,
           search_context_size: OpenAI::Models::Responses::WebSearchTool::search_context_size,
-          user_location: OpenAI::Models::Responses::WebSearchTool::UserLocation?
+          user_location: OpenAI::Responses::WebSearchTool::UserLocation?
         }
 
-      class WebSearchTool < OpenAI::BaseModel
+      class WebSearchTool < OpenAI::Internal::Type::BaseModel
         attr_accessor type: OpenAI::Models::Responses::WebSearchTool::type_
 
         attr_reader search_context_size: OpenAI::Models::Responses::WebSearchTool::search_context_size?
@@ -17,81 +17,78 @@ module OpenAI
           OpenAI::Models::Responses::WebSearchTool::search_context_size
         ) -> OpenAI::Models::Responses::WebSearchTool::search_context_size
 
-        attr_accessor user_location: OpenAI::Models::Responses::WebSearchTool::UserLocation?
+        attr_accessor user_location: OpenAI::Responses::WebSearchTool::UserLocation?
 
-        def initialize:
-          (
-            type: OpenAI::Models::Responses::WebSearchTool::type_,
-            search_context_size: OpenAI::Models::Responses::WebSearchTool::search_context_size,
-            user_location: OpenAI::Models::Responses::WebSearchTool::UserLocation?
-          ) -> void
-          | (
-            ?OpenAI::Models::Responses::web_search_tool | OpenAI::BaseModel data
-          ) -> void
+        def initialize: (
+          type: OpenAI::Models::Responses::WebSearchTool::type_,
+          ?search_context_size: OpenAI::Models::Responses::WebSearchTool::search_context_size,
+          ?user_location: OpenAI::Responses::WebSearchTool::UserLocation?
+        ) -> void
 
-        def to_hash: -> OpenAI::Models::Responses::web_search_tool
+        def to_hash: -> {
+          type: OpenAI::Models::Responses::WebSearchTool::type_,
+          search_context_size: OpenAI::Models::Responses::WebSearchTool::search_context_size,
+          user_location: OpenAI::Responses::WebSearchTool::UserLocation?
+        }
 
         type type_ = :web_search_preview | :web_search_preview_2025_03_11
 
-        class Type < OpenAI::Enum
+        module Type
+          extend OpenAI::Internal::Type::Enum
+
           WEB_SEARCH_PREVIEW: :web_search_preview
           WEB_SEARCH_PREVIEW_2025_03_11: :web_search_preview_2025_03_11
 
-          def self.values: -> ::Array[OpenAI::Models::Responses::WebSearchTool::type_]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::WebSearchTool::type_]
         end
 
         type search_context_size = :low | :medium | :high
 
-        class SearchContextSize < OpenAI::Enum
+        module SearchContextSize
+          extend OpenAI::Internal::Type::Enum
+
           LOW: :low
           MEDIUM: :medium
           HIGH: :high
 
-          def self.values: -> ::Array[OpenAI::Models::Responses::WebSearchTool::search_context_size]
+          def self?.values: -> ::Array[OpenAI::Models::Responses::WebSearchTool::search_context_size]
         end
 
         type user_location =
           {
             type: :approximate,
-            city: String,
-            country: String,
-            region: String,
-            timezone: String
+            city: String?,
+            country: String?,
+            region: String?,
+            timezone: String?
           }
 
-        class UserLocation < OpenAI::BaseModel
+        class UserLocation < OpenAI::Internal::Type::BaseModel
           attr_accessor type: :approximate
 
-          attr_reader city: String?
+          attr_accessor city: String?
 
-          def city=: (String) -> String
+          attr_accessor country: String?
 
-          attr_reader country: String?
+          attr_accessor region: String?
 
-          def country=: (String) -> String
+          attr_accessor timezone: String?
 
-          attr_reader region: String?
-
-          def region=: (String) -> String
-
-          attr_reader timezone: String?
-
-          def timezone=: (String) -> String
-
-          def initialize:
-            (
-              city: String,
-              country: String,
-              region: String,
-              timezone: String,
-              type: :approximate
-            ) -> void
-            | (
-              ?OpenAI::Models::Responses::WebSearchTool::user_location
-              | OpenAI::BaseModel data
-            ) -> void
+          def initialize: (
+            ?city: String?,
+            ?country: String?,
+            ?region: String?,
+            ?timezone: String?,
+            ?type: :approximate
+          ) -> void
 
-          def to_hash: -> OpenAI::Models::Responses::WebSearchTool::user_location
+          def to_hash: -> {
+            type: :approximate,
+            city: String?,
+            country: String?,
+            region: String?,
+            timezone: String?
+          }
         end
       end
     end
diff --git a/sig/openai/models/responses_model.rbs b/sig/openai/models/responses_model.rbs
new file mode 100644
index 00000000..fb64b13b
--- /dev/null
+++ b/sig/openai/models/responses_model.rbs
@@ -0,0 +1,43 @@
+module OpenAI
+  module Models
+    type responses_model =
+      String
+      | OpenAI::Models::chat_model
+      | OpenAI::Models::ResponsesModel::responses_only_model
+
+    module ResponsesModel
+      extend OpenAI::Internal::Type::Union
+
+      type responses_only_model =
+        :"o1-pro"
+        | :"o1-pro-2025-03-19"
+        | :"o3-pro"
+        | :"o3-pro-2025-06-10"
+        | :"o3-deep-research"
+        | :"o3-deep-research-2025-06-26"
+        | :"o4-mini-deep-research"
+        | :"o4-mini-deep-research-2025-06-26"
+        | :"computer-use-preview"
+        | :"computer-use-preview-2025-03-11"
+
+      module ResponsesOnlyModel
+        extend OpenAI::Internal::Type::Enum
+
+        O1_PRO: :"o1-pro"
+        O1_PRO_2025_03_19: :"o1-pro-2025-03-19"
+        O3_PRO: :"o3-pro"
+        O3_PRO_2025_06_10: :"o3-pro-2025-06-10"
+        O3_DEEP_RESEARCH: :"o3-deep-research"
+        O3_DEEP_RESEARCH_2025_06_26: :"o3-deep-research-2025-06-26"
+        O4_MINI_DEEP_RESEARCH: :"o4-mini-deep-research"
+        O4_MINI_DEEP_RESEARCH_2025_06_26: :"o4-mini-deep-research-2025-06-26"
+        COMPUTER_USE_PREVIEW: :"computer-use-preview"
+        COMPUTER_USE_PREVIEW_2025_03_11: :"computer-use-preview-2025-03-11"
+
+        def self?.values: -> ::Array[OpenAI::Models::ResponsesModel::responses_only_model]
+      end
+
+      def self?.variants: -> ::Array[OpenAI::Models::responses_model]
+    end
+  end
+end
diff --git a/sig/openai/models/static_file_chunking_strategy.rbs b/sig/openai/models/static_file_chunking_strategy.rbs
index a10dab32..1d8dee49 100644
--- a/sig/openai/models/static_file_chunking_strategy.rbs
+++ b/sig/openai/models/static_file_chunking_strategy.rbs
@@ -3,19 +3,20 @@ module OpenAI
     type static_file_chunking_strategy =
       { chunk_overlap_tokens: Integer, max_chunk_size_tokens: Integer }
 
-    class StaticFileChunkingStrategy < OpenAI::BaseModel
+    class StaticFileChunkingStrategy < OpenAI::Internal::Type::BaseModel
       attr_accessor chunk_overlap_tokens: Integer
 
       attr_accessor max_chunk_size_tokens: Integer
 
-      def initialize:
-        (chunk_overlap_tokens: Integer, max_chunk_size_tokens: Integer) -> void
-        | (
-          ?OpenAI::Models::static_file_chunking_strategy
-          | OpenAI::BaseModel data
-        ) -> void
+      def initialize: (
+        chunk_overlap_tokens: Integer,
+        max_chunk_size_tokens: Integer
+      ) -> void
 
-      def to_hash: -> OpenAI::Models::static_file_chunking_strategy
+      def to_hash: -> {
+        chunk_overlap_tokens: Integer,
+        max_chunk_size_tokens: Integer
+      }
     end
   end
diff --git a/sig/openai/models/static_file_chunking_strategy_object.rbs b/sig/openai/models/static_file_chunking_strategy_object.rbs
index f3b1f264..e65aa3db 100644
--- a/sig/openai/models/static_file_chunking_strategy_object.rbs
+++ b/sig/openai/models/static_file_chunking_strategy_object.rbs
@@ -1,24 +1,22 @@
 module OpenAI
   module Models
     type static_file_chunking_strategy_object =
-      { static: OpenAI::Models::StaticFileChunkingStrategy, type: :static }
+      { static: OpenAI::StaticFileChunkingStrategy, type: :static }
 
-    class StaticFileChunkingStrategyObject < OpenAI::BaseModel
-      attr_accessor static: OpenAI::Models::StaticFileChunkingStrategy
+    class StaticFileChunkingStrategyObject < OpenAI::Internal::Type::BaseModel
+      attr_accessor static: OpenAI::StaticFileChunkingStrategy
 
       attr_accessor type: :static
 
-      def initialize:
-        (
-          static: OpenAI::Models::StaticFileChunkingStrategy,
-          type: :static
-        ) -> void
-        | (
-          ?OpenAI::Models::static_file_chunking_strategy_object
-          | OpenAI::BaseModel data
-        ) -> void
+      def initialize: (
+        static: OpenAI::StaticFileChunkingStrategy,
+        ?type: :static
+      ) -> void
 
-      def to_hash: -> OpenAI::Models::static_file_chunking_strategy_object
+      def to_hash: -> {
+        static: OpenAI::StaticFileChunkingStrategy,
+        type: :static
+      }
     end
   end
 end
diff --git a/sig/openai/models/static_file_chunking_strategy_object_param.rbs b/sig/openai/models/static_file_chunking_strategy_object_param.rbs
index 9d6c4140..a722310f 100644
--- a/sig/openai/models/static_file_chunking_strategy_object_param.rbs
+++ b/sig/openai/models/static_file_chunking_strategy_object_param.rbs
@@ -1,24 +1,22 @@
 module OpenAI
   module Models
     type static_file_chunking_strategy_object_param =
-      { static: OpenAI::Models::StaticFileChunkingStrategy, type: :static }
+      { static: OpenAI::StaticFileChunkingStrategy, type: :static }
 
-    class StaticFileChunkingStrategyObjectParam < OpenAI::BaseModel
-      attr_accessor static: OpenAI::Models::StaticFileChunkingStrategy
+    class StaticFileChunkingStrategyObjectParam < OpenAI::Internal::Type::BaseModel
+      attr_accessor static: OpenAI::StaticFileChunkingStrategy
 
       attr_accessor type: :static
 
-      def initialize:
-        (
-          static: OpenAI::Models::StaticFileChunkingStrategy,
-          type: :static
-        ) -> void
-        | (
-          ?OpenAI::Models::static_file_chunking_strategy_object_param
-          | OpenAI::BaseModel data
-        ) -> void
+      def initialize: (
+        static: OpenAI::StaticFileChunkingStrategy,
+        ?type: :static
+      ) -> void
 
-      def to_hash: -> OpenAI::Models::static_file_chunking_strategy_object_param
+      def to_hash: -> {
+        static: OpenAI::StaticFileChunkingStrategy,
+        type: :static
+      }
     end
   end
 end
diff --git a/sig/openai/models/upload.rbs b/sig/openai/models/upload.rbs
index e58a0c87..01762451 100644
--- a/sig/openai/models/upload.rbs
+++ b/sig/openai/models/upload.rbs
@@ -10,10 +10,10 @@ module OpenAI
         object: :upload,
         purpose: String,
         status: OpenAI::Models::Upload::status,
-        file: OpenAI::Models::FileObject?
+        file: OpenAI::FileObject?
       }
 
-    class Upload < OpenAI::BaseModel
+    class Upload < OpenAI::Internal::Type::BaseModel
       attr_accessor id: String
 
       attr_accessor bytes: Integer
@@ -30,33 +30,43 @@ module OpenAI
 
       attr_accessor status: OpenAI::Models::Upload::status
 
-      attr_accessor file: OpenAI::Models::FileObject?
+      attr_accessor file: OpenAI::FileObject?
- def initialize: - ( - id: String, - bytes: Integer, - created_at: Integer, - expires_at: Integer, - filename: String, - purpose: String, - status: OpenAI::Models::Upload::status, - file: OpenAI::Models::FileObject?, - object: :upload - ) -> void - | (?OpenAI::Models::upload | OpenAI::BaseModel data) -> void + def initialize: ( + id: String, + bytes: Integer, + created_at: Integer, + expires_at: Integer, + filename: String, + purpose: String, + status: OpenAI::Models::Upload::status, + ?file: OpenAI::FileObject?, + ?object: :upload + ) -> void - def to_hash: -> OpenAI::Models::upload + def to_hash: -> { + id: String, + bytes: Integer, + created_at: Integer, + expires_at: Integer, + filename: String, + object: :upload, + purpose: String, + status: OpenAI::Models::Upload::status, + file: OpenAI::FileObject? + } type status = :pending | :completed | :cancelled | :expired - class Status < OpenAI::Enum + module Status + extend OpenAI::Internal::Type::Enum + PENDING: :pending COMPLETED: :completed CANCELLED: :cancelled EXPIRED: :expired - def self.values: -> ::Array[OpenAI::Models::Upload::status] + def self?.values: -> ::Array[OpenAI::Models::Upload::status] end end end diff --git a/sig/openai/models/upload_cancel_params.rbs b/sig/openai/models/upload_cancel_params.rbs index 4ddc9f7b..92040ad5 100644 --- a/sig/openai/models/upload_cancel_params.rbs +++ b/sig/openai/models/upload_cancel_params.rbs @@ -1,18 +1,15 @@ module OpenAI module Models - type upload_cancel_params = { } & OpenAI::request_parameters + type upload_cancel_params = + { } & OpenAI::Internal::Type::request_parameters - class UploadCancelParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class UploadCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::upload_cancel_params | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::upload_cancel_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/upload_complete_params.rbs b/sig/openai/models/upload_complete_params.rbs index a22d1531..c223bb70 100644 --- a/sig/openai/models/upload_complete_params.rbs +++ b/sig/openai/models/upload_complete_params.rbs @@ -1,11 +1,12 @@ module OpenAI module Models type upload_complete_params = - { part_ids: ::Array[String], :md5 => String } & OpenAI::request_parameters + { part_ids: ::Array[String], :md5 => String } + & OpenAI::Internal::Type::request_parameters - class UploadCompleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class UploadCompleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor part_ids: ::Array[String] @@ -13,17 +14,17 @@ module OpenAI def md5=: (String) -> String - def initialize: - ( - part_ids: ::Array[String], - md5: String, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::upload_complete_params | OpenAI::BaseModel data - ) -> void + def initialize: ( + part_ids: ::Array[String], + ?md5: String, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::upload_complete_params + def to_hash: -> 
{ + part_ids: ::Array[String], + :md5 => String, + request_options: OpenAI::RequestOptions + } end end end diff --git a/sig/openai/models/upload_create_params.rbs b/sig/openai/models/upload_create_params.rbs index de006ed9..f9f767fb 100644 --- a/sig/openai/models/upload_create_params.rbs +++ b/sig/openai/models/upload_create_params.rbs @@ -5,13 +5,14 @@ module OpenAI bytes: Integer, filename: String, mime_type: String, - purpose: OpenAI::Models::file_purpose + purpose: OpenAI::Models::file_purpose, + expires_after: OpenAI::UploadCreateParams::ExpiresAfter } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class UploadCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class UploadCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor bytes: Integer @@ -21,19 +22,41 @@ module OpenAI attr_accessor purpose: OpenAI::Models::file_purpose - def initialize: - ( - bytes: Integer, - filename: String, - mime_type: String, - purpose: OpenAI::Models::file_purpose, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::upload_create_params | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::upload_create_params + attr_reader expires_after: OpenAI::UploadCreateParams::ExpiresAfter? + + def expires_after=: ( + OpenAI::UploadCreateParams::ExpiresAfter + ) -> OpenAI::UploadCreateParams::ExpiresAfter + + def initialize: ( + bytes: Integer, + filename: String, + mime_type: String, + purpose: OpenAI::Models::file_purpose, + ?expires_after: OpenAI::UploadCreateParams::ExpiresAfter, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + bytes: Integer, + filename: String, + mime_type: String, + purpose: OpenAI::Models::file_purpose, + expires_after: OpenAI::UploadCreateParams::ExpiresAfter, + request_options: OpenAI::RequestOptions + } + + type expires_after = { anchor: :created_at, seconds: Integer } + + class ExpiresAfter < OpenAI::Internal::Type::BaseModel + attr_accessor anchor: :created_at + + attr_accessor seconds: Integer + + def initialize: (seconds: Integer, ?anchor: :created_at) -> void + + def to_hash: -> { anchor: :created_at, seconds: Integer } + end end end end diff --git a/sig/openai/models/uploads/part_create_params.rbs b/sig/openai/models/uploads/part_create_params.rbs index 0276fa19..deeec480 100644 --- a/sig/openai/models/uploads/part_create_params.rbs +++ b/sig/openai/models/uploads/part_create_params.rbs @@ -2,22 +2,24 @@ module OpenAI module Models module Uploads type part_create_params = - { data: (IO | StringIO) } & OpenAI::request_parameters + { data: OpenAI::Internal::file_input } + & OpenAI::Internal::Type::request_parameters - class PartCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class PartCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - attr_accessor data: IO | StringIO + attr_accessor data: OpenAI::Internal::file_input - def initialize: - (data: IO | StringIO, request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::Uploads::part_create_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + data: OpenAI::Internal::file_input, + ?request_options: OpenAI::request_opts + ) -> void - def 
to_hash: -> OpenAI::Models::Uploads::part_create_params + def to_hash: -> { + data: OpenAI::Internal::file_input, + request_options: OpenAI::RequestOptions + } end end end diff --git a/sig/openai/models/uploads/upload_part.rbs b/sig/openai/models/uploads/upload_part.rbs index 18ff6f6e..60554e85 100644 --- a/sig/openai/models/uploads/upload_part.rbs +++ b/sig/openai/models/uploads/upload_part.rbs @@ -1,6 +1,5 @@ module OpenAI module Models - class UploadPart = Uploads::UploadPart module Uploads @@ -12,7 +11,7 @@ module OpenAI upload_id: String } - class UploadPart < OpenAI::BaseModel + class UploadPart < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor created_at: Integer @@ -21,18 +20,19 @@ module OpenAI attr_accessor upload_id: String - def initialize: - ( - id: String, - created_at: Integer, - upload_id: String, - object: :"upload.part" - ) -> void - | ( - ?OpenAI::Models::Uploads::upload_part | OpenAI::BaseModel data - ) -> void + def initialize: ( + id: String, + created_at: Integer, + upload_id: String, + ?object: :"upload.part" + ) -> void - def to_hash: -> OpenAI::Models::Uploads::upload_part + def to_hash: -> { + id: String, + created_at: Integer, + object: :"upload.part", + upload_id: String + } end end end diff --git a/sig/openai/models/vector_store.rbs b/sig/openai/models/vector_store.rbs index de85bfdb..24d34d54 100644 --- a/sig/openai/models/vector_store.rbs +++ b/sig/openai/models/vector_store.rbs @@ -4,23 +4,23 @@ module OpenAI { id: String, created_at: Integer, - file_counts: OpenAI::Models::VectorStore::FileCounts, + file_counts: OpenAI::VectorStore::FileCounts, last_active_at: Integer?, metadata: OpenAI::Models::metadata?, name: String, object: :vector_store, status: OpenAI::Models::VectorStore::status, usage_bytes: Integer, - expires_after: OpenAI::Models::VectorStore::ExpiresAfter, + expires_after: OpenAI::VectorStore::ExpiresAfter, expires_at: Integer? } - class VectorStore < OpenAI::BaseModel + class VectorStore < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor created_at: Integer - attr_accessor file_counts: OpenAI::Models::VectorStore::FileCounts + attr_accessor file_counts: OpenAI::VectorStore::FileCounts attr_accessor last_active_at: Integer? @@ -34,31 +34,41 @@ module OpenAI attr_accessor usage_bytes: Integer - attr_reader expires_after: OpenAI::Models::VectorStore::ExpiresAfter? + attr_reader expires_after: OpenAI::VectorStore::ExpiresAfter? def expires_after=: ( - OpenAI::Models::VectorStore::ExpiresAfter - ) -> OpenAI::Models::VectorStore::ExpiresAfter + OpenAI::VectorStore::ExpiresAfter + ) -> OpenAI::VectorStore::ExpiresAfter attr_accessor expires_at: Integer? 
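Note the distinction these signatures draw between nullable and optional: `attr_accessor expires_at: Integer?` declares a field whose value may be nil but which is always present on the model, while the `attr_reader expires_after` / `def expires_after=` pair marks a field that may be absent entirely. In the rewritten `initialize` below, the two are encoded independently: a leading `?` on a keyword makes it optional, and a trailing `?` on a type makes its value nullable. A minimal sketch of how a checker such as Steep reads the combinations, assuming a hypothetical `OpenAI::VectorStore` instance `vs` (not constructed here):

    # Hypothetical instance `vs` of OpenAI::VectorStore:
    vs.expires_at     # => Integer or nil; accessor always exists (nullable field)
    vs.expires_after  # => ExpiresAfter or nil; reader is nilable because the
                      #    keyword is optional (`?expires_after:` in initialize)

    # Optional and nullable can combine: `?expires_at: Integer?` means the
    # keyword may be omitted or passed explicitly as nil.
    OpenAI::VectorStore::ExpiresAfter.new(days: 30)                      # anchor defaults
    OpenAI::VectorStore::ExpiresAfter.new(days: 30, anchor: :last_active_at)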
- def initialize: - ( - id: String, - created_at: Integer, - file_counts: OpenAI::Models::VectorStore::FileCounts, - last_active_at: Integer?, - metadata: OpenAI::Models::metadata?, - name: String, - status: OpenAI::Models::VectorStore::status, - usage_bytes: Integer, - expires_after: OpenAI::Models::VectorStore::ExpiresAfter, - expires_at: Integer?, - object: :vector_store - ) -> void - | (?OpenAI::Models::vector_store | OpenAI::BaseModel data) -> void + def initialize: ( + id: String, + created_at: Integer, + file_counts: OpenAI::VectorStore::FileCounts, + last_active_at: Integer?, + metadata: OpenAI::Models::metadata?, + name: String, + status: OpenAI::Models::VectorStore::status, + usage_bytes: Integer, + ?expires_after: OpenAI::VectorStore::ExpiresAfter, + ?expires_at: Integer?, + ?object: :vector_store + ) -> void - def to_hash: -> OpenAI::Models::vector_store + def to_hash: -> { + id: String, + created_at: Integer, + file_counts: OpenAI::VectorStore::FileCounts, + last_active_at: Integer?, + metadata: OpenAI::Models::metadata?, + name: String, + object: :vector_store, + status: OpenAI::Models::VectorStore::status, + usage_bytes: Integer, + expires_after: OpenAI::VectorStore::ExpiresAfter, + expires_at: Integer? + } type file_counts = { @@ -69,7 +79,7 @@ module OpenAI total: Integer } - class FileCounts < OpenAI::BaseModel + class FileCounts < OpenAI::Internal::Type::BaseModel attr_accessor cancelled: Integer attr_accessor completed: Integer @@ -80,45 +90,45 @@ module OpenAI attr_accessor total: Integer - def initialize: - ( - cancelled: Integer, - completed: Integer, - failed: Integer, - in_progress: Integer, - total: Integer - ) -> void - | ( - ?OpenAI::Models::VectorStore::file_counts | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::VectorStore::file_counts + def initialize: ( + cancelled: Integer, + completed: Integer, + failed: Integer, + in_progress: Integer, + total: Integer + ) -> void + + def to_hash: -> { + cancelled: Integer, + completed: Integer, + failed: Integer, + in_progress: Integer, + total: Integer + } end type status = :expired | :in_progress | :completed - class Status < OpenAI::Enum + module Status + extend OpenAI::Internal::Type::Enum + EXPIRED: :expired IN_PROGRESS: :in_progress COMPLETED: :completed - def self.values: -> ::Array[OpenAI::Models::VectorStore::status] + def self?.values: -> ::Array[OpenAI::Models::VectorStore::status] end type expires_after = { anchor: :last_active_at, days: Integer } - class ExpiresAfter < OpenAI::BaseModel + class ExpiresAfter < OpenAI::Internal::Type::BaseModel attr_accessor anchor: :last_active_at attr_accessor days: Integer - def initialize: - (days: Integer, anchor: :last_active_at) -> void - | ( - ?OpenAI::Models::VectorStore::expires_after | OpenAI::BaseModel data - ) -> void + def initialize: (days: Integer, ?anchor: :last_active_at) -> void - def to_hash: -> OpenAI::Models::VectorStore::expires_after + def to_hash: -> { anchor: :last_active_at, days: Integer } end end end diff --git a/sig/openai/models/vector_store_create_params.rbs b/sig/openai/models/vector_store_create_params.rbs index 93b49fe7..d5c48eb6 100644 --- a/sig/openai/models/vector_store_create_params.rbs +++ b/sig/openai/models/vector_store_create_params.rbs @@ -3,16 +3,16 @@ module OpenAI type vector_store_create_params = { chunking_strategy: OpenAI::Models::file_chunking_strategy_param, - expires_after: OpenAI::Models::VectorStoreCreateParams::ExpiresAfter, + expires_after: OpenAI::VectorStoreCreateParams::ExpiresAfter, file_ids: 
::Array[String], metadata: OpenAI::Models::metadata?, name: String } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class VectorStoreCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class VectorStoreCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_reader chunking_strategy: OpenAI::Models::file_chunking_strategy_param? @@ -20,11 +20,11 @@ module OpenAI OpenAI::Models::file_chunking_strategy_param ) -> OpenAI::Models::file_chunking_strategy_param - attr_reader expires_after: OpenAI::Models::VectorStoreCreateParams::ExpiresAfter? + attr_reader expires_after: OpenAI::VectorStoreCreateParams::ExpiresAfter? def expires_after=: ( - OpenAI::Models::VectorStoreCreateParams::ExpiresAfter - ) -> OpenAI::Models::VectorStoreCreateParams::ExpiresAfter + OpenAI::VectorStoreCreateParams::ExpiresAfter + ) -> OpenAI::VectorStoreCreateParams::ExpiresAfter attr_reader file_ids: ::Array[String]? @@ -36,36 +36,34 @@ module OpenAI def name=: (String) -> String - def initialize: - ( - chunking_strategy: OpenAI::Models::file_chunking_strategy_param, - expires_after: OpenAI::Models::VectorStoreCreateParams::ExpiresAfter, - file_ids: ::Array[String], - metadata: OpenAI::Models::metadata?, - name: String, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::vector_store_create_params | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::vector_store_create_params + def initialize: ( + ?chunking_strategy: OpenAI::Models::file_chunking_strategy_param, + ?expires_after: OpenAI::VectorStoreCreateParams::ExpiresAfter, + ?file_ids: ::Array[String], + ?metadata: OpenAI::Models::metadata?, + ?name: String, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + chunking_strategy: OpenAI::Models::file_chunking_strategy_param, + expires_after: OpenAI::VectorStoreCreateParams::ExpiresAfter, + file_ids: ::Array[String], + metadata: OpenAI::Models::metadata?, + name: String, + request_options: OpenAI::RequestOptions + } type expires_after = { anchor: :last_active_at, days: Integer } - class ExpiresAfter < OpenAI::BaseModel + class ExpiresAfter < OpenAI::Internal::Type::BaseModel attr_accessor anchor: :last_active_at attr_accessor days: Integer - def initialize: - (days: Integer, anchor: :last_active_at) -> void - | ( - ?OpenAI::Models::VectorStoreCreateParams::expires_after - | OpenAI::BaseModel data - ) -> void + def initialize: (days: Integer, ?anchor: :last_active_at) -> void - def to_hash: -> OpenAI::Models::VectorStoreCreateParams::expires_after + def to_hash: -> { anchor: :last_active_at, days: Integer } end end end diff --git a/sig/openai/models/vector_store_delete_params.rbs b/sig/openai/models/vector_store_delete_params.rbs index 42f031e5..d64b7288 100644 --- a/sig/openai/models/vector_store_delete_params.rbs +++ b/sig/openai/models/vector_store_delete_params.rbs @@ -1,18 +1,15 @@ module OpenAI module Models - type vector_store_delete_params = { } & OpenAI::request_parameters + type vector_store_delete_params = + { } & OpenAI::Internal::Type::request_parameters - class VectorStoreDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class VectorStoreDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include 
OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::vector_store_delete_params | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::vector_store_delete_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/vector_store_deleted.rbs b/sig/openai/models/vector_store_deleted.rbs index 53fe34e4..1ca36ab4 100644 --- a/sig/openai/models/vector_store_deleted.rbs +++ b/sig/openai/models/vector_store_deleted.rbs @@ -3,20 +3,24 @@ module OpenAI type vector_store_deleted = { id: String, deleted: bool, object: :"vector_store.deleted" } - class VectorStoreDeleted < OpenAI::BaseModel + class VectorStoreDeleted < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor deleted: bool attr_accessor object: :"vector_store.deleted" - def initialize: - (id: String, deleted: bool, object: :"vector_store.deleted") -> void - | ( - ?OpenAI::Models::vector_store_deleted | OpenAI::BaseModel data - ) -> void + def initialize: ( + id: String, + deleted: bool, + ?object: :"vector_store.deleted" + ) -> void - def to_hash: -> OpenAI::Models::vector_store_deleted + def to_hash: -> { + id: String, + deleted: bool, + object: :"vector_store.deleted" + } end end end diff --git a/sig/openai/models/vector_store_list_params.rbs b/sig/openai/models/vector_store_list_params.rbs index 7ec8d50d..dfbe777d 100644 --- a/sig/openai/models/vector_store_list_params.rbs +++ b/sig/openai/models/vector_store_list_params.rbs @@ -7,11 +7,11 @@ module OpenAI limit: Integer, order: OpenAI::Models::VectorStoreListParams::order } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class VectorStoreListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class VectorStoreListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_reader after: String? 
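The same convention governs request params: every keyword in the rewritten `initialize` below carries a leading `?`, so list calls type-check with any subset of filters. A sketch of the call site, assuming the SDK's conventional `client.vector_stores.list` resource method that consumes these params (the resource method itself is outside this diff):

    require "openai"

    client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

    client.vector_stores.list                        # all keywords optional
    client.vector_stores.list(limit: 20)             # ?limit: Integer
    client.vector_stores.list(order: :desc)          # enum-checked: only :asc | :desc
    client.vector_stores.list(after: "vs_abc", limit: 5, order: :asc)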
@@ -31,27 +31,31 @@ module OpenAI OpenAI::Models::VectorStoreListParams::order ) -> OpenAI::Models::VectorStoreListParams::order - def initialize: - ( - after: String, - before: String, - limit: Integer, - order: OpenAI::Models::VectorStoreListParams::order, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::vector_store_list_params | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?after: String, + ?before: String, + ?limit: Integer, + ?order: OpenAI::Models::VectorStoreListParams::order, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::vector_store_list_params + def to_hash: -> { + after: String, + before: String, + limit: Integer, + order: OpenAI::Models::VectorStoreListParams::order, + request_options: OpenAI::RequestOptions + } type order = :asc | :desc - class Order < OpenAI::Enum + module Order + extend OpenAI::Internal::Type::Enum + ASC: :asc DESC: :desc - def self.values: -> ::Array[OpenAI::Models::VectorStoreListParams::order] + def self?.values: -> ::Array[OpenAI::Models::VectorStoreListParams::order] end end end diff --git a/sig/openai/models/vector_store_retrieve_params.rbs b/sig/openai/models/vector_store_retrieve_params.rbs index a2603a79..92c244d4 100644 --- a/sig/openai/models/vector_store_retrieve_params.rbs +++ b/sig/openai/models/vector_store_retrieve_params.rbs @@ -1,18 +1,15 @@ module OpenAI module Models - type vector_store_retrieve_params = { } & OpenAI::request_parameters + type vector_store_retrieve_params = + { } & OpenAI::Internal::Type::request_parameters - class VectorStoreRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class VectorStoreRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - def initialize: - (request_options: OpenAI::request_opts) -> void - | ( - ?OpenAI::Models::vector_store_retrieve_params | OpenAI::BaseModel data - ) -> void + def initialize: (?request_options: OpenAI::request_opts) -> void - def to_hash: -> OpenAI::Models::vector_store_retrieve_params + def to_hash: -> { request_options: OpenAI::RequestOptions } end end end diff --git a/sig/openai/models/vector_store_search_params.rbs b/sig/openai/models/vector_store_search_params.rbs index 3f179fa9..aecf7fd7 100644 --- a/sig/openai/models/vector_store_search_params.rbs +++ b/sig/openai/models/vector_store_search_params.rbs @@ -5,14 +5,14 @@ module OpenAI query: OpenAI::Models::VectorStoreSearchParams::query, filters: OpenAI::Models::VectorStoreSearchParams::filters, max_num_results: Integer, - ranking_options: OpenAI::Models::VectorStoreSearchParams::RankingOptions, + ranking_options: OpenAI::VectorStoreSearchParams::RankingOptions, rewrite_query: bool } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class VectorStoreSearchParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class VectorStoreSearchParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor query: OpenAI::Models::VectorStoreSearchParams::query @@ -26,46 +26,50 @@ module OpenAI def max_num_results=: (Integer) -> Integer - attr_reader ranking_options: OpenAI::Models::VectorStoreSearchParams::RankingOptions? 
+ attr_reader ranking_options: OpenAI::VectorStoreSearchParams::RankingOptions? def ranking_options=: ( - OpenAI::Models::VectorStoreSearchParams::RankingOptions - ) -> OpenAI::Models::VectorStoreSearchParams::RankingOptions + OpenAI::VectorStoreSearchParams::RankingOptions + ) -> OpenAI::VectorStoreSearchParams::RankingOptions attr_reader rewrite_query: bool? def rewrite_query=: (bool) -> bool - def initialize: - ( - query: OpenAI::Models::VectorStoreSearchParams::query, - filters: OpenAI::Models::VectorStoreSearchParams::filters, - max_num_results: Integer, - ranking_options: OpenAI::Models::VectorStoreSearchParams::RankingOptions, - rewrite_query: bool, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::vector_store_search_params | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::vector_store_search_params + def initialize: ( + query: OpenAI::Models::VectorStoreSearchParams::query, + ?filters: OpenAI::Models::VectorStoreSearchParams::filters, + ?max_num_results: Integer, + ?ranking_options: OpenAI::VectorStoreSearchParams::RankingOptions, + ?rewrite_query: bool, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + query: OpenAI::Models::VectorStoreSearchParams::query, + filters: OpenAI::Models::VectorStoreSearchParams::filters, + max_num_results: Integer, + ranking_options: OpenAI::VectorStoreSearchParams::RankingOptions, + rewrite_query: bool, + request_options: OpenAI::RequestOptions + } type query = String | ::Array[String] - class Query < OpenAI::Union - type string_array = ::Array[String] + module Query + extend OpenAI::Internal::Type::Union - StringArray: string_array + def self?.variants: -> ::Array[OpenAI::Models::VectorStoreSearchParams::query] - private def self.variants: -> [[nil, String], [nil, ::Array[String]]] + StringArray: OpenAI::Internal::Type::Converter end - type filters = - OpenAI::Models::ComparisonFilter | OpenAI::Models::CompoundFilter + type filters = OpenAI::ComparisonFilter | OpenAI::CompoundFilter + + module Filters + extend OpenAI::Internal::Type::Union - class Filters < OpenAI::Union - private def self.variants: -> [[nil, OpenAI::Models::ComparisonFilter], [nil, OpenAI::Models::CompoundFilter]] + def self?.variants: -> ::Array[OpenAI::Models::VectorStoreSearchParams::filters] end type ranking_options = @@ -74,7 +78,7 @@ module OpenAI score_threshold: Float } - class RankingOptions < OpenAI::BaseModel + class RankingOptions < OpenAI::Internal::Type::BaseModel attr_reader ranker: OpenAI::Models::VectorStoreSearchParams::RankingOptions::ranker? 
def ranker=: ( @@ -85,25 +89,26 @@ module OpenAI def score_threshold=: (Float) -> Float - def initialize: - ( - ranker: OpenAI::Models::VectorStoreSearchParams::RankingOptions::ranker, - score_threshold: Float - ) -> void - | ( - ?OpenAI::Models::VectorStoreSearchParams::ranking_options - | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?ranker: OpenAI::Models::VectorStoreSearchParams::RankingOptions::ranker, + ?score_threshold: Float + ) -> void + + def to_hash: -> { + ranker: OpenAI::Models::VectorStoreSearchParams::RankingOptions::ranker, + score_threshold: Float + } - def to_hash: -> OpenAI::Models::VectorStoreSearchParams::ranking_options + type ranker = :none | :auto | :"default-2024-11-15" - type ranker = :auto | :"default-2024-11-15" + module Ranker + extend OpenAI::Internal::Type::Enum - class Ranker < OpenAI::Enum + NONE: :none AUTO: :auto DEFAULT_2024_11_15: :"default-2024-11-15" - def self.values: -> ::Array[OpenAI::Models::VectorStoreSearchParams::RankingOptions::ranker] + def self?.values: -> ::Array[OpenAI::Models::VectorStoreSearchParams::RankingOptions::ranker] end end end diff --git a/sig/openai/models/vector_store_search_response.rbs b/sig/openai/models/vector_store_search_response.rbs index f9014f12..3ce0a392 100644 --- a/sig/openai/models/vector_store_search_response.rbs +++ b/sig/openai/models/vector_store_search_response.rbs @@ -9,7 +9,7 @@ module OpenAI score: Float } - class VectorStoreSearchResponse < OpenAI::BaseModel + class VectorStoreSearchResponse < OpenAI::Internal::Type::BaseModel attr_accessor attributes: ::Hash[Symbol, OpenAI::Models::VectorStoreSearchResponse::attribute]? attr_accessor content: ::Array[OpenAI::Models::VectorStoreSearchResponse::Content] @@ -20,24 +20,28 @@ module OpenAI attr_accessor score: Float - def initialize: - ( - attributes: ::Hash[Symbol, OpenAI::Models::VectorStoreSearchResponse::attribute]?, - content: ::Array[OpenAI::Models::VectorStoreSearchResponse::Content], - file_id: String, - filename: String, - score: Float - ) -> void - | ( - ?OpenAI::Models::vector_store_search_response | OpenAI::BaseModel data - ) -> void + def initialize: ( + attributes: ::Hash[Symbol, OpenAI::Models::VectorStoreSearchResponse::attribute]?, + content: ::Array[OpenAI::Models::VectorStoreSearchResponse::Content], + file_id: String, + filename: String, + score: Float + ) -> void - def to_hash: -> OpenAI::Models::vector_store_search_response + def to_hash: -> { + attributes: ::Hash[Symbol, OpenAI::Models::VectorStoreSearchResponse::attribute]?, + content: ::Array[OpenAI::Models::VectorStoreSearchResponse::Content], + file_id: String, + filename: String, + score: Float + } type attribute = String | Float | bool - class Attribute < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, Float], [nil, bool]] + module Attribute + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::VectorStoreSearchResponse::attribute] end type content = @@ -46,29 +50,29 @@ module OpenAI type: OpenAI::Models::VectorStoreSearchResponse::Content::type_ } - class Content < OpenAI::BaseModel + class Content < OpenAI::Internal::Type::BaseModel attr_accessor text: String attr_accessor type: OpenAI::Models::VectorStoreSearchResponse::Content::type_ - def initialize: - ( - text: String, - type: OpenAI::Models::VectorStoreSearchResponse::Content::type_ - ) -> void - | ( - ?OpenAI::Models::VectorStoreSearchResponse::content - | OpenAI::BaseModel data - ) -> void + def initialize: ( + text: String, + type: 
OpenAI::Models::VectorStoreSearchResponse::Content::type_ + ) -> void - def to_hash: -> OpenAI::Models::VectorStoreSearchResponse::content + def to_hash: -> { + text: String, + type: OpenAI::Models::VectorStoreSearchResponse::Content::type_ + } type type_ = :text - class Type < OpenAI::Enum + module Type + extend OpenAI::Internal::Type::Enum + TEXT: :text - def self.values: -> ::Array[OpenAI::Models::VectorStoreSearchResponse::Content::type_] + def self?.values: -> ::Array[OpenAI::Models::VectorStoreSearchResponse::Content::type_] end end end diff --git a/sig/openai/models/vector_store_update_params.rbs b/sig/openai/models/vector_store_update_params.rbs index 33acab97..4faf2804 100644 --- a/sig/openai/models/vector_store_update_params.rbs +++ b/sig/openai/models/vector_store_update_params.rbs @@ -2,50 +2,46 @@ module OpenAI module Models type vector_store_update_params = { - expires_after: OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter?, + expires_after: OpenAI::VectorStoreUpdateParams::ExpiresAfter?, metadata: OpenAI::Models::metadata?, name: String? } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class VectorStoreUpdateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class VectorStoreUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters - attr_accessor expires_after: OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter? + attr_accessor expires_after: OpenAI::VectorStoreUpdateParams::ExpiresAfter? attr_accessor metadata: OpenAI::Models::metadata? attr_accessor name: String? - def initialize: - ( - expires_after: OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter?, - metadata: OpenAI::Models::metadata?, - name: String?, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::vector_store_update_params | OpenAI::BaseModel data - ) -> void + def initialize: ( + ?expires_after: OpenAI::VectorStoreUpdateParams::ExpiresAfter?, + ?metadata: OpenAI::Models::metadata?, + ?name: String?, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::vector_store_update_params + def to_hash: -> { + expires_after: OpenAI::VectorStoreUpdateParams::ExpiresAfter?, + metadata: OpenAI::Models::metadata?, + name: String?, + request_options: OpenAI::RequestOptions + } type expires_after = { anchor: :last_active_at, days: Integer } - class ExpiresAfter < OpenAI::BaseModel + class ExpiresAfter < OpenAI::Internal::Type::BaseModel attr_accessor anchor: :last_active_at attr_accessor days: Integer - def initialize: - (days: Integer, anchor: :last_active_at) -> void - | ( - ?OpenAI::Models::VectorStoreUpdateParams::expires_after - | OpenAI::BaseModel data - ) -> void + def initialize: (days: Integer, ?anchor: :last_active_at) -> void - def to_hash: -> OpenAI::Models::VectorStoreUpdateParams::expires_after + def to_hash: -> { anchor: :last_active_at, days: Integer } end end end diff --git a/sig/openai/models/vector_stores/file_batch_cancel_params.rbs b/sig/openai/models/vector_stores/file_batch_cancel_params.rbs index 42b68be5..22b94cc4 100644 --- a/sig/openai/models/vector_stores/file_batch_cancel_params.rbs +++ b/sig/openai/models/vector_stores/file_batch_cancel_params.rbs @@ -2,25 +2,23 @@ module OpenAI module Models module VectorStores type file_batch_cancel_params = - { vector_store_id: String } & OpenAI::request_parameters + { vector_store_id: String } 
& OpenAI::Internal::Type::request_parameters - class FileBatchCancelParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class FileBatchCancelParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor vector_store_id: String - def initialize: - ( - vector_store_id: String, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::VectorStores::file_batch_cancel_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + vector_store_id: String, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::VectorStores::file_batch_cancel_params + def to_hash: -> { + vector_store_id: String, + request_options: OpenAI::RequestOptions + } end end end diff --git a/sig/openai/models/vector_stores/file_batch_create_params.rbs b/sig/openai/models/vector_stores/file_batch_create_params.rbs index 394219fe..b9eca2b5 100644 --- a/sig/openai/models/vector_stores/file_batch_create_params.rbs +++ b/sig/openai/models/vector_stores/file_batch_create_params.rbs @@ -7,11 +7,11 @@ module OpenAI attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]?, chunking_strategy: OpenAI::Models::file_chunking_strategy_param } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class FileBatchCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class FileBatchCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor file_ids: ::Array[String] @@ -23,24 +23,26 @@ module OpenAI OpenAI::Models::file_chunking_strategy_param ) -> OpenAI::Models::file_chunking_strategy_param - def initialize: - ( - file_ids: ::Array[String], - attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]?, - chunking_strategy: OpenAI::Models::file_chunking_strategy_param, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::VectorStores::file_batch_create_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + file_ids: ::Array[String], + ?attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]?, + ?chunking_strategy: OpenAI::Models::file_chunking_strategy_param, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::VectorStores::file_batch_create_params + def to_hash: -> { + file_ids: ::Array[String], + attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]?, + chunking_strategy: OpenAI::Models::file_chunking_strategy_param, + request_options: OpenAI::RequestOptions + } type attribute = String | Float | bool - class Attribute < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, Float], [nil, bool]] + module Attribute + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::VectorStores::FileBatchCreateParams::attribute] end end end diff --git a/sig/openai/models/vector_stores/file_batch_list_files_params.rbs b/sig/openai/models/vector_stores/file_batch_list_files_params.rbs index 21e09293..92bc31fb 100644 --- a/sig/openai/models/vector_stores/file_batch_list_files_params.rbs +++ b/sig/openai/models/vector_stores/file_batch_list_files_params.rbs @@ -10,11 +10,11 @@ 
module OpenAI limit: Integer, order: OpenAI::Models::VectorStores::FileBatchListFilesParams::order } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class FileBatchListFilesParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class FileBatchListFilesParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor vector_store_id: String @@ -42,41 +42,48 @@ module OpenAI OpenAI::Models::VectorStores::FileBatchListFilesParams::order ) -> OpenAI::Models::VectorStores::FileBatchListFilesParams::order - def initialize: - ( - vector_store_id: String, - after: String, - before: String, - filter: OpenAI::Models::VectorStores::FileBatchListFilesParams::filter, - limit: Integer, - order: OpenAI::Models::VectorStores::FileBatchListFilesParams::order, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::VectorStores::file_batch_list_files_params - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::VectorStores::file_batch_list_files_params + def initialize: ( + vector_store_id: String, + ?after: String, + ?before: String, + ?filter: OpenAI::Models::VectorStores::FileBatchListFilesParams::filter, + ?limit: Integer, + ?order: OpenAI::Models::VectorStores::FileBatchListFilesParams::order, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + vector_store_id: String, + after: String, + before: String, + filter: OpenAI::Models::VectorStores::FileBatchListFilesParams::filter, + limit: Integer, + order: OpenAI::Models::VectorStores::FileBatchListFilesParams::order, + request_options: OpenAI::RequestOptions + } type filter = :in_progress | :completed | :failed | :cancelled - class Filter < OpenAI::Enum + module Filter + extend OpenAI::Internal::Type::Enum + IN_PROGRESS: :in_progress COMPLETED: :completed FAILED: :failed CANCELLED: :cancelled - def self.values: -> ::Array[OpenAI::Models::VectorStores::FileBatchListFilesParams::filter] + def self?.values: -> ::Array[OpenAI::Models::VectorStores::FileBatchListFilesParams::filter] end type order = :asc | :desc - class Order < OpenAI::Enum + module Order + extend OpenAI::Internal::Type::Enum + ASC: :asc DESC: :desc - def self.values: -> ::Array[OpenAI::Models::VectorStores::FileBatchListFilesParams::order] + def self?.values: -> ::Array[OpenAI::Models::VectorStores::FileBatchListFilesParams::order] end end end diff --git a/sig/openai/models/vector_stores/file_batch_retrieve_params.rbs b/sig/openai/models/vector_stores/file_batch_retrieve_params.rbs index 090286fa..3f2ee92a 100644 --- a/sig/openai/models/vector_stores/file_batch_retrieve_params.rbs +++ b/sig/openai/models/vector_stores/file_batch_retrieve_params.rbs @@ -2,25 +2,23 @@ module OpenAI module Models module VectorStores type file_batch_retrieve_params = - { vector_store_id: String } & OpenAI::request_parameters + { vector_store_id: String } & OpenAI::Internal::Type::request_parameters - class FileBatchRetrieveParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class FileBatchRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor vector_store_id: String - def initialize: - ( - vector_store_id: String, - request_options: OpenAI::request_opts - ) -> void - | ( - 
?OpenAI::Models::VectorStores::file_batch_retrieve_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + vector_store_id: String, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::VectorStores::file_batch_retrieve_params + def to_hash: -> { + vector_store_id: String, + request_options: OpenAI::RequestOptions + } end end end diff --git a/sig/openai/models/vector_stores/file_content_params.rbs b/sig/openai/models/vector_stores/file_content_params.rbs index 751c410c..1797ebf6 100644 --- a/sig/openai/models/vector_stores/file_content_params.rbs +++ b/sig/openai/models/vector_stores/file_content_params.rbs @@ -2,25 +2,23 @@ module OpenAI module Models module VectorStores type file_content_params = - { vector_store_id: String } & OpenAI::request_parameters + { vector_store_id: String } & OpenAI::Internal::Type::request_parameters - class FileContentParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class FileContentParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor vector_store_id: String - def initialize: - ( - vector_store_id: String, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::VectorStores::file_content_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + vector_store_id: String, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::VectorStores::file_content_params + def to_hash: -> { + vector_store_id: String, + request_options: OpenAI::RequestOptions + } end end end diff --git a/sig/openai/models/vector_stores/file_content_response.rbs b/sig/openai/models/vector_stores/file_content_response.rbs index 3153dccf..200ec37f 100644 --- a/sig/openai/models/vector_stores/file_content_response.rbs +++ b/sig/openai/models/vector_stores/file_content_response.rbs @@ -3,7 +3,7 @@ module OpenAI module VectorStores type file_content_response = { text: String, type: String } - class FileContentResponse < OpenAI::BaseModel + class FileContentResponse < OpenAI::Internal::Type::BaseModel attr_reader text: String? 
def text=: (String) -> String @@ -12,14 +12,9 @@ module OpenAI def type=: (String) -> String - def initialize: - (text: String, type: String) -> void - | ( - ?OpenAI::Models::VectorStores::file_content_response - | OpenAI::BaseModel data - ) -> void + def initialize: (?text: String, ?type: String) -> void - def to_hash: -> OpenAI::Models::VectorStores::file_content_response + def to_hash: -> { text: String, type: String } end end end diff --git a/sig/openai/models/vector_stores/file_create_params.rbs b/sig/openai/models/vector_stores/file_create_params.rbs index 52eb4cfe..b5505515 100644 --- a/sig/openai/models/vector_stores/file_create_params.rbs +++ b/sig/openai/models/vector_stores/file_create_params.rbs @@ -7,11 +7,11 @@ module OpenAI attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileCreateParams::attribute]?, chunking_strategy: OpenAI::Models::file_chunking_strategy_param } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class FileCreateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class FileCreateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor file_id: String @@ -23,24 +23,26 @@ module OpenAI OpenAI::Models::file_chunking_strategy_param ) -> OpenAI::Models::file_chunking_strategy_param - def initialize: - ( - file_id: String, - attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileCreateParams::attribute]?, - chunking_strategy: OpenAI::Models::file_chunking_strategy_param, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::VectorStores::file_create_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + file_id: String, + ?attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileCreateParams::attribute]?, + ?chunking_strategy: OpenAI::Models::file_chunking_strategy_param, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::VectorStores::file_create_params + def to_hash: -> { + file_id: String, + attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileCreateParams::attribute]?, + chunking_strategy: OpenAI::Models::file_chunking_strategy_param, + request_options: OpenAI::RequestOptions + } type attribute = String | Float | bool - class Attribute < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, Float], [nil, bool]] + module Attribute + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::VectorStores::FileCreateParams::attribute] end end end diff --git a/sig/openai/models/vector_stores/file_delete_params.rbs b/sig/openai/models/vector_stores/file_delete_params.rbs index 212e1e30..06e77cba 100644 --- a/sig/openai/models/vector_stores/file_delete_params.rbs +++ b/sig/openai/models/vector_stores/file_delete_params.rbs @@ -2,25 +2,23 @@ module OpenAI module Models module VectorStores type file_delete_params = - { vector_store_id: String } & OpenAI::request_parameters + { vector_store_id: String } & OpenAI::Internal::Type::request_parameters - class FileDeleteParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class FileDeleteParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor vector_store_id: String - def initialize: - ( - vector_store_id: String, 
- request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::VectorStores::file_delete_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + vector_store_id: String, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::VectorStores::file_delete_params + def to_hash: -> { + vector_store_id: String, + request_options: OpenAI::RequestOptions + } end end end diff --git a/sig/openai/models/vector_stores/file_list_params.rbs b/sig/openai/models/vector_stores/file_list_params.rbs index 75b43fd1..45e9fc25 100644 --- a/sig/openai/models/vector_stores/file_list_params.rbs +++ b/sig/openai/models/vector_stores/file_list_params.rbs @@ -9,11 +9,11 @@ module OpenAI limit: Integer, order: OpenAI::Models::VectorStores::FileListParams::order } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class FileListParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class FileListParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_reader after: String? @@ -39,40 +39,46 @@ module OpenAI OpenAI::Models::VectorStores::FileListParams::order ) -> OpenAI::Models::VectorStores::FileListParams::order - def initialize: - ( - after: String, - before: String, - filter: OpenAI::Models::VectorStores::FileListParams::filter, - limit: Integer, - order: OpenAI::Models::VectorStores::FileListParams::order, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::VectorStores::file_list_params - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::VectorStores::file_list_params + def initialize: ( + ?after: String, + ?before: String, + ?filter: OpenAI::Models::VectorStores::FileListParams::filter, + ?limit: Integer, + ?order: OpenAI::Models::VectorStores::FileListParams::order, + ?request_options: OpenAI::request_opts + ) -> void + + def to_hash: -> { + after: String, + before: String, + filter: OpenAI::Models::VectorStores::FileListParams::filter, + limit: Integer, + order: OpenAI::Models::VectorStores::FileListParams::order, + request_options: OpenAI::RequestOptions + } type filter = :in_progress | :completed | :failed | :cancelled - class Filter < OpenAI::Enum + module Filter + extend OpenAI::Internal::Type::Enum + IN_PROGRESS: :in_progress COMPLETED: :completed FAILED: :failed CANCELLED: :cancelled - def self.values: -> ::Array[OpenAI::Models::VectorStores::FileListParams::filter] + def self?.values: -> ::Array[OpenAI::Models::VectorStores::FileListParams::filter] end type order = :asc | :desc - class Order < OpenAI::Enum + module Order + extend OpenAI::Internal::Type::Enum + ASC: :asc DESC: :desc - def self.values: -> ::Array[OpenAI::Models::VectorStores::FileListParams::order] + def self?.values: -> ::Array[OpenAI::Models::VectorStores::FileListParams::order] end end end diff --git a/sig/openai/models/vector_stores/file_retrieve_params.rbs b/sig/openai/models/vector_stores/file_retrieve_params.rbs index b490be00..c0cac542 100644 --- a/sig/openai/models/vector_stores/file_retrieve_params.rbs +++ b/sig/openai/models/vector_stores/file_retrieve_params.rbs @@ -2,25 +2,23 @@ module OpenAI module Models module VectorStores type file_retrieve_params = - { vector_store_id: String } & OpenAI::request_parameters + { vector_store_id: String } & OpenAI::Internal::Type::request_parameters - class FileRetrieveParams < OpenAI::BaseModel - 
extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class FileRetrieveParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor vector_store_id: String - def initialize: - ( - vector_store_id: String, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::VectorStores::file_retrieve_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + vector_store_id: String, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::VectorStores::file_retrieve_params + def to_hash: -> { + vector_store_id: String, + request_options: OpenAI::RequestOptions + } end end end diff --git a/sig/openai/models/vector_stores/file_update_params.rbs b/sig/openai/models/vector_stores/file_update_params.rbs index 7320f7b8..83729e3e 100644 --- a/sig/openai/models/vector_stores/file_update_params.rbs +++ b/sig/openai/models/vector_stores/file_update_params.rbs @@ -6,33 +6,34 @@ module OpenAI vector_store_id: String, attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileUpdateParams::attribute]? } - & OpenAI::request_parameters + & OpenAI::Internal::Type::request_parameters - class FileUpdateParams < OpenAI::BaseModel - extend OpenAI::RequestParameters::Converter - include OpenAI::RequestParameters + class FileUpdateParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters attr_accessor vector_store_id: String attr_accessor attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileUpdateParams::attribute]? - def initialize: - ( - vector_store_id: String, - attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileUpdateParams::attribute]?, - request_options: OpenAI::request_opts - ) -> void - | ( - ?OpenAI::Models::VectorStores::file_update_params - | OpenAI::BaseModel data - ) -> void + def initialize: ( + vector_store_id: String, + attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileUpdateParams::attribute]?, + ?request_options: OpenAI::request_opts + ) -> void - def to_hash: -> OpenAI::Models::VectorStores::file_update_params + def to_hash: -> { + vector_store_id: String, + attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileUpdateParams::attribute]?, + request_options: OpenAI::RequestOptions + } type attribute = String | Float | bool - class Attribute < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, Float], [nil, bool]] + module Attribute + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::VectorStores::FileUpdateParams::attribute] end end end diff --git a/sig/openai/models/vector_stores/vector_store_file.rbs b/sig/openai/models/vector_stores/vector_store_file.rbs index 3bc27550..3c64122d 100644 --- a/sig/openai/models/vector_stores/vector_store_file.rbs +++ b/sig/openai/models/vector_stores/vector_store_file.rbs @@ -1,6 +1,5 @@ module OpenAI module Models - class VectorStoreFile = VectorStores::VectorStoreFile module VectorStores @@ -8,7 +7,7 @@ module OpenAI { id: String, created_at: Integer, - last_error: OpenAI::Models::VectorStores::VectorStoreFile::LastError?, + last_error: OpenAI::VectorStores::VectorStoreFile::LastError?, object: :"vector_store.file", status: OpenAI::Models::VectorStores::VectorStoreFile::status, usage_bytes: Integer, @@ -17,12 +16,12 @@ module OpenAI chunking_strategy: 
OpenAI::Models::file_chunking_strategy } - class VectorStoreFile < OpenAI::BaseModel + class VectorStoreFile < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor created_at: Integer - attr_accessor last_error: OpenAI::Models::VectorStores::VectorStoreFile::LastError? + attr_accessor last_error: OpenAI::VectorStores::VectorStoreFile::LastError? attr_accessor object: :"vector_store.file" @@ -40,24 +39,29 @@ module OpenAI OpenAI::Models::file_chunking_strategy ) -> OpenAI::Models::file_chunking_strategy - def initialize: - ( - id: String, - created_at: Integer, - last_error: OpenAI::Models::VectorStores::VectorStoreFile::LastError?, - status: OpenAI::Models::VectorStores::VectorStoreFile::status, - usage_bytes: Integer, - vector_store_id: String, - attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::VectorStoreFile::attribute]?, - chunking_strategy: OpenAI::Models::file_chunking_strategy, - object: :"vector_store.file" - ) -> void - | ( - ?OpenAI::Models::VectorStores::vector_store_file - | OpenAI::BaseModel data - ) -> void + def initialize: ( + id: String, + created_at: Integer, + last_error: OpenAI::VectorStores::VectorStoreFile::LastError?, + status: OpenAI::Models::VectorStores::VectorStoreFile::status, + usage_bytes: Integer, + vector_store_id: String, + ?attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::VectorStoreFile::attribute]?, + ?chunking_strategy: OpenAI::Models::file_chunking_strategy, + ?object: :"vector_store.file" + ) -> void - def to_hash: -> OpenAI::Models::VectorStores::vector_store_file + def to_hash: -> { + id: String, + created_at: Integer, + last_error: OpenAI::VectorStores::VectorStoreFile::LastError?, + object: :"vector_store.file", + status: OpenAI::Models::VectorStores::VectorStoreFile::status, + usage_bytes: Integer, + vector_store_id: String, + attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::VectorStoreFile::attribute]?, + chunking_strategy: OpenAI::Models::file_chunking_strategy + } type last_error = { @@ -65,49 +69,53 @@ module OpenAI message: String } - class LastError < OpenAI::BaseModel + class LastError < OpenAI::Internal::Type::BaseModel attr_accessor code: OpenAI::Models::VectorStores::VectorStoreFile::LastError::code attr_accessor message: String - def initialize: - ( - code: OpenAI::Models::VectorStores::VectorStoreFile::LastError::code, - message: String - ) -> void - | ( - ?OpenAI::Models::VectorStores::VectorStoreFile::last_error - | OpenAI::BaseModel data - ) -> void + def initialize: ( + code: OpenAI::Models::VectorStores::VectorStoreFile::LastError::code, + message: String + ) -> void - def to_hash: -> OpenAI::Models::VectorStores::VectorStoreFile::last_error + def to_hash: -> { + code: OpenAI::Models::VectorStores::VectorStoreFile::LastError::code, + message: String + } type code = :server_error | :unsupported_file | :invalid_file - class Code < OpenAI::Enum + module Code + extend OpenAI::Internal::Type::Enum + SERVER_ERROR: :server_error UNSUPPORTED_FILE: :unsupported_file INVALID_FILE: :invalid_file - def self.values: -> ::Array[OpenAI::Models::VectorStores::VectorStoreFile::LastError::code] + def self?.values: -> ::Array[OpenAI::Models::VectorStores::VectorStoreFile::LastError::code] end end type status = :in_progress | :completed | :cancelled | :failed - class Status < OpenAI::Enum + module Status + extend OpenAI::Internal::Type::Enum + IN_PROGRESS: :in_progress COMPLETED: :completed CANCELLED: :cancelled FAILED: :failed - def self.values: -> 
::Array[OpenAI::Models::VectorStores::VectorStoreFile::status] + def self?.values: -> ::Array[OpenAI::Models::VectorStores::VectorStoreFile::status] end type attribute = String | Float | bool - class Attribute < OpenAI::Union - private def self.variants: -> [[nil, String], [nil, Float], [nil, bool]] + module Attribute + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::VectorStores::VectorStoreFile::attribute] end end end diff --git a/sig/openai/models/vector_stores/vector_store_file_batch.rbs b/sig/openai/models/vector_stores/vector_store_file_batch.rbs index 452a536f..6ad78bb0 100644 --- a/sig/openai/models/vector_stores/vector_store_file_batch.rbs +++ b/sig/openai/models/vector_stores/vector_store_file_batch.rbs @@ -1,6 +1,5 @@ module OpenAI module Models - class VectorStoreFileBatch = VectorStores::VectorStoreFileBatch module VectorStores @@ -8,18 +7,18 @@ module OpenAI { id: String, created_at: Integer, - file_counts: OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts, + file_counts: OpenAI::VectorStores::VectorStoreFileBatch::FileCounts, object: :"vector_store.files_batch", status: OpenAI::Models::VectorStores::VectorStoreFileBatch::status, vector_store_id: String } - class VectorStoreFileBatch < OpenAI::BaseModel + class VectorStoreFileBatch < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor created_at: Integer - attr_accessor file_counts: OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts + attr_accessor file_counts: OpenAI::VectorStores::VectorStoreFileBatch::FileCounts attr_accessor object: :"vector_store.files_batch" @@ -27,21 +26,23 @@ module OpenAI attr_accessor vector_store_id: String - def initialize: - ( - id: String, - created_at: Integer, - file_counts: OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts, - status: OpenAI::Models::VectorStores::VectorStoreFileBatch::status, - vector_store_id: String, - object: :"vector_store.files_batch" - ) -> void - | ( - ?OpenAI::Models::VectorStores::vector_store_file_batch - | OpenAI::BaseModel data - ) -> void + def initialize: ( + id: String, + created_at: Integer, + file_counts: OpenAI::VectorStores::VectorStoreFileBatch::FileCounts, + status: OpenAI::Models::VectorStores::VectorStoreFileBatch::status, + vector_store_id: String, + ?object: :"vector_store.files_batch" + ) -> void - def to_hash: -> OpenAI::Models::VectorStores::vector_store_file_batch + def to_hash: -> { + id: String, + created_at: Integer, + file_counts: OpenAI::VectorStores::VectorStoreFileBatch::FileCounts, + object: :"vector_store.files_batch", + status: OpenAI::Models::VectorStores::VectorStoreFileBatch::status, + vector_store_id: String + } type file_counts = { @@ -52,7 +53,7 @@ module OpenAI total: Integer } - class FileCounts < OpenAI::BaseModel + class FileCounts < OpenAI::Internal::Type::BaseModel attr_accessor cancelled: Integer attr_accessor completed: Integer @@ -63,31 +64,34 @@ module OpenAI attr_accessor total: Integer - def initialize: - ( - cancelled: Integer, - completed: Integer, - failed: Integer, - in_progress: Integer, - total: Integer - ) -> void - | ( - ?OpenAI::Models::VectorStores::VectorStoreFileBatch::file_counts - | OpenAI::BaseModel data - ) -> void - - def to_hash: -> OpenAI::Models::VectorStores::VectorStoreFileBatch::file_counts + def initialize: ( + cancelled: Integer, + completed: Integer, + failed: Integer, + in_progress: Integer, + total: Integer + ) -> void + + def to_hash: -> { + cancelled: Integer, + completed: Integer, + failed: 
Integer, + in_progress: Integer, + total: Integer + } end type status = :in_progress | :completed | :cancelled | :failed - class Status < OpenAI::Enum + module Status + extend OpenAI::Internal::Type::Enum + IN_PROGRESS: :in_progress COMPLETED: :completed CANCELLED: :cancelled FAILED: :failed - def self.values: -> ::Array[OpenAI::Models::VectorStores::VectorStoreFileBatch::status] + def self?.values: -> ::Array[OpenAI::Models::VectorStores::VectorStoreFileBatch::status] end end end diff --git a/sig/openai/models/vector_stores/vector_store_file_deleted.rbs b/sig/openai/models/vector_stores/vector_store_file_deleted.rbs index fe8437d3..235b13ae 100644 --- a/sig/openai/models/vector_stores/vector_store_file_deleted.rbs +++ b/sig/openai/models/vector_stores/vector_store_file_deleted.rbs @@ -1,31 +1,29 @@ module OpenAI module Models - class VectorStoreFileDeleted = VectorStores::VectorStoreFileDeleted module VectorStores type vector_store_file_deleted = { id: String, deleted: bool, object: :"vector_store.file.deleted" } - class VectorStoreFileDeleted < OpenAI::BaseModel + class VectorStoreFileDeleted < OpenAI::Internal::Type::BaseModel attr_accessor id: String attr_accessor deleted: bool attr_accessor object: :"vector_store.file.deleted" - def initialize: - ( - id: String, - deleted: bool, - object: :"vector_store.file.deleted" - ) -> void - | ( - ?OpenAI::Models::VectorStores::vector_store_file_deleted - | OpenAI::BaseModel data - ) -> void + def initialize: ( + id: String, + deleted: bool, + ?object: :"vector_store.file.deleted" + ) -> void - def to_hash: -> OpenAI::Models::VectorStores::vector_store_file_deleted + def to_hash: -> { + id: String, + deleted: bool, + object: :"vector_store.file.deleted" + } end end end diff --git a/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs b/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs new file mode 100644 index 00000000..9efbe689 --- /dev/null +++ b/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module Webhooks + type batch_cancelled_webhook_event = + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchCancelledWebhookEvent::Data, + type: :"batch.cancelled", + object: OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::object + } + + class BatchCancelledWebhookEvent < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data: OpenAI::Webhooks::BatchCancelledWebhookEvent::Data + + attr_accessor type: :"batch.cancelled" + + attr_reader object: OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::object? 
+ + def object=: ( + OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::object + ) -> OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::object + + def initialize: ( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchCancelledWebhookEvent::Data, + ?object: OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::object, + ?type: :"batch.cancelled" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchCancelledWebhookEvent::Data, + type: :"batch.cancelled", + object: OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::object + } + + type data = { id: String } + + class Data < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + def initialize: (id: String) -> void + + def to_hash: -> { id: String } + end + + type object = :event + + module Object + extend OpenAI::Internal::Type::Enum + + EVENT: :event + + def self?.values: -> ::Array[OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::object] + end + end + end + end +end diff --git a/sig/openai/models/webhooks/batch_completed_webhook_event.rbs b/sig/openai/models/webhooks/batch_completed_webhook_event.rbs new file mode 100644 index 00000000..2a113018 --- /dev/null +++ b/sig/openai/models/webhooks/batch_completed_webhook_event.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module Webhooks + type batch_completed_webhook_event = + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchCompletedWebhookEvent::Data, + type: :"batch.completed", + object: OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::object + } + + class BatchCompletedWebhookEvent < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data: OpenAI::Webhooks::BatchCompletedWebhookEvent::Data + + attr_accessor type: :"batch.completed" + + attr_reader object: OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::object? 
+ + def object=: ( + OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::object + ) -> OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::object + + def initialize: ( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchCompletedWebhookEvent::Data, + ?object: OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::object, + ?type: :"batch.completed" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchCompletedWebhookEvent::Data, + type: :"batch.completed", + object: OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::object + } + + type data = { id: String } + + class Data < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + def initialize: (id: String) -> void + + def to_hash: -> { id: String } + end + + type object = :event + + module Object + extend OpenAI::Internal::Type::Enum + + EVENT: :event + + def self?.values: -> ::Array[OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::object] + end + end + end + end +end diff --git a/sig/openai/models/webhooks/batch_expired_webhook_event.rbs b/sig/openai/models/webhooks/batch_expired_webhook_event.rbs new file mode 100644 index 00000000..8f6d4f00 --- /dev/null +++ b/sig/openai/models/webhooks/batch_expired_webhook_event.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module Webhooks + type batch_expired_webhook_event = + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchExpiredWebhookEvent::Data, + type: :"batch.expired", + object: OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::object + } + + class BatchExpiredWebhookEvent < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data: OpenAI::Webhooks::BatchExpiredWebhookEvent::Data + + attr_accessor type: :"batch.expired" + + attr_reader object: OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::object? 
+ + def object=: ( + OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::object + ) -> OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::object + + def initialize: ( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchExpiredWebhookEvent::Data, + ?object: OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::object, + ?type: :"batch.expired" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchExpiredWebhookEvent::Data, + type: :"batch.expired", + object: OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::object + } + + type data = { id: String } + + class Data < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + def initialize: (id: String) -> void + + def to_hash: -> { id: String } + end + + type object = :event + + module Object + extend OpenAI::Internal::Type::Enum + + EVENT: :event + + def self?.values: -> ::Array[OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::object] + end + end + end + end +end diff --git a/sig/openai/models/webhooks/batch_failed_webhook_event.rbs b/sig/openai/models/webhooks/batch_failed_webhook_event.rbs new file mode 100644 index 00000000..a7426344 --- /dev/null +++ b/sig/openai/models/webhooks/batch_failed_webhook_event.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module Webhooks + type batch_failed_webhook_event = + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchFailedWebhookEvent::Data, + type: :"batch.failed", + object: OpenAI::Models::Webhooks::BatchFailedWebhookEvent::object + } + + class BatchFailedWebhookEvent < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data: OpenAI::Webhooks::BatchFailedWebhookEvent::Data + + attr_accessor type: :"batch.failed" + + attr_reader object: OpenAI::Models::Webhooks::BatchFailedWebhookEvent::object? 
+ + def object=: ( + OpenAI::Models::Webhooks::BatchFailedWebhookEvent::object + ) -> OpenAI::Models::Webhooks::BatchFailedWebhookEvent::object + + def initialize: ( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchFailedWebhookEvent::Data, + ?object: OpenAI::Models::Webhooks::BatchFailedWebhookEvent::object, + ?type: :"batch.failed" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::BatchFailedWebhookEvent::Data, + type: :"batch.failed", + object: OpenAI::Models::Webhooks::BatchFailedWebhookEvent::object + } + + type data = { id: String } + + class Data < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + def initialize: (id: String) -> void + + def to_hash: -> { id: String } + end + + type object = :event + + module Object + extend OpenAI::Internal::Type::Enum + + EVENT: :event + + def self?.values: -> ::Array[OpenAI::Models::Webhooks::BatchFailedWebhookEvent::object] + end + end + end + end +end diff --git a/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs b/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs new file mode 100644 index 00000000..f1a415a4 --- /dev/null +++ b/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module Webhooks + type eval_run_canceled_webhook_event = + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Data, + type: :"eval.run.canceled", + object: OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::object + } + + class EvalRunCanceledWebhookEvent < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data: OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Data + + attr_accessor type: :"eval.run.canceled" + + attr_reader object: OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::object? 
+ + def object=: ( + OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::object + ) -> OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::object + + def initialize: ( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Data, + ?object: OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::object, + ?type: :"eval.run.canceled" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Data, + type: :"eval.run.canceled", + object: OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::object + } + + type data = { id: String } + + class Data < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + def initialize: (id: String) -> void + + def to_hash: -> { id: String } + end + + type object = :event + + module Object + extend OpenAI::Internal::Type::Enum + + EVENT: :event + + def self?.values: -> ::Array[OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::object] + end + end + end + end +end diff --git a/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs b/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs new file mode 100644 index 00000000..7cad463b --- /dev/null +++ b/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module Webhooks + type eval_run_failed_webhook_event = + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::EvalRunFailedWebhookEvent::Data, + type: :"eval.run.failed", + object: OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::object + } + + class EvalRunFailedWebhookEvent < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data: OpenAI::Webhooks::EvalRunFailedWebhookEvent::Data + + attr_accessor type: :"eval.run.failed" + + attr_reader object: OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::object? 
+ + def object=: ( + OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::object + ) -> OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::object + + def initialize: ( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::EvalRunFailedWebhookEvent::Data, + ?object: OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::object, + ?type: :"eval.run.failed" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::EvalRunFailedWebhookEvent::Data, + type: :"eval.run.failed", + object: OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::object + } + + type data = { id: String } + + class Data < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + def initialize: (id: String) -> void + + def to_hash: -> { id: String } + end + + type object = :event + + module Object + extend OpenAI::Internal::Type::Enum + + EVENT: :event + + def self?.values: -> ::Array[OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::object] + end + end + end + end +end diff --git a/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs b/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs new file mode 100644 index 00000000..22ecc65d --- /dev/null +++ b/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module Webhooks + type eval_run_succeeded_webhook_event = + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Data, + type: :"eval.run.succeeded", + object: OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::object + } + + class EvalRunSucceededWebhookEvent < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data: OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Data + + attr_accessor type: :"eval.run.succeeded" + + attr_reader object: OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::object? 
+ + def object=: ( + OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::object + ) -> OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::object + + def initialize: ( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Data, + ?object: OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::object, + ?type: :"eval.run.succeeded" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Data, + type: :"eval.run.succeeded", + object: OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::object + } + + type data = { id: String } + + class Data < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + def initialize: (id: String) -> void + + def to_hash: -> { id: String } + end + + type object = :event + + module Object + extend OpenAI::Internal::Type::Enum + + EVENT: :event + + def self?.values: -> ::Array[OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::object] + end + end + end + end +end diff --git a/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs b/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs new file mode 100644 index 00000000..b60d0577 --- /dev/null +++ b/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module Webhooks + type fine_tuning_job_cancelled_webhook_event = + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Data, + type: :"fine_tuning.job.cancelled", + object: OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::object + } + + class FineTuningJobCancelledWebhookEvent < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data: OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Data + + attr_accessor type: :"fine_tuning.job.cancelled" + + attr_reader object: OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::object? 
+ + def object=: ( + OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::object + ) -> OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::object + + def initialize: ( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Data, + ?object: OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::object, + ?type: :"fine_tuning.job.cancelled" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Data, + type: :"fine_tuning.job.cancelled", + object: OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::object + } + + type data = { id: String } + + class Data < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + def initialize: (id: String) -> void + + def to_hash: -> { id: String } + end + + type object = :event + + module Object + extend OpenAI::Internal::Type::Enum + + EVENT: :event + + def self?.values: -> ::Array[OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::object] + end + end + end + end +end diff --git a/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs b/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs new file mode 100644 index 00000000..007817cf --- /dev/null +++ b/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module Webhooks + type fine_tuning_job_failed_webhook_event = + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Data, + type: :"fine_tuning.job.failed", + object: OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::object + } + + class FineTuningJobFailedWebhookEvent < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data: OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Data + + attr_accessor type: :"fine_tuning.job.failed" + + attr_reader object: OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::object? 
+ + def object=: ( + OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::object + ) -> OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::object + + def initialize: ( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Data, + ?object: OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::object, + ?type: :"fine_tuning.job.failed" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Data, + type: :"fine_tuning.job.failed", + object: OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::object + } + + type data = { id: String } + + class Data < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + def initialize: (id: String) -> void + + def to_hash: -> { id: String } + end + + type object = :event + + module Object + extend OpenAI::Internal::Type::Enum + + EVENT: :event + + def self?.values: -> ::Array[OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::object] + end + end + end + end +end diff --git a/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs b/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs new file mode 100644 index 00000000..f3c862eb --- /dev/null +++ b/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module Webhooks + type fine_tuning_job_succeeded_webhook_event = + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Data, + type: :"fine_tuning.job.succeeded", + object: OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::object + } + + class FineTuningJobSucceededWebhookEvent < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data: OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Data + + attr_accessor type: :"fine_tuning.job.succeeded" + + attr_reader object: OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::object? 
+ + def object=: ( + OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::object + ) -> OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::object + + def initialize: ( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Data, + ?object: OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::object, + ?type: :"fine_tuning.job.succeeded" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Data, + type: :"fine_tuning.job.succeeded", + object: OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::object + } + + type data = { id: String } + + class Data < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + def initialize: (id: String) -> void + + def to_hash: -> { id: String } + end + + type object = :event + + module Object + extend OpenAI::Internal::Type::Enum + + EVENT: :event + + def self?.values: -> ::Array[OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::object] + end + end + end + end +end diff --git a/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs b/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs new file mode 100644 index 00000000..bac1c69e --- /dev/null +++ b/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module Webhooks + type response_cancelled_webhook_event = + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseCancelledWebhookEvent::Data, + type: :"response.cancelled", + object: OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::object + } + + class ResponseCancelledWebhookEvent < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data: OpenAI::Webhooks::ResponseCancelledWebhookEvent::Data + + attr_accessor type: :"response.cancelled" + + attr_reader object: OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::object? 
+ + def object=: ( + OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::object + ) -> OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::object + + def initialize: ( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseCancelledWebhookEvent::Data, + ?object: OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::object, + ?type: :"response.cancelled" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseCancelledWebhookEvent::Data, + type: :"response.cancelled", + object: OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::object + } + + type data = { id: String } + + class Data < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + def initialize: (id: String) -> void + + def to_hash: -> { id: String } + end + + type object = :event + + module Object + extend OpenAI::Internal::Type::Enum + + EVENT: :event + + def self?.values: -> ::Array[OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::object] + end + end + end + end +end diff --git a/sig/openai/models/webhooks/response_completed_webhook_event.rbs b/sig/openai/models/webhooks/response_completed_webhook_event.rbs new file mode 100644 index 00000000..e80a2f95 --- /dev/null +++ b/sig/openai/models/webhooks/response_completed_webhook_event.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module Webhooks + type response_completed_webhook_event = + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseCompletedWebhookEvent::Data, + type: :"response.completed", + object: OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::object + } + + class ResponseCompletedWebhookEvent < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data: OpenAI::Webhooks::ResponseCompletedWebhookEvent::Data + + attr_accessor type: :"response.completed" + + attr_reader object: OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::object? 
+ + def object=: ( + OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::object + ) -> OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::object + + def initialize: ( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseCompletedWebhookEvent::Data, + ?object: OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::object, + ?type: :"response.completed" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseCompletedWebhookEvent::Data, + type: :"response.completed", + object: OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::object + } + + type data = { id: String } + + class Data < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + def initialize: (id: String) -> void + + def to_hash: -> { id: String } + end + + type object = :event + + module Object + extend OpenAI::Internal::Type::Enum + + EVENT: :event + + def self?.values: -> ::Array[OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::object] + end + end + end + end +end diff --git a/sig/openai/models/webhooks/response_failed_webhook_event.rbs b/sig/openai/models/webhooks/response_failed_webhook_event.rbs new file mode 100644 index 00000000..3b0b25b2 --- /dev/null +++ b/sig/openai/models/webhooks/response_failed_webhook_event.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module Webhooks + type response_failed_webhook_event = + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseFailedWebhookEvent::Data, + type: :"response.failed", + object: OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::object + } + + class ResponseFailedWebhookEvent < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data: OpenAI::Webhooks::ResponseFailedWebhookEvent::Data + + attr_accessor type: :"response.failed" + + attr_reader object: OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::object? 
+ + def object=: ( + OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::object + ) -> OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::object + + def initialize: ( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseFailedWebhookEvent::Data, + ?object: OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::object, + ?type: :"response.failed" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseFailedWebhookEvent::Data, + type: :"response.failed", + object: OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::object + } + + type data = { id: String } + + class Data < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + def initialize: (id: String) -> void + + def to_hash: -> { id: String } + end + + type object = :event + + module Object + extend OpenAI::Internal::Type::Enum + + EVENT: :event + + def self?.values: -> ::Array[OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::object] + end + end + end + end +end diff --git a/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs b/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs new file mode 100644 index 00000000..4e2b3970 --- /dev/null +++ b/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs @@ -0,0 +1,66 @@ +module OpenAI + module Models + module Webhooks + type response_incomplete_webhook_event = + { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data, + type: :"response.incomplete", + object: OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::object + } + + class ResponseIncompleteWebhookEvent < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor created_at: Integer + + attr_accessor data: OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data + + attr_accessor type: :"response.incomplete" + + attr_reader object: OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::object? 
+ + def object=: ( + OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::object + ) -> OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::object + + def initialize: ( + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data, + ?object: OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::object, + ?type: :"response.incomplete" + ) -> void + + def to_hash: -> { + id: String, + created_at: Integer, + data: OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data, + type: :"response.incomplete", + object: OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::object + } + + type data = { id: String } + + class Data < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + def initialize: (id: String) -> void + + def to_hash: -> { id: String } + end + + type object = :event + + module Object + extend OpenAI::Internal::Type::Enum + + EVENT: :event + + def self?.values: -> ::Array[OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::object] + end + end + end + end +end diff --git a/sig/openai/models/webhooks/unwrap_webhook_event.rbs b/sig/openai/models/webhooks/unwrap_webhook_event.rbs new file mode 100644 index 00000000..906b2b2f --- /dev/null +++ b/sig/openai/models/webhooks/unwrap_webhook_event.rbs @@ -0,0 +1,27 @@ +module OpenAI + module Models + module Webhooks + type unwrap_webhook_event = + OpenAI::Webhooks::BatchCancelledWebhookEvent + | OpenAI::Webhooks::BatchCompletedWebhookEvent + | OpenAI::Webhooks::BatchExpiredWebhookEvent + | OpenAI::Webhooks::BatchFailedWebhookEvent + | OpenAI::Webhooks::EvalRunCanceledWebhookEvent + | OpenAI::Webhooks::EvalRunFailedWebhookEvent + | OpenAI::Webhooks::EvalRunSucceededWebhookEvent + | OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent + | OpenAI::Webhooks::FineTuningJobFailedWebhookEvent + | OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent + | OpenAI::Webhooks::ResponseCancelledWebhookEvent + | OpenAI::Webhooks::ResponseCompletedWebhookEvent + | OpenAI::Webhooks::ResponseFailedWebhookEvent + | OpenAI::Webhooks::ResponseIncompleteWebhookEvent + + module UnwrapWebhookEvent + extend OpenAI::Internal::Type::Union + + def self?.variants: -> ::Array[OpenAI::Models::Webhooks::unwrap_webhook_event] + end + end + end +end diff --git a/sig/openai/models/webhooks/webhook_unwrap_params.rbs b/sig/openai/models/webhooks/webhook_unwrap_params.rbs new file mode 100644 index 00000000..58737bf3 --- /dev/null +++ b/sig/openai/models/webhooks/webhook_unwrap_params.rbs @@ -0,0 +1,17 @@ +module OpenAI + module Models + module Webhooks + type webhook_unwrap_params = + { } & OpenAI::Internal::Type::request_parameters + + class WebhookUnwrapParams < OpenAI::Internal::Type::BaseModel + extend OpenAI::Internal::Type::RequestParameters::Converter + include OpenAI::Internal::Type::RequestParameters + + def initialize: (?request_options: OpenAI::request_opts) -> void + + def to_hash: -> { request_options: OpenAI::RequestOptions } + end + end + end +end diff --git a/sig/openai/page.rbs b/sig/openai/page.rbs deleted file mode 100644 index 94bcf79a..00000000 --- a/sig/openai/page.rbs +++ /dev/null @@ -1,16 +0,0 @@ -module OpenAI - class Page[Elem] - include OpenAI::BasePage[Elem] - - attr_accessor data: ::Array[Elem] - - attr_accessor object: String - - def initialize: ( - client: OpenAI::BaseClient, - req: OpenAI::BaseClient::request_components, - headers: ::Hash[String, String], - page_data: ::Array[top] - ) -> void - end -end diff --git a/sig/openai/pooled_net_requester.rbs 
b/sig/openai/pooled_net_requester.rbs deleted file mode 100644 index 4f89cf8d..00000000 --- a/sig/openai/pooled_net_requester.rbs +++ /dev/null @@ -1,26 +0,0 @@ -module OpenAI - class PooledNetRequester - type request = - { - method: Symbol, - url: URI::Generic, - headers: ::Hash[String, String], - body: top, - deadline: Float - } - - def self.connect: (URI::Generic url) -> top - - def self.calibrate_socket_timeout: (top conn, Float deadline) -> void - - def self.build_request: (OpenAI::PooledNetRequester::request request) -> top - - private def with_pool: (URI::Generic url) { (top arg0) -> void } -> void - - def execute: ( - OpenAI::PooledNetRequester::request request - ) -> [top, Enumerable[String]] - - def initialize: -> void - end -end diff --git a/sig/openai/request_options.rbs b/sig/openai/request_options.rbs index 97561491..0b21f3e7 100644 --- a/sig/openai/request_options.rbs +++ b/sig/openai/request_options.rbs @@ -2,28 +2,18 @@ module OpenAI type request_opts = OpenAI::RequestOptions | OpenAI::request_options | ::Hash[Symbol, top] - type request_parameters = { request_options: OpenAI::request_opts } - - module RequestParameters - attr_accessor request_options: OpenAI::request_opts - - module Converter - def dump_request: (top params) -> [top, ::Hash[Symbol, top]] - end - end - type request_options = { idempotency_key: String?, extra_query: ::Hash[String, (::Array[String] | String)?]?, extra_headers: ::Hash[String, String?]?, - extra_body: ::Hash[Symbol, top]?, + extra_body: top?, max_retries: Integer?, timeout: Float? } - class RequestOptions < OpenAI::BaseModel - def self.validate!: (self | ::Hash[Symbol, top] opts) -> void + class RequestOptions < OpenAI::Internal::Type::BaseModel + def self.validate!: (OpenAI::request_opts opts) -> void attr_accessor idempotency_key: String? @@ -31,7 +21,7 @@ module OpenAI attr_accessor extra_headers: ::Hash[String, String?]? - attr_accessor extra_body: ::Hash[Symbol, top]? + attr_accessor extra_body: top? attr_accessor max_retries: Integer? 
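The request_opts alias above accepts either an OpenAI::RequestOptions instance or a plain Hash[Symbol, top], so per-call overrides ride along as an ordinary keyword argument, and RequestOptions.validate! now takes that full union rather than only self-or-hash. A minimal sketch against the Batches#retrieve signature that appears later in this diff — the client construction and the batch ID are illustrative assumptions, not taken from these signatures:

require "openai"

# Assumption: Client.new reads OPENAI_API_KEY from the environment per the
# gem's usual conventions; this diff only shows the resource signatures.
client = OpenAI::Client.new

batch = client.batches.retrieve(
  "batch_abc123",                     # hypothetical batch ID for illustration
  request_options: {
    max_retries: 3,                   # Integer? accessor on OpenAI::RequestOptions
    timeout: 30.0,                    # Float? per-request timeout override
    idempotency_key: "job-42-retry",  # String? key for safe retries
    extra_headers: {"X-Debug" => "1"} # Hash[String, String?]? merged into the request
  }
)
puts batch.to_hash

Each key in the hash form maps to an accessor declared on OpenAI::RequestOptions above, so the hash type-checks the same as passing an explicit RequestOptions instance.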
diff --git a/sig/openai/resources/audio/speech.rbs b/sig/openai/resources/audio/speech.rbs index e537a18a..7155cd0b 100644 --- a/sig/openai/resources/audio/speech.rbs +++ b/sig/openai/resources/audio/speech.rbs @@ -2,19 +2,16 @@ module OpenAI module Resources class Audio class Speech - def create: - ( - OpenAI::Models::Audio::SpeechCreateParams - | ::Hash[Symbol, top] params - ) -> top - | ( - input: String, - model: OpenAI::Models::Audio::SpeechCreateParams::model, - voice: OpenAI::Models::Audio::SpeechCreateParams::voice, - response_format: OpenAI::Models::Audio::SpeechCreateParams::response_format, - speed: Float, - request_options: OpenAI::request_opts - ) -> top + def create: ( + input: String, + model: OpenAI::Models::Audio::SpeechCreateParams::model, + voice: OpenAI::Models::Audio::SpeechCreateParams::voice, + ?instructions: String, + ?response_format: OpenAI::Models::Audio::SpeechCreateParams::response_format, + ?speed: Float, + ?stream_format: OpenAI::Models::Audio::SpeechCreateParams::stream_format, + ?request_options: OpenAI::request_opts + ) -> StringIO def initialize: (client: OpenAI::Client) -> void end diff --git a/sig/openai/resources/audio/transcriptions.rbs b/sig/openai/resources/audio/transcriptions.rbs index d16f632f..0130f147 100644 --- a/sig/openai/resources/audio/transcriptions.rbs +++ b/sig/openai/resources/audio/transcriptions.rbs @@ -2,21 +2,31 @@ module OpenAI module Resources class Audio class Transcriptions - def create: - ( - OpenAI::Models::Audio::TranscriptionCreateParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Audio::transcription_create_response - | ( - file: IO | StringIO, - model: OpenAI::Models::Audio::TranscriptionCreateParams::model, - language: String, - prompt: String, - response_format: OpenAI::Models::audio_response_format, - temperature: Float, - timestamp_granularities: ::Array[OpenAI::Models::Audio::TranscriptionCreateParams::timestamp_granularity], - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Audio::transcription_create_response + def create: ( + file: OpenAI::Internal::file_input, + model: OpenAI::Models::Audio::TranscriptionCreateParams::model, + ?chunking_strategy: OpenAI::Models::Audio::TranscriptionCreateParams::chunking_strategy?, + ?include: ::Array[OpenAI::Models::Audio::transcription_include], + ?language: String, + ?prompt: String, + ?response_format: OpenAI::Models::audio_response_format, + ?temperature: Float, + ?timestamp_granularities: ::Array[OpenAI::Models::Audio::TranscriptionCreateParams::timestamp_granularity], + ?request_options: OpenAI::request_opts + ) -> OpenAI::Models::Audio::transcription_create_response + + def create_streaming: ( + file: OpenAI::Internal::file_input, + model: OpenAI::Models::Audio::TranscriptionCreateParams::model, + ?chunking_strategy: OpenAI::Models::Audio::TranscriptionCreateParams::chunking_strategy?, + ?include: ::Array[OpenAI::Models::Audio::transcription_include], + ?language: String, + ?prompt: String, + ?response_format: OpenAI::Models::audio_response_format, + ?temperature: Float, + ?timestamp_granularities: ::Array[OpenAI::Models::Audio::TranscriptionCreateParams::timestamp_granularity], + ?request_options: OpenAI::request_opts + ) -> OpenAI::Internal::Stream[OpenAI::Models::Audio::transcription_stream_event] def initialize: (client: OpenAI::Client) -> void end diff --git a/sig/openai/resources/audio/translations.rbs b/sig/openai/resources/audio/translations.rbs index 540b8eaf..bd3560b7 100644 --- a/sig/openai/resources/audio/translations.rbs +++ 
b/sig/openai/resources/audio/translations.rbs @@ -2,19 +2,14 @@ module OpenAI module Resources class Audio class Translations - def create: - ( - OpenAI::Models::Audio::TranslationCreateParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Audio::translation_create_response - | ( - file: IO | StringIO, - model: OpenAI::Models::Audio::TranslationCreateParams::model, - prompt: String, - response_format: OpenAI::Models::audio_response_format, - temperature: Float, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Audio::translation_create_response + def create: ( + file: OpenAI::Internal::file_input, + model: OpenAI::Models::Audio::TranslationCreateParams::model, + ?prompt: String, + ?response_format: OpenAI::Models::Audio::TranslationCreateParams::response_format, + ?temperature: Float, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Models::Audio::translation_create_response def initialize: (client: OpenAI::Client) -> void end diff --git a/sig/openai/resources/batches.rbs b/sig/openai/resources/batches.rbs index 8d004f2c..ca6f761c 100644 --- a/sig/openai/resources/batches.rbs +++ b/sig/openai/resources/batches.rbs @@ -1,47 +1,30 @@ module OpenAI module Resources class Batches - def create: - ( - OpenAI::Models::BatchCreateParams | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Batch - | ( - completion_window: OpenAI::Models::BatchCreateParams::completion_window, - endpoint: OpenAI::Models::BatchCreateParams::endpoint, - input_file_id: String, - metadata: OpenAI::Models::metadata?, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Batch + def create: ( + completion_window: OpenAI::Models::BatchCreateParams::completion_window, + endpoint: OpenAI::Models::BatchCreateParams::endpoint, + input_file_id: String, + ?metadata: OpenAI::Models::metadata?, + ?output_expires_after: OpenAI::BatchCreateParams::OutputExpiresAfter, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Batch - def retrieve: - ( - String batch_id, - ?OpenAI::Models::BatchRetrieveParams | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Batch - | ( - String batch_id, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Batch + def retrieve: ( + String batch_id, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Batch - def list: - ( - ?OpenAI::Models::BatchListParams | ::Hash[Symbol, top] params - ) -> OpenAI::CursorPage[OpenAI::Models::Batch] - | ( - after: String, - limit: Integer, - request_options: OpenAI::request_opts - ) -> OpenAI::CursorPage[OpenAI::Models::Batch] + def list: ( + ?after: String, + ?limit: Integer, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Internal::CursorPage[OpenAI::Batch] - def cancel: - ( - String batch_id, - ?OpenAI::Models::BatchCancelParams | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Batch - | ( - String batch_id, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Batch + def cancel: ( + String batch_id, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Batch def initialize: (client: OpenAI::Client) -> void end diff --git a/sig/openai/resources/beta/assistants.rbs b/sig/openai/resources/beta/assistants.rbs index 50acef87..223b3916 100644 --- a/sig/openai/resources/beta/assistants.rbs +++ b/sig/openai/resources/beta/assistants.rbs @@ -2,82 +2,54 @@ module OpenAI module Resources class Beta class Assistants - def create: - ( - OpenAI::Models::Beta::AssistantCreateParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Beta::Assistant - | ( - model: 
OpenAI::Models::Beta::AssistantCreateParams::model, - description: String?, - instructions: String?, - metadata: OpenAI::Models::metadata?, - name: String?, - reasoning_effort: OpenAI::Models::reasoning_effort?, - response_format: OpenAI::Models::Beta::assistant_response_format_option?, - temperature: Float?, - tool_resources: OpenAI::Models::Beta::AssistantCreateParams::ToolResources?, - tools: ::Array[OpenAI::Models::Beta::assistant_tool], - top_p: Float?, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Beta::Assistant + def create: ( + model: OpenAI::Models::Beta::AssistantCreateParams::model, + ?description: String?, + ?instructions: String?, + ?metadata: OpenAI::Models::metadata?, + ?name: String?, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, + ?response_format: OpenAI::Models::Beta::assistant_response_format_option?, + ?temperature: Float?, + ?tool_resources: OpenAI::Beta::AssistantCreateParams::ToolResources?, + ?tools: ::Array[OpenAI::Models::Beta::assistant_tool], + ?top_p: Float?, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Beta::Assistant - def retrieve: - ( - String assistant_id, - ?OpenAI::Models::Beta::AssistantRetrieveParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Beta::Assistant - | ( - String assistant_id, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Beta::Assistant + def retrieve: ( + String assistant_id, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Beta::Assistant - def update: - ( - String assistant_id, - ?OpenAI::Models::Beta::AssistantUpdateParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Beta::Assistant - | ( - String assistant_id, - description: String?, - instructions: String?, - metadata: OpenAI::Models::metadata?, - model: OpenAI::Models::Beta::AssistantUpdateParams::model, - name: String?, - reasoning_effort: OpenAI::Models::reasoning_effort?, - response_format: OpenAI::Models::Beta::assistant_response_format_option?, - temperature: Float?, - tool_resources: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources?, - tools: ::Array[OpenAI::Models::Beta::assistant_tool], - top_p: Float?, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Beta::Assistant + def update: ( + String assistant_id, + ?description: String?, + ?instructions: String?, + ?metadata: OpenAI::Models::metadata?, + ?model: OpenAI::Models::Beta::AssistantUpdateParams::model, + ?name: String?, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, + ?response_format: OpenAI::Models::Beta::assistant_response_format_option?, + ?temperature: Float?, + ?tool_resources: OpenAI::Beta::AssistantUpdateParams::ToolResources?, + ?tools: ::Array[OpenAI::Models::Beta::assistant_tool], + ?top_p: Float?, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Beta::Assistant - def list: - ( - ?OpenAI::Models::Beta::AssistantListParams - | ::Hash[Symbol, top] params - ) -> OpenAI::CursorPage[OpenAI::Models::Beta::Assistant] - | ( - after: String, - before: String, - limit: Integer, - order: OpenAI::Models::Beta::AssistantListParams::order, - request_options: OpenAI::request_opts - ) -> OpenAI::CursorPage[OpenAI::Models::Beta::Assistant] + def list: ( + ?after: String, + ?before: String, + ?limit: Integer, + ?order: OpenAI::Models::Beta::AssistantListParams::order, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Internal::CursorPage[OpenAI::Beta::Assistant] - def delete: - ( - String assistant_id, - ?OpenAI::Models::Beta::AssistantDeleteParams - | ::Hash[Symbol, top] params - ) -> 
OpenAI::Models::Beta::AssistantDeleted - | ( - String assistant_id, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Beta::AssistantDeleted + def delete: ( + String assistant_id, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Beta::AssistantDeleted def initialize: (client: OpenAI::Client) -> void end diff --git a/sig/openai/resources/beta/threads.rbs b/sig/openai/resources/beta/threads.rbs index b825e4a8..27b8eeaa 100644 --- a/sig/openai/resources/beta/threads.rbs +++ b/sig/openai/resources/beta/threads.rbs @@ -6,100 +6,67 @@ module OpenAI attr_reader messages: OpenAI::Resources::Beta::Threads::Messages - def create: - ( - ?OpenAI::Models::Beta::ThreadCreateParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Beta::Thread - | ( - messages: ::Array[OpenAI::Models::Beta::ThreadCreateParams::Message], - metadata: OpenAI::Models::metadata?, - tool_resources: OpenAI::Models::Beta::ThreadCreateParams::ToolResources?, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Beta::Thread + def create: ( + ?messages: ::Array[OpenAI::Beta::ThreadCreateParams::Message], + ?metadata: OpenAI::Models::metadata?, + ?tool_resources: OpenAI::Beta::ThreadCreateParams::ToolResources?, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Beta::Thread - def retrieve: - ( - String thread_id, - ?OpenAI::Models::Beta::ThreadRetrieveParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Beta::Thread - | ( - String thread_id, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Beta::Thread + def retrieve: ( + String thread_id, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Beta::Thread - def update: - ( - String thread_id, - ?OpenAI::Models::Beta::ThreadUpdateParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Beta::Thread - | ( - String thread_id, - metadata: OpenAI::Models::metadata?, - tool_resources: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources?, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Beta::Thread + def update: ( + String thread_id, + ?metadata: OpenAI::Models::metadata?, + ?tool_resources: OpenAI::Beta::ThreadUpdateParams::ToolResources?, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Beta::Thread - def delete: - ( - String thread_id, - ?OpenAI::Models::Beta::ThreadDeleteParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Beta::ThreadDeleted - | ( - String thread_id, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Beta::ThreadDeleted + def delete: ( + String thread_id, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Beta::ThreadDeleted - def create_and_run: - ( - OpenAI::Models::Beta::ThreadCreateAndRunParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Beta::Threads::Run - | ( - assistant_id: String, - instructions: String?, - max_completion_tokens: Integer?, - max_prompt_tokens: Integer?, - metadata: OpenAI::Models::metadata?, - model: OpenAI::Models::Beta::ThreadCreateAndRunParams::model?, - parallel_tool_calls: bool, - response_format: OpenAI::Models::Beta::assistant_response_format_option?, - temperature: Float?, - thread: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread, - tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, - tool_resources: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources?, - tools: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::tool]?, - top_p: Float?, - truncation_strategy: OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy?, - request_options: OpenAI::request_opts - ) 
-> OpenAI::Models::Beta::Threads::Run + def create_and_run: ( + assistant_id: String, + ?instructions: String?, + ?max_completion_tokens: Integer?, + ?max_prompt_tokens: Integer?, + ?metadata: OpenAI::Models::metadata?, + ?model: OpenAI::Models::Beta::ThreadCreateAndRunParams::model?, + ?parallel_tool_calls: bool, + ?response_format: OpenAI::Models::Beta::assistant_response_format_option?, + ?temperature: Float?, + ?thread: OpenAI::Beta::ThreadCreateAndRunParams::Thread, + ?tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, + ?tool_resources: OpenAI::Beta::ThreadCreateAndRunParams::ToolResources?, + ?tools: ::Array[OpenAI::Models::Beta::assistant_tool]?, + ?top_p: Float?, + ?truncation_strategy: OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy?, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Beta::Threads::Run - def create_and_run_streaming: - ( - OpenAI::Models::Beta::ThreadCreateAndRunParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Stream[OpenAI::Models::Beta::assistant_stream_event] - | ( - assistant_id: String, - instructions: String?, - max_completion_tokens: Integer?, - max_prompt_tokens: Integer?, - metadata: OpenAI::Models::metadata?, - model: OpenAI::Models::Beta::ThreadCreateAndRunParams::model?, - parallel_tool_calls: bool, - response_format: OpenAI::Models::Beta::assistant_response_format_option?, - temperature: Float?, - thread: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread, - tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, - tool_resources: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources?, - tools: ::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::tool]?, - top_p: Float?, - truncation_strategy: OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy?, - request_options: OpenAI::request_opts - ) -> OpenAI::Stream[OpenAI::Models::Beta::assistant_stream_event] + def stream_raw: ( + assistant_id: String, + ?instructions: String?, + ?max_completion_tokens: Integer?, + ?max_prompt_tokens: Integer?, + ?metadata: OpenAI::Models::metadata?, + ?model: OpenAI::Models::Beta::ThreadCreateAndRunParams::model?, + ?parallel_tool_calls: bool, + ?response_format: OpenAI::Models::Beta::assistant_response_format_option?, + ?temperature: Float?, + ?thread: OpenAI::Beta::ThreadCreateAndRunParams::Thread, + ?tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, + ?tool_resources: OpenAI::Beta::ThreadCreateAndRunParams::ToolResources?, + ?tools: ::Array[OpenAI::Models::Beta::assistant_tool]?, + ?top_p: Float?, + ?truncation_strategy: OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy?, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Internal::Stream[OpenAI::Models::Beta::assistant_stream_event] def initialize: (client: OpenAI::Client) -> void end diff --git a/sig/openai/resources/beta/threads/messages.rbs b/sig/openai/resources/beta/threads/messages.rbs index 3c37a9d8..8a02c24b 100644 --- a/sig/openai/resources/beta/threads/messages.rbs +++ b/sig/openai/resources/beta/threads/messages.rbs @@ -3,73 +3,43 @@ module OpenAI class Beta class Threads class Messages - def create: - ( - String thread_id, - OpenAI::Models::Beta::Threads::MessageCreateParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Beta::Threads::Message - | ( - String thread_id, - content: OpenAI::Models::Beta::Threads::MessageCreateParams::content, - role: OpenAI::Models::Beta::Threads::MessageCreateParams::role, - attachments: ::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment]?, - 
metadata: OpenAI::Models::metadata?, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Beta::Threads::Message + def create: ( + String thread_id, + content: OpenAI::Models::Beta::Threads::MessageCreateParams::content, + role: OpenAI::Models::Beta::Threads::MessageCreateParams::role, + ?attachments: ::Array[OpenAI::Beta::Threads::MessageCreateParams::Attachment]?, + ?metadata: OpenAI::Models::metadata?, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Beta::Threads::Message - def retrieve: - ( - String message_id, - OpenAI::Models::Beta::Threads::MessageRetrieveParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Beta::Threads::Message - | ( - String message_id, - thread_id: String, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Beta::Threads::Message + def retrieve: ( + String message_id, + thread_id: String, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Beta::Threads::Message - def update: - ( - String message_id, - OpenAI::Models::Beta::Threads::MessageUpdateParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Beta::Threads::Message - | ( - String message_id, - thread_id: String, - metadata: OpenAI::Models::metadata?, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Beta::Threads::Message + def update: ( + String message_id, + thread_id: String, + ?metadata: OpenAI::Models::metadata?, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Beta::Threads::Message - def list: - ( - String thread_id, - ?OpenAI::Models::Beta::Threads::MessageListParams - | ::Hash[Symbol, top] params - ) -> OpenAI::CursorPage[OpenAI::Models::Beta::Threads::Message] - | ( - String thread_id, - after: String, - before: String, - limit: Integer, - order: OpenAI::Models::Beta::Threads::MessageListParams::order, - run_id: String, - request_options: OpenAI::request_opts - ) -> OpenAI::CursorPage[OpenAI::Models::Beta::Threads::Message] + def list: ( + String thread_id, + ?after: String, + ?before: String, + ?limit: Integer, + ?order: OpenAI::Models::Beta::Threads::MessageListParams::order, + ?run_id: String, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Internal::CursorPage[OpenAI::Beta::Threads::Message] - def delete: - ( - String message_id, - OpenAI::Models::Beta::Threads::MessageDeleteParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Beta::Threads::MessageDeleted - | ( - String message_id, - thread_id: String, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Beta::Threads::MessageDeleted + def delete: ( + String message_id, + thread_id: String, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Beta::Threads::MessageDeleted def initialize: (client: OpenAI::Client) -> void end diff --git a/sig/openai/resources/beta/threads/runs.rbs b/sig/openai/resources/beta/threads/runs.rbs index a0c45000..38743701 100644 --- a/sig/openai/resources/beta/threads/runs.rbs +++ b/sig/openai/resources/beta/threads/runs.rbs @@ -5,139 +5,91 @@ module OpenAI class Runs attr_reader steps: OpenAI::Resources::Beta::Threads::Runs::Steps - def create: - ( - String thread_id, - OpenAI::Models::Beta::Threads::RunCreateParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::Beta::Threads::Run - | ( - String thread_id, - assistant_id: String, - include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include], - additional_instructions: String?, - additional_messages: ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage]?, - instructions: String?, - max_completion_tokens: Integer?, - 
max_prompt_tokens: Integer?, - metadata: OpenAI::Models::metadata?, - model: OpenAI::Models::Beta::Threads::RunCreateParams::model?, - parallel_tool_calls: bool, - reasoning_effort: OpenAI::Models::reasoning_effort?, - response_format: OpenAI::Models::Beta::assistant_response_format_option?, - temperature: Float?, - tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, - tools: ::Array[OpenAI::Models::Beta::assistant_tool]?, - top_p: Float?, - truncation_strategy: OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy?, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::Beta::Threads::Run + def create: ( + String thread_id, + assistant_id: String, + ?include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include], + ?additional_instructions: String?, + ?additional_messages: ::Array[OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage]?, + ?instructions: String?, + ?max_completion_tokens: Integer?, + ?max_prompt_tokens: Integer?, + ?metadata: OpenAI::Models::metadata?, + ?model: OpenAI::Models::Beta::Threads::RunCreateParams::model?, + ?parallel_tool_calls: bool, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, + ?response_format: OpenAI::Models::Beta::assistant_response_format_option?, + ?temperature: Float?, + ?tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, + ?tools: ::Array[OpenAI::Models::Beta::assistant_tool]?, + ?top_p: Float?, + ?truncation_strategy: OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy?, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Beta::Threads::Run - def create_streaming: - ( - String thread_id, - OpenAI::Models::Beta::Threads::RunCreateParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Stream[OpenAI::Models::Beta::assistant_stream_event] - | ( - String thread_id, - assistant_id: String, - include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include], - additional_instructions: String?, - additional_messages: ::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage]?, - instructions: String?, - max_completion_tokens: Integer?, - max_prompt_tokens: Integer?, - metadata: OpenAI::Models::metadata?, - model: OpenAI::Models::Beta::Threads::RunCreateParams::model?, - parallel_tool_calls: bool, - reasoning_effort: OpenAI::Models::reasoning_effort?, - response_format: OpenAI::Models::Beta::assistant_response_format_option?, - temperature: Float?, - tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, - tools: ::Array[OpenAI::Models::Beta::assistant_tool]?, - top_p: Float?, - truncation_strategy: OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy?, - request_options: OpenAI::request_opts - ) -> OpenAI::Stream[OpenAI::Models::Beta::assistant_stream_event] + def create_stream_raw: ( + String thread_id, + assistant_id: String, + ?include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include], + ?additional_instructions: String?, + ?additional_messages: ::Array[OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage]?, + ?instructions: String?, + ?max_completion_tokens: Integer?, + ?max_prompt_tokens: Integer?, + ?metadata: OpenAI::Models::metadata?, + ?model: OpenAI::Models::Beta::Threads::RunCreateParams::model?, + ?parallel_tool_calls: bool, + ?reasoning_effort: OpenAI::Models::reasoning_effort?, + ?response_format: OpenAI::Models::Beta::assistant_response_format_option?, + ?temperature: Float?, + ?tool_choice: OpenAI::Models::Beta::assistant_tool_choice_option?, + ?tools: 
+            ?top_p: Float?,
+            ?truncation_strategy: OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy?,
+            ?request_options: OpenAI::request_opts
+          ) -> OpenAI::Internal::Stream[OpenAI::Models::Beta::assistant_stream_event]

-          def retrieve:
-            (
-              String run_id,
-              OpenAI::Models::Beta::Threads::RunRetrieveParams
-              | ::Hash[Symbol, top] params
-            ) -> OpenAI::Models::Beta::Threads::Run
-            | (
-              String run_id,
-              thread_id: String,
-              request_options: OpenAI::request_opts
-            ) -> OpenAI::Models::Beta::Threads::Run
+          def retrieve: (
+            String run_id,
+            thread_id: String,
+            ?request_options: OpenAI::request_opts
+          ) -> OpenAI::Beta::Threads::Run

-          def update:
-            (
-              String run_id,
-              OpenAI::Models::Beta::Threads::RunUpdateParams
-              | ::Hash[Symbol, top] params
-            ) -> OpenAI::Models::Beta::Threads::Run
-            | (
-              String run_id,
-              thread_id: String,
-              metadata: OpenAI::Models::metadata?,
-              request_options: OpenAI::request_opts
-            ) -> OpenAI::Models::Beta::Threads::Run
+          def update: (
+            String run_id,
+            thread_id: String,
+            ?metadata: OpenAI::Models::metadata?,
+            ?request_options: OpenAI::request_opts
+          ) -> OpenAI::Beta::Threads::Run

-          def list:
-            (
-              String thread_id,
-              ?OpenAI::Models::Beta::Threads::RunListParams
-              | ::Hash[Symbol, top] params
-            ) -> OpenAI::CursorPage[OpenAI::Models::Beta::Threads::Run]
-            | (
-              String thread_id,
-              after: String,
-              before: String,
-              limit: Integer,
-              order: OpenAI::Models::Beta::Threads::RunListParams::order,
-              request_options: OpenAI::request_opts
-            ) -> OpenAI::CursorPage[OpenAI::Models::Beta::Threads::Run]
+          def list: (
+            String thread_id,
+            ?after: String,
+            ?before: String,
+            ?limit: Integer,
+            ?order: OpenAI::Models::Beta::Threads::RunListParams::order,
+            ?request_options: OpenAI::request_opts
+          ) -> OpenAI::Internal::CursorPage[OpenAI::Beta::Threads::Run]

-          def cancel:
-            (
-              String run_id,
-              OpenAI::Models::Beta::Threads::RunCancelParams
-              | ::Hash[Symbol, top] params
-            ) -> OpenAI::Models::Beta::Threads::Run
-            | (
-              String run_id,
-              thread_id: String,
-              request_options: OpenAI::request_opts
-            ) -> OpenAI::Models::Beta::Threads::Run
+          def cancel: (
+            String run_id,
+            thread_id: String,
+            ?request_options: OpenAI::request_opts
+          ) -> OpenAI::Beta::Threads::Run

-          def submit_tool_outputs:
-            (
-              String run_id,
-              OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams
-              | ::Hash[Symbol, top] params
-            ) -> OpenAI::Models::Beta::Threads::Run
-            | (
-              String run_id,
-              thread_id: String,
-              tool_outputs: ::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput],
-              request_options: OpenAI::request_opts
-            ) -> OpenAI::Models::Beta::Threads::Run
+          def submit_tool_outputs: (
+            String run_id,
+            thread_id: String,
+            tool_outputs: ::Array[OpenAI::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput],
+            ?request_options: OpenAI::request_opts
+          ) -> OpenAI::Beta::Threads::Run

-          def submit_tool_outputs_streaming:
-            (
-              String run_id,
-              OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams
-              | ::Hash[Symbol, top] params
-            ) -> OpenAI::Stream[OpenAI::Models::Beta::assistant_stream_event]
-            | (
-              String run_id,
-              thread_id: String,
-              tool_outputs: ::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput],
-              request_options: OpenAI::request_opts
-            ) -> OpenAI::Stream[OpenAI::Models::Beta::assistant_stream_event]
+          def submit_tool_outputs_stream_raw: (
+            String run_id,
+            thread_id: String,
+            tool_outputs: ::Array[OpenAI::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput],
+            ?request_options: OpenAI::request_opts
+          ) -> OpenAI::Internal::Stream[OpenAI::Models::Beta::assistant_stream_event]

           def initialize: (client: OpenAI::Client) -> void
         end
diff --git a/sig/openai/resources/beta/threads/runs/steps.rbs b/sig/openai/resources/beta/threads/runs/steps.rbs
index d9c96cfa..85af5cd8 100644
--- a/sig/openai/resources/beta/threads/runs/steps.rbs
+++ b/sig/openai/resources/beta/threads/runs/steps.rbs
@@ -4,36 +4,24 @@ module OpenAI
       class Threads
         class Runs
           class Steps
-            def retrieve:
-              (
-                String step_id,
-                OpenAI::Models::Beta::Threads::Runs::StepRetrieveParams
-                | ::Hash[Symbol, top] params
-              ) -> OpenAI::Models::Beta::Threads::Runs::RunStep
-              | (
-                String step_id,
-                thread_id: String,
-                run_id: String,
-                include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include],
-                request_options: OpenAI::request_opts
-              ) -> OpenAI::Models::Beta::Threads::Runs::RunStep
+            def retrieve: (
+              String step_id,
+              thread_id: String,
+              run_id: String,
+              ?include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include],
+              ?request_options: OpenAI::request_opts
+            ) -> OpenAI::Beta::Threads::Runs::RunStep

-            def list:
-              (
-                String run_id,
-                OpenAI::Models::Beta::Threads::Runs::StepListParams
-                | ::Hash[Symbol, top] params
-              ) -> OpenAI::CursorPage[OpenAI::Models::Beta::Threads::Runs::RunStep]
-              | (
-                String run_id,
-                thread_id: String,
-                after: String,
-                before: String,
-                include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include],
-                limit: Integer,
-                order: OpenAI::Models::Beta::Threads::Runs::StepListParams::order,
-                request_options: OpenAI::request_opts
-              ) -> OpenAI::CursorPage[OpenAI::Models::Beta::Threads::Runs::RunStep]
+            def list: (
+              String run_id,
+              thread_id: String,
+              ?after: String,
+              ?before: String,
+              ?include: ::Array[OpenAI::Models::Beta::Threads::Runs::run_step_include],
+              ?limit: Integer,
+              ?order: OpenAI::Models::Beta::Threads::Runs::StepListParams::order,
+              ?request_options: OpenAI::request_opts
+            ) -> OpenAI::Internal::CursorPage[OpenAI::Beta::Threads::Runs::RunStep]

             def initialize: (client: OpenAI::Client) -> void
           end
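# Illustrative usage sketch (hypothetical, not part of the diff): the streaming
# variant was renamed create_streaming -> create_stream_raw and now returns an
# OpenAI::Internal::Stream. IDs are placeholders.
# (assumes the same `client` setup as the earlier sketch)
run = client.beta.threads.runs.create("thread_abc", assistant_id: "asst_abc")

client.beta.threads.runs.create_stream_raw(
  "thread_abc",
  assistant_id: "asst_abc"
).each { |event| puts event.class } # members of assistant_stream_event

# Run steps: thread_id/run_id are required keywords, paging is optional.
client.beta.threads.runs.steps.list(run.id, thread_id: "thread_abc", limit: 5)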
diff --git a/sig/openai/resources/chat/completions.rbs b/sig/openai/resources/chat/completions.rbs
index c843c57c..a4237ff1 100644
--- a/sig/openai/resources/chat/completions.rbs
+++ b/sig/openai/resources/chat/completions.rbs
@@ -4,131 +4,104 @@ module OpenAI
     class Completions
       attr_reader messages: OpenAI::Resources::Chat::Completions::Messages

-      def create:
-        (
-          OpenAI::Models::Chat::CompletionCreateParams
-          | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::Chat::ChatCompletion
-        | (
-          messages: ::Array[OpenAI::Models::Chat::chat_completion_message_param],
-          model: OpenAI::Models::Chat::CompletionCreateParams::model,
-          audio: OpenAI::Models::Chat::ChatCompletionAudioParam?,
-          frequency_penalty: Float?,
-          function_call: OpenAI::Models::Chat::CompletionCreateParams::function_call,
-          functions: ::Array[OpenAI::Models::Chat::CompletionCreateParams::Function],
-          logit_bias: ::Hash[Symbol, Integer]?,
-          logprobs: bool?,
-          max_completion_tokens: Integer?,
-          max_tokens: Integer?,
-          metadata: OpenAI::Models::metadata?,
-          modalities: ::Array[OpenAI::Models::Chat::CompletionCreateParams::modality]?,
-          n: Integer?,
-          parallel_tool_calls: bool,
-          prediction: OpenAI::Models::Chat::ChatCompletionPredictionContent?,
-          presence_penalty: Float?,
-          reasoning_effort: OpenAI::Models::reasoning_effort?,
-          response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format,
-          seed: Integer?,
-          service_tier: OpenAI::Models::Chat::CompletionCreateParams::service_tier?,
-          stop: OpenAI::Models::Chat::CompletionCreateParams::stop?,
-          store: bool?,
-          stream_options: OpenAI::Models::Chat::ChatCompletionStreamOptions?,
-          temperature: Float?,
-          tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option,
-          tools: ::Array[OpenAI::Models::Chat::ChatCompletionTool],
-          top_logprobs: Integer?,
-          top_p: Float?,
-          user: String,
-          web_search_options: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::Chat::ChatCompletion
+      def create: (
+        messages: ::Array[OpenAI::Models::Chat::chat_completion_message_param],
+        model: OpenAI::Models::Chat::CompletionCreateParams::model,
+        ?audio: OpenAI::Chat::ChatCompletionAudioParam?,
+        ?frequency_penalty: Float?,
+        ?function_call: OpenAI::Models::Chat::CompletionCreateParams::function_call,
+        ?functions: ::Array[OpenAI::Chat::CompletionCreateParams::Function],
+        ?logit_bias: ::Hash[Symbol, Integer]?,
+        ?logprobs: bool?,
+        ?max_completion_tokens: Integer?,
+        ?max_tokens: Integer?,
+        ?metadata: OpenAI::Models::metadata?,
+        ?modalities: ::Array[OpenAI::Models::Chat::CompletionCreateParams::modality]?,
+        ?n: Integer?,
+        ?parallel_tool_calls: bool,
+        ?prediction: OpenAI::Chat::ChatCompletionPredictionContent?,
+        ?presence_penalty: Float?,
+        ?prompt_cache_key: String,
+        ?reasoning_effort: OpenAI::Models::reasoning_effort?,
+        ?response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format,
+        ?safety_identifier: String,
+        ?seed: Integer?,
+        ?service_tier: OpenAI::Models::Chat::CompletionCreateParams::service_tier?,
+        ?stop: OpenAI::Models::Chat::CompletionCreateParams::stop?,
+        ?store: bool?,
+        ?stream_options: OpenAI::Chat::ChatCompletionStreamOptions?,
+        ?temperature: Float?,
+        ?tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option,
+        ?tools: ::Array[OpenAI::Models::Chat::chat_completion_tool],
+        ?top_logprobs: Integer?,
+        ?top_p: Float?,
+        ?user: String,
+        ?verbosity: OpenAI::Models::Chat::CompletionCreateParams::verbosity?,
+        ?web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Chat::ChatCompletion

-      def create_streaming:
-        (
-          OpenAI::Models::Chat::CompletionCreateParams
-          | ::Hash[Symbol, top] params
-        ) -> OpenAI::Stream[OpenAI::Models::Chat::ChatCompletionChunk]
-        | (
-          messages: ::Array[OpenAI::Models::Chat::chat_completion_message_param],
-          model: OpenAI::Models::Chat::CompletionCreateParams::model,
-          audio: OpenAI::Models::Chat::ChatCompletionAudioParam?,
-          frequency_penalty: Float?,
-          function_call: OpenAI::Models::Chat::CompletionCreateParams::function_call,
-          functions: ::Array[OpenAI::Models::Chat::CompletionCreateParams::Function],
-          logit_bias: ::Hash[Symbol, Integer]?,
-          logprobs: bool?,
-          max_completion_tokens: Integer?,
-          max_tokens: Integer?,
-          metadata: OpenAI::Models::metadata?,
-          modalities: ::Array[OpenAI::Models::Chat::CompletionCreateParams::modality]?,
-          n: Integer?,
-          parallel_tool_calls: bool,
-          prediction: OpenAI::Models::Chat::ChatCompletionPredictionContent?,
-          presence_penalty: Float?,
-          reasoning_effort: OpenAI::Models::reasoning_effort?,
-          response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format,
-          seed: Integer?,
-          service_tier: OpenAI::Models::Chat::CompletionCreateParams::service_tier?,
-          stop: OpenAI::Models::Chat::CompletionCreateParams::stop?,
-          store: bool?,
-          stream_options: OpenAI::Models::Chat::ChatCompletionStreamOptions?,
-          temperature: Float?,
-          tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option,
-          tools: ::Array[OpenAI::Models::Chat::ChatCompletionTool],
-          top_logprobs: Integer?,
-          top_p: Float?,
-          user: String,
-          web_search_options: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Stream[OpenAI::Models::Chat::ChatCompletionChunk]
+      def stream_raw: (
+        messages: ::Array[OpenAI::Models::Chat::chat_completion_message_param],
+        model: OpenAI::Models::Chat::CompletionCreateParams::model,
+        ?audio: OpenAI::Chat::ChatCompletionAudioParam?,
+        ?frequency_penalty: Float?,
+        ?function_call: OpenAI::Models::Chat::CompletionCreateParams::function_call,
+        ?functions: ::Array[OpenAI::Chat::CompletionCreateParams::Function],
+        ?logit_bias: ::Hash[Symbol, Integer]?,
+        ?logprobs: bool?,
+        ?max_completion_tokens: Integer?,
+        ?max_tokens: Integer?,
+        ?metadata: OpenAI::Models::metadata?,
+        ?modalities: ::Array[OpenAI::Models::Chat::CompletionCreateParams::modality]?,
+        ?n: Integer?,
+        ?parallel_tool_calls: bool,
+        ?prediction: OpenAI::Chat::ChatCompletionPredictionContent?,
+        ?presence_penalty: Float?,
+        ?prompt_cache_key: String,
+        ?reasoning_effort: OpenAI::Models::reasoning_effort?,
+        ?response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format,
+        ?safety_identifier: String,
+        ?seed: Integer?,
+        ?service_tier: OpenAI::Models::Chat::CompletionCreateParams::service_tier?,
+        ?stop: OpenAI::Models::Chat::CompletionCreateParams::stop?,
+        ?store: bool?,
+        ?stream_options: OpenAI::Chat::ChatCompletionStreamOptions?,
+        ?temperature: Float?,
+        ?tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option,
+        ?tools: ::Array[OpenAI::Models::Chat::chat_completion_tool],
+        ?top_logprobs: Integer?,
+        ?top_p: Float?,
+        ?user: String,
+        ?verbosity: OpenAI::Models::Chat::CompletionCreateParams::verbosity?,
+        ?web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Internal::Stream[OpenAI::Chat::ChatCompletionChunk]

-      def retrieve:
-        (
-          String completion_id,
-          ?OpenAI::Models::Chat::CompletionRetrieveParams
-          | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::Chat::ChatCompletion
-        | (
-          String completion_id,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::Chat::ChatCompletion
+      def retrieve: (
+        String completion_id,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Chat::ChatCompletion

-      def update:
-        (
-          String completion_id,
-          OpenAI::Models::Chat::CompletionUpdateParams
-          | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::Chat::ChatCompletion
-        | (
-          String completion_id,
-          metadata: OpenAI::Models::metadata?,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::Chat::ChatCompletion
+      def update: (
+        String completion_id,
+        metadata: OpenAI::Models::metadata?,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Chat::ChatCompletion

-      def list:
-        (
-          ?OpenAI::Models::Chat::CompletionListParams
-          | ::Hash[Symbol, top] params
-        ) -> OpenAI::CursorPage[OpenAI::Models::Chat::ChatCompletion]
-        | (
-          after: String,
-          limit: Integer,
-          metadata: OpenAI::Models::metadata?,
-          model: String,
-          order: OpenAI::Models::Chat::CompletionListParams::order,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::CursorPage[OpenAI::Models::Chat::ChatCompletion]
+      def list: (
+        ?after: String,
+        ?limit: Integer,
+        ?metadata: OpenAI::Models::metadata?,
+        ?model: String,
+        ?order: OpenAI::Models::Chat::CompletionListParams::order,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Internal::CursorPage[OpenAI::Chat::ChatCompletion]

-      def delete:
-        (
-          String completion_id,
-          ?OpenAI::Models::Chat::CompletionDeleteParams
-          | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::Chat::ChatCompletionDeleted
-        | (
-          String completion_id,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::Chat::ChatCompletionDeleted
+      def delete: (
+        String completion_id,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Chat::ChatCompletionDeleted

       def initialize: (client: OpenAI::Client) -> void
     end
diff --git a/sig/openai/resources/chat/completions/messages.rbs b/sig/openai/resources/chat/completions/messages.rbs
index cd798db3..29b994c0 100644
--- a/sig/openai/resources/chat/completions/messages.rbs
+++ b/sig/openai/resources/chat/completions/messages.rbs
@@ -3,19 +3,13 @@ module OpenAI
   class Chat
     class Completions
       class Messages
-        def list:
-          (
-            String completion_id,
-            ?OpenAI::Models::Chat::Completions::MessageListParams
-            | ::Hash[Symbol, top] params
-          ) -> OpenAI::CursorPage[OpenAI::Models::Chat::ChatCompletionStoreMessage]
-          | (
-            String completion_id,
-            after: String,
-            limit: Integer,
-            order: OpenAI::Models::Chat::Completions::MessageListParams::order,
-            request_options: OpenAI::request_opts
-          ) -> OpenAI::CursorPage[OpenAI::Models::Chat::ChatCompletionStoreMessage]
+        def list: (
+          String completion_id,
+          ?after: String,
+          ?limit: Integer,
+          ?order: OpenAI::Models::Chat::Completions::MessageListParams::order,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Internal::CursorPage[OpenAI::Chat::ChatCompletionStoreMessage]

         def initialize: (client: OpenAI::Client) -> void
       end
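# Illustrative usage sketch (hypothetical, not part of the diff): the blocking
# call keeps messages:/model: required, while the new prompt_cache_key,
# safety_identifier, and verbosity keywords are all optional. "gpt-4.1" is a
# placeholder model name.
# (assumes the same `client` setup as the earlier sketch)
completion = client.chat.completions.create(
  messages: [{role: :user, content: "Say hi"}],
  model: "gpt-4.1"
)

# create_streaming was renamed stream_raw; it yields ChatCompletionChunk
# values through an OpenAI::Internal::Stream.
client.chat.completions.stream_raw(
  messages: [{role: :user, content: "Say hi"}],
  model: "gpt-4.1"
).each { |chunk| print chunk.choices.first&.delta&.content }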
diff --git a/sig/openai/resources/completions.rbs b/sig/openai/resources/completions.rbs
index c04b9ca0..6828d284 100644
--- a/sig/openai/resources/completions.rbs
+++ b/sig/openai/resources/completions.rbs
@@ -1,55 +1,47 @@ module OpenAI
   module Resources
     class Completions
-      def create:
-        (
-          OpenAI::Models::CompletionCreateParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::Completion
-        | (
-          model: OpenAI::Models::CompletionCreateParams::model,
-          prompt: OpenAI::Models::CompletionCreateParams::prompt?,
-          best_of: Integer?,
-          echo: bool?,
-          frequency_penalty: Float?,
-          logit_bias: ::Hash[Symbol, Integer]?,
-          logprobs: Integer?,
-          max_tokens: Integer?,
-          n: Integer?,
-          presence_penalty: Float?,
-          seed: Integer?,
-          stop: OpenAI::Models::CompletionCreateParams::stop?,
-          stream_options: OpenAI::Models::Chat::ChatCompletionStreamOptions?,
-          suffix: String?,
-          temperature: Float?,
-          top_p: Float?,
-          user: String,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::Completion
+      def create: (
+        model: OpenAI::Models::CompletionCreateParams::model,
+        prompt: OpenAI::Models::CompletionCreateParams::prompt?,
+        ?best_of: Integer?,
+        ?echo: bool?,
+        ?frequency_penalty: Float?,
+        ?logit_bias: ::Hash[Symbol, Integer]?,
+        ?logprobs: Integer?,
+        ?max_tokens: Integer?,
+        ?n: Integer?,
+        ?presence_penalty: Float?,
+        ?seed: Integer?,
+        ?stop: OpenAI::Models::CompletionCreateParams::stop?,
+        ?stream_options: OpenAI::Chat::ChatCompletionStreamOptions?,
+        ?suffix: String?,
+        ?temperature: Float?,
+        ?top_p: Float?,
+        ?user: String,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Completion

-      def create_streaming:
-        (
-          OpenAI::Models::CompletionCreateParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::Stream[OpenAI::Models::Completion]
-        | (
-          model: OpenAI::Models::CompletionCreateParams::model,
-          prompt: OpenAI::Models::CompletionCreateParams::prompt?,
-          best_of: Integer?,
-          echo: bool?,
-          frequency_penalty: Float?,
-          logit_bias: ::Hash[Symbol, Integer]?,
-          logprobs: Integer?,
-          max_tokens: Integer?,
-          n: Integer?,
-          presence_penalty: Float?,
-          seed: Integer?,
-          stop: OpenAI::Models::CompletionCreateParams::stop?,
-          stream_options: OpenAI::Models::Chat::ChatCompletionStreamOptions?,
-          suffix: String?,
-          temperature: Float?,
-          top_p: Float?,
-          user: String,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Stream[OpenAI::Models::Completion]
+      def create_streaming: (
+        model: OpenAI::Models::CompletionCreateParams::model,
+        prompt: OpenAI::Models::CompletionCreateParams::prompt?,
+        ?best_of: Integer?,
+        ?echo: bool?,
+        ?frequency_penalty: Float?,
+        ?logit_bias: ::Hash[Symbol, Integer]?,
+        ?logprobs: Integer?,
+        ?max_tokens: Integer?,
+        ?n: Integer?,
+        ?presence_penalty: Float?,
+        ?seed: Integer?,
+        ?stop: OpenAI::Models::CompletionCreateParams::stop?,
+        ?stream_options: OpenAI::Chat::ChatCompletionStreamOptions?,
+        ?suffix: String?,
+        ?temperature: Float?,
+        ?top_p: Float?,
+        ?user: String,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Internal::Stream[OpenAI::Completion]

       def initialize: (client: OpenAI::Client) -> void
     end
diff --git a/sig/openai/resources/containers.rbs b/sig/openai/resources/containers.rbs
new file mode 100644
index 00000000..dfbe77b8
--- /dev/null
+++ b/sig/openai/resources/containers.rbs
@@ -0,0 +1,33 @@
+module OpenAI
+  module Resources
+    class Containers
+      attr_reader files: OpenAI::Resources::Containers::Files
+
+      def create: (
+        name: String,
+        ?expires_after: OpenAI::ContainerCreateParams::ExpiresAfter,
+        ?file_ids: ::Array[String],
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Models::ContainerCreateResponse
+
+      def retrieve: (
+        String container_id,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Models::ContainerRetrieveResponse
+
+      def list: (
+        ?after: String,
+        ?limit: Integer,
+        ?order: OpenAI::Models::ContainerListParams::order,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Internal::CursorPage[OpenAI::Models::ContainerListResponse]
+
+      def delete: (
+        String container_id,
+        ?request_options: OpenAI::request_opts
+      ) -> nil
+
+      def initialize: (client: OpenAI::Client) -> void
+    end
+  end
+end
diff --git a/sig/openai/resources/containers/files.rbs b/sig/openai/resources/containers/files.rbs
new file mode 100644
index 00000000..561898fb
--- /dev/null
+++ b/sig/openai/resources/containers/files.rbs
@@ -0,0 +1,38 @@
+module OpenAI
+  module Resources
+    class Containers
+      class Files
+        attr_reader content: OpenAI::Resources::Containers::Files::Content
+
+        def create: (
+          String container_id,
+          ?file: OpenAI::Internal::file_input,
+          ?file_id: String,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Models::Containers::FileCreateResponse
+
+        def retrieve: (
+          String file_id,
+          container_id: String,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Models::Containers::FileRetrieveResponse
+
+        def list: (
+          String container_id,
+          ?after: String,
+          ?limit: Integer,
+          ?order: OpenAI::Models::Containers::FileListParams::order,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Internal::CursorPage[OpenAI::Models::Containers::FileListResponse]
+
+        def delete: (
+          String file_id,
+          container_id: String,
+          ?request_options: OpenAI::request_opts
+        ) -> nil
+
+        def initialize: (client: OpenAI::Client) -> void
+      end
+    end
+  end
+end
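# Illustrative usage sketch (hypothetical, not part of the diff): Containers
# are a new resource. Only name: is required on create, delete returns nil,
# and file uploads accept OpenAI::Internal::file_input (an IO, Pathname, or
# similar). All IDs and paths are placeholders.
# (assumes the same `client` setup as the earlier sketch)
container = client.containers.create(name: "my-container")
client.containers.files.create(container.id, file: Pathname("data.csv"))
client.containers.delete(container.id) # => nil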
diff --git a/sig/openai/resources/containers/files/content.rbs b/sig/openai/resources/containers/files/content.rbs
new file mode 100644
index 00000000..03e09259
--- /dev/null
+++ b/sig/openai/resources/containers/files/content.rbs
@@ -0,0 +1,17 @@
+module OpenAI
+  module Resources
+    class Containers
+      class Files
+        class Content
+          def retrieve: (
+            String file_id,
+            container_id: String,
+            ?request_options: OpenAI::request_opts
+          ) -> StringIO
+
+          def initialize: (client: OpenAI::Client) -> void
+        end
+      end
+    end
+  end
+end
diff --git a/sig/openai/resources/conversations.rbs b/sig/openai/resources/conversations.rbs
new file mode 100644
index 00000000..ae48106e
--- /dev/null
+++ b/sig/openai/resources/conversations.rbs
@@ -0,0 +1,31 @@
+module OpenAI
+  module Resources
+    class Conversations
+      attr_reader items: OpenAI::Resources::Conversations::Items
+
+      def create: (
+        ?items: ::Array[OpenAI::Models::Responses::response_input_item]?,
+        ?metadata: OpenAI::Models::metadata?,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Conversations::Conversation
+
+      def retrieve: (
+        String conversation_id,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Conversations::Conversation
+
+      def update: (
+        String conversation_id,
+        metadata: ::Hash[Symbol, String],
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Conversations::Conversation
+
+      def delete: (
+        String conversation_id,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Conversations::ConversationDeletedResource
+
+      def initialize: (client: OpenAI::Client) -> void
+    end
+  end
+end
diff --git a/sig/openai/resources/conversations/items.rbs b/sig/openai/resources/conversations/items.rbs
new file mode 100644
index 00000000..f4349e59
--- /dev/null
+++ b/sig/openai/resources/conversations/items.rbs
@@ -0,0 +1,38 @@
+module OpenAI
+  module Resources
+    class Conversations
+      class Items
+        def create: (
+          String conversation_id,
+          items: ::Array[OpenAI::Models::Responses::response_input_item],
+          ?include: ::Array[OpenAI::Models::Responses::response_includable],
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Conversations::ConversationItemList
+
+        def retrieve: (
+          String item_id,
+          conversation_id: String,
+          ?include: ::Array[OpenAI::Models::Responses::response_includable],
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Models::Conversations::conversation_item
+
+        def list: (
+          String conversation_id,
+          ?after: String,
+          ?include: ::Array[OpenAI::Models::Responses::response_includable],
+          ?limit: Integer,
+          ?order: OpenAI::Models::Conversations::ItemListParams::order,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Internal::ConversationCursorPage[OpenAI::Models::Conversations::conversation_item]
+
+        def delete: (
+          String item_id,
+          conversation_id: String,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Conversations::Conversation
+
+        def initialize: (client: OpenAI::Client) -> void
+      end
+    end
+  end
+end
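# Illustrative usage sketch (hypothetical, not part of the diff): Conversations
# are new; create takes optional items/metadata, and items are paged through
# the new OpenAI::Internal::ConversationCursorPage. The input-item hash shape
# is an assumption based on response_input_item.
# (assumes the same `client` setup as the earlier sketch)
conversation = client.conversations.create(
  items: [{type: :message, role: :user, content: "Hello"}]
)
client.conversations.items.list(conversation.id, limit: 20).each do |item|
  puts item.type
end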
diff --git a/sig/openai/resources/embeddings.rbs b/sig/openai/resources/embeddings.rbs
index 05dd3a2d..0f73d65c 100644
--- a/sig/openai/resources/embeddings.rbs
+++ b/sig/openai/resources/embeddings.rbs
@@ -1,18 +1,14 @@ module OpenAI
   module Resources
     class Embeddings
-      def create:
-        (
-          OpenAI::Models::EmbeddingCreateParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::CreateEmbeddingResponse
-        | (
-          input: OpenAI::Models::EmbeddingCreateParams::input,
-          model: OpenAI::Models::EmbeddingCreateParams::model,
-          dimensions: Integer,
-          encoding_format: OpenAI::Models::EmbeddingCreateParams::encoding_format,
-          user: String,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::CreateEmbeddingResponse
+      def create: (
+        input: OpenAI::Models::EmbeddingCreateParams::input,
+        model: OpenAI::Models::EmbeddingCreateParams::model,
+        ?dimensions: Integer,
+        ?encoding_format: OpenAI::Models::EmbeddingCreateParams::encoding_format,
+        ?user: String,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::CreateEmbeddingResponse

       def initialize: (client: OpenAI::Client) -> void
     end
diff --git a/sig/openai/resources/evals.rbs b/sig/openai/resources/evals.rbs
new file mode 100644
index 00000000..dd3d6cc5
--- /dev/null
+++ b/sig/openai/resources/evals.rbs
@@ -0,0 +1,42 @@
+module OpenAI
+  module Resources
+    class Evals
+      attr_reader runs: OpenAI::Resources::Evals::Runs
+
+      def create: (
+        data_source_config: OpenAI::Models::EvalCreateParams::data_source_config,
+        testing_criteria: ::Array[OpenAI::Models::EvalCreateParams::testing_criterion],
+        ?metadata: OpenAI::Models::metadata?,
+        ?name: String,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Models::EvalCreateResponse
+
+      def retrieve: (
+        String eval_id,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Models::EvalRetrieveResponse
+
+      def update: (
+        String eval_id,
+        ?metadata: OpenAI::Models::metadata?,
+        ?name: String,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Models::EvalUpdateResponse
+
+      def list: (
+        ?after: String,
+        ?limit: Integer,
+        ?order: OpenAI::Models::EvalListParams::order,
+        ?order_by: OpenAI::Models::EvalListParams::order_by,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Internal::CursorPage[OpenAI::Models::EvalListResponse]
+
+      def delete: (
+        String eval_id,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Models::EvalDeleteResponse
+
+      def initialize: (client: OpenAI::Client) -> void
+    end
+  end
+end
diff --git a/sig/openai/resources/evals/runs.rbs b/sig/openai/resources/evals/runs.rbs
new file mode 100644
index 00000000..6f8f8f0d
--- /dev/null
+++ b/sig/openai/resources/evals/runs.rbs
@@ -0,0 +1,46 @@
+module OpenAI
+  module Resources
+    class Evals
+      class Runs
+        attr_reader output_items: OpenAI::Resources::Evals::Runs::OutputItems
+
+        def create: (
+          String eval_id,
+          data_source: OpenAI::Models::Evals::RunCreateParams::data_source,
+          ?metadata: OpenAI::Models::metadata?,
+          ?name: String,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Models::Evals::RunCreateResponse
+
+        def retrieve: (
+          String run_id,
+          eval_id: String,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Models::Evals::RunRetrieveResponse
+
+        def list: (
+          String eval_id,
+          ?after: String,
+          ?limit: Integer,
+          ?order: OpenAI::Models::Evals::RunListParams::order,
+          ?status: OpenAI::Models::Evals::RunListParams::status,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Internal::CursorPage[OpenAI::Models::Evals::RunListResponse]
+
+        def delete: (
+          String run_id,
+          eval_id: String,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Models::Evals::RunDeleteResponse
+
+        def cancel: (
+          String run_id,
+          eval_id: String,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Models::Evals::RunCancelResponse
+
+        def initialize: (client: OpenAI::Client) -> void
+      end
+    end
+  end
+end
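# Illustrative usage sketch (hypothetical, not part of the diff): embeddings
# keep input:/model: required while dimensions/encoding_format are now
# explicitly optional. The model name is a placeholder.
# (assumes the same `client` setup as the earlier sketch)
response = client.embeddings.create(
  input: "The quick brown fox",
  model: "text-embedding-3-small",
  dimensions: 256 # optional keyword
)
vector = response.data.first.embedding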
diff --git a/sig/openai/resources/evals/runs/output_items.rbs b/sig/openai/resources/evals/runs/output_items.rbs
new file mode 100644
index 00000000..a6124836
--- /dev/null
+++ b/sig/openai/resources/evals/runs/output_items.rbs
@@ -0,0 +1,28 @@
+module OpenAI
+  module Resources
+    class Evals
+      class Runs
+        class OutputItems
+          def retrieve: (
+            String output_item_id,
+            eval_id: String,
+            run_id: String,
+            ?request_options: OpenAI::request_opts
+          ) -> OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse
+
+          def list: (
+            String run_id,
+            eval_id: String,
+            ?after: String,
+            ?limit: Integer,
+            ?order: OpenAI::Models::Evals::Runs::OutputItemListParams::order,
+            ?status: OpenAI::Models::Evals::Runs::OutputItemListParams::status,
+            ?request_options: OpenAI::request_opts
+          ) -> OpenAI::Internal::CursorPage[OpenAI::Models::Evals::Runs::OutputItemListResponse]
+
+          def initialize: (client: OpenAI::Client) -> void
+        end
+      end
+    end
+  end
+end
diff --git a/sig/openai/resources/files.rbs b/sig/openai/resources/files.rbs
index f8a6cae0..759285d0 100644
--- a/sig/openai/resources/files.rbs
+++ b/sig/openai/resources/files.rbs
@@ -1,54 +1,35 @@ module OpenAI
   module Resources
     class Files
-      def create:
-        (
-          OpenAI::Models::FileCreateParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::FileObject
-        | (
-          file: IO | StringIO,
-          purpose: OpenAI::Models::file_purpose,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::FileObject
+      def create: (
+        file: OpenAI::Internal::file_input,
+        purpose: OpenAI::Models::file_purpose,
+        ?expires_after: OpenAI::FileCreateParams::ExpiresAfter,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::FileObject

-      def retrieve:
-        (
-          String file_id,
-          ?OpenAI::Models::FileRetrieveParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::FileObject
-        | (
-          String file_id,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::FileObject
+      def retrieve: (
+        String file_id,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::FileObject

-      def list:
-        (
-          ?OpenAI::Models::FileListParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::CursorPage[OpenAI::Models::FileObject]
-        | (
-          after: String,
-          limit: Integer,
-          order: OpenAI::Models::FileListParams::order,
-          purpose: String,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::CursorPage[OpenAI::Models::FileObject]
+      def list: (
+        ?after: String,
+        ?limit: Integer,
+        ?order: OpenAI::Models::FileListParams::order,
+        ?purpose: String,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Internal::CursorPage[OpenAI::FileObject]

-      def delete:
-        (
-          String file_id,
-          ?OpenAI::Models::FileDeleteParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::FileDeleted
-        | (
-          String file_id,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::FileDeleted
+      def delete: (
+        String file_id,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::FileDeleted

-      def content:
-        (
-          String file_id,
-          ?OpenAI::Models::FileContentParams | ::Hash[Symbol, top] params
-        ) -> top
-        | (String file_id, request_options: OpenAI::request_opts) -> top
+      def content: (
+        String file_id,
+        ?request_options: OpenAI::request_opts
+      ) -> StringIO

       def initialize: (client: OpenAI::Client) -> void
     end
diff --git a/sig/openai/resources/fine_tuning.rbs b/sig/openai/resources/fine_tuning.rbs
index a78fe81e..f51dbeaa 100644
--- a/sig/openai/resources/fine_tuning.rbs
+++ b/sig/openai/resources/fine_tuning.rbs
@@ -1,8 +1,14 @@ module OpenAI
   module Resources
     class FineTuning
+      attr_reader methods_: OpenAI::Resources::FineTuning::Methods
+
       attr_reader jobs: OpenAI::Resources::FineTuning::Jobs

       attr_reader checkpoints: OpenAI::Resources::FineTuning::Checkpoints

+      attr_reader alpha: OpenAI::Resources::FineTuning::Alpha
+
       def initialize: (client: OpenAI::Client) -> void
     end
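# Illustrative usage sketch (hypothetical, not part of the diff): file uploads
# now take OpenAI::Internal::file_input instead of IO | StringIO, and
# content() is typed as StringIO instead of the untyped `top`. The path and
# purpose value are placeholders.
# (assumes the same `client` setup as the earlier sketch)
file = client.files.create(
  file: Pathname("training.jsonl"),
  purpose: :"fine-tune"
)
io = client.files.content(file.id) # => StringIO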
diff --git a/sig/openai/resources/fine_tuning/alpha.rbs b/sig/openai/resources/fine_tuning/alpha.rbs
new file mode 100644
index 00000000..467661b1
--- /dev/null
+++ b/sig/openai/resources/fine_tuning/alpha.rbs
@@ -0,0 +1,11 @@
+module OpenAI
+  module Resources
+    class FineTuning
+      class Alpha
+        attr_reader graders: OpenAI::Resources::FineTuning::Alpha::Graders
+
+        def initialize: (client: OpenAI::Client) -> void
+      end
+    end
+  end
+end
diff --git a/sig/openai/resources/fine_tuning/alpha/graders.rbs b/sig/openai/resources/fine_tuning/alpha/graders.rbs
new file mode 100644
index 00000000..5283fadb
--- /dev/null
+++ b/sig/openai/resources/fine_tuning/alpha/graders.rbs
@@ -0,0 +1,23 @@
+module OpenAI
+  module Resources
+    class FineTuning
+      class Alpha
+        class Graders
+          def run: (
+            grader: OpenAI::Models::FineTuning::Alpha::GraderRunParams::grader,
+            model_sample: String,
+            ?item: top,
+            ?request_options: OpenAI::request_opts
+          ) -> OpenAI::Models::FineTuning::Alpha::GraderRunResponse
+
+          def validate: (
+            grader: OpenAI::Models::FineTuning::Alpha::GraderValidateParams::grader,
+            ?request_options: OpenAI::request_opts
+          ) -> OpenAI::Models::FineTuning::Alpha::GraderValidateResponse
+
+          def initialize: (client: OpenAI::Client) -> void
+        end
+      end
+    end
+  end
+end
diff --git a/sig/openai/resources/fine_tuning/checkpoints.rbs b/sig/openai/resources/fine_tuning/checkpoints.rbs
new file mode 100644
index 00000000..59e5e893
--- /dev/null
+++ b/sig/openai/resources/fine_tuning/checkpoints.rbs
@@ -0,0 +1,11 @@
+module OpenAI
+  module Resources
+    class FineTuning
+      class Checkpoints
+        attr_reader permissions: OpenAI::Resources::FineTuning::Checkpoints::Permissions
+
+        def initialize: (client: OpenAI::Client) -> void
+      end
+    end
+  end
+end
diff --git a/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs b/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs
new file mode 100644
index 00000000..f36dcbbb
--- /dev/null
+++ b/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs
@@ -0,0 +1,32 @@
+module OpenAI
+  module Resources
+    class FineTuning
+      class Checkpoints
+        class Permissions
+          def create: (
+            String fine_tuned_model_checkpoint,
+            project_ids: ::Array[String],
+            ?request_options: OpenAI::request_opts
+          ) -> OpenAI::Internal::Page[OpenAI::Models::FineTuning::Checkpoints::PermissionCreateResponse]
+
+          def retrieve: (
+            String fine_tuned_model_checkpoint,
+            ?after: String,
+            ?limit: Integer,
+            ?order: OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams::order,
+            ?project_id: String,
+            ?request_options: OpenAI::request_opts
+          ) -> OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse
+
+          def delete: (
+            String permission_id,
+            fine_tuned_model_checkpoint: String,
+            ?request_options: OpenAI::request_opts
+          ) -> OpenAI::Models::FineTuning::Checkpoints::PermissionDeleteResponse
+
+          def initialize: (client: OpenAI::Client) -> void
+        end
+      end
+    end
+  end
+end
diff --git a/sig/openai/resources/fine_tuning/jobs.rbs b/sig/openai/resources/fine_tuning/jobs.rbs
index 8bd2b3c9..c28f22d8 100644
--- a/sig/openai/resources/fine_tuning/jobs.rbs
+++ b/sig/openai/resources/fine_tuning/jobs.rbs
@@ -4,70 +4,52 @@ module OpenAI
       class Jobs
        attr_reader checkpoints: OpenAI::Resources::FineTuning::Jobs::Checkpoints

-        def create:
-          (
-            OpenAI::Models::FineTuning::JobCreateParams
-            | ::Hash[Symbol, top] params
-          ) -> OpenAI::Models::FineTuning::FineTuningJob
-          | (
-            model: OpenAI::Models::FineTuning::JobCreateParams::model,
-            training_file: String,
-            hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters,
-            integrations: ::Array[OpenAI::Models::FineTuning::JobCreateParams::Integration]?,
-            metadata: OpenAI::Models::metadata?,
-            method_: OpenAI::Models::FineTuning::JobCreateParams::Method,
-            seed: Integer?,
-            suffix: String?,
-            validation_file: String?,
-            request_options: OpenAI::request_opts
-          ) -> OpenAI::Models::FineTuning::FineTuningJob
+        def create: (
+          model: OpenAI::Models::FineTuning::JobCreateParams::model,
+          training_file: String,
+          ?hyperparameters: OpenAI::FineTuning::JobCreateParams::Hyperparameters,
+          ?integrations: ::Array[OpenAI::FineTuning::JobCreateParams::Integration]?,
+          ?metadata: OpenAI::Models::metadata?,
+          ?method_: OpenAI::FineTuning::JobCreateParams::Method,
+          ?seed: Integer?,
+          ?suffix: String?,
+          ?validation_file: String?,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::FineTuning::FineTuningJob

-        def retrieve:
-          (
-            String fine_tuning_job_id,
-            ?OpenAI::Models::FineTuning::JobRetrieveParams
-            | ::Hash[Symbol, top] params
-          ) -> OpenAI::Models::FineTuning::FineTuningJob
-          | (
-            String fine_tuning_job_id,
-            request_options: OpenAI::request_opts
-          ) -> OpenAI::Models::FineTuning::FineTuningJob
+        def retrieve: (
+          String fine_tuning_job_id,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::FineTuning::FineTuningJob

-        def list:
-          (
-            ?OpenAI::Models::FineTuning::JobListParams
-            | ::Hash[Symbol, top] params
-          ) -> OpenAI::CursorPage[OpenAI::Models::FineTuning::FineTuningJob]
-          | (
-            after: String,
-            limit: Integer,
-            metadata: ::Hash[Symbol, String]?,
-            request_options: OpenAI::request_opts
-          ) -> OpenAI::CursorPage[OpenAI::Models::FineTuning::FineTuningJob]
+        def list: (
+          ?after: String,
+          ?limit: Integer,
+          ?metadata: ::Hash[Symbol, String]?,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Internal::CursorPage[OpenAI::FineTuning::FineTuningJob]

-        def cancel:
-          (
-            String fine_tuning_job_id,
-            ?OpenAI::Models::FineTuning::JobCancelParams
-            | ::Hash[Symbol, top] params
-          ) -> OpenAI::Models::FineTuning::FineTuningJob
-          | (
-            String fine_tuning_job_id,
-            request_options: OpenAI::request_opts
-          ) -> OpenAI::Models::FineTuning::FineTuningJob
+        def cancel: (
+          String fine_tuning_job_id,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::FineTuning::FineTuningJob

-        def list_events:
-          (
-            String fine_tuning_job_id,
-            ?OpenAI::Models::FineTuning::JobListEventsParams
-            | ::Hash[Symbol, top] params
-          ) -> OpenAI::CursorPage[OpenAI::Models::FineTuning::FineTuningJobEvent]
-          | (
-            String fine_tuning_job_id,
-            after: String,
-            limit: Integer,
-            request_options: OpenAI::request_opts
-          ) -> OpenAI::CursorPage[OpenAI::Models::FineTuning::FineTuningJobEvent]
+        def list_events: (
+          String fine_tuning_job_id,
+          ?after: String,
+          ?limit: Integer,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Internal::CursorPage[OpenAI::FineTuning::FineTuningJobEvent]
+
+        def pause: (
+          String fine_tuning_job_id,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::FineTuning::FineTuningJob
+
+        def resume: (
+          String fine_tuning_job_id,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::FineTuning::FineTuningJob

         def initialize: (client: OpenAI::Client) -> void
       end
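# Illustrative usage sketch (hypothetical, not part of the diff): pause and
# resume are new on fine-tuning jobs; both take just the job ID. The model
# name and file ID are placeholders.
# (assumes the same `client` setup as the earlier sketch)
job = client.fine_tuning.jobs.create(
  model: "gpt-4o-mini-2024-07-18",
  training_file: "file_abc"
)
client.fine_tuning.jobs.pause(job.id)
client.fine_tuning.jobs.resume(job.id)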
diff --git a/sig/openai/resources/fine_tuning/jobs/checkpoints.rbs b/sig/openai/resources/fine_tuning/jobs/checkpoints.rbs
index 824d9aa0..3770d993 100644
--- a/sig/openai/resources/fine_tuning/jobs/checkpoints.rbs
+++ b/sig/openai/resources/fine_tuning/jobs/checkpoints.rbs
@@ -3,18 +3,12 @@ module OpenAI
     class FineTuning
       class Jobs
         class Checkpoints
-          def list:
-            (
-              String fine_tuning_job_id,
-              ?OpenAI::Models::FineTuning::Jobs::CheckpointListParams
-              | ::Hash[Symbol, top] params
-            ) -> OpenAI::CursorPage[OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint]
-            | (
-              String fine_tuning_job_id,
-              after: String,
-              limit: Integer,
-              request_options: OpenAI::request_opts
-            ) -> OpenAI::CursorPage[OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint]
+          def list: (
+            String fine_tuning_job_id,
+            ?after: String,
+            ?limit: Integer,
+            ?request_options: OpenAI::request_opts
+          ) -> OpenAI::Internal::CursorPage[OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint]

           def initialize: (client: OpenAI::Client) -> void
         end
diff --git a/sig/openai/resources/fine_tuning/methods.rbs b/sig/openai/resources/fine_tuning/methods.rbs
new file mode 100644
index 00000000..16dc30bc
--- /dev/null
+++ b/sig/openai/resources/fine_tuning/methods.rbs
@@ -0,0 +1,9 @@
+module OpenAI
+  module Resources
+    class FineTuning
+      class Methods
+        def initialize: (client: OpenAI::Client) -> void
+      end
+    end
+  end
+end
diff --git a/sig/openai/resources/graders.rbs b/sig/openai/resources/graders.rbs
new file mode 100644
index 00000000..aed8f8b6
--- /dev/null
+++ b/sig/openai/resources/graders.rbs
@@ -0,0 +1,9 @@
+module OpenAI
+  module Resources
+    class Graders
+      attr_reader grader_models: OpenAI::Resources::Graders::GraderModels
+
+      def initialize: (client: OpenAI::Client) -> void
+    end
+  end
+end
diff --git a/sig/openai/resources/graders/grader_models.rbs b/sig/openai/resources/graders/grader_models.rbs
new file mode 100644
index 00000000..637eb1a6
--- /dev/null
+++ b/sig/openai/resources/graders/grader_models.rbs
@@ -0,0 +1,9 @@
+module OpenAI
+  module Resources
+    class Graders
+      class GraderModels
+        def initialize: (client: OpenAI::Client) -> void
+      end
+    end
+  end
+end
diff --git a/sig/openai/resources/images.rbs b/sig/openai/resources/images.rbs
index ce35a4e6..bd5dfbcf 100644
--- a/sig/openai/resources/images.rbs
+++ b/sig/openai/resources/images.rbs
@@ -1,52 +1,85 @@ module OpenAI
   module Resources
     class Images
-      def create_variation:
-        (
-          OpenAI::Models::ImageCreateVariationParams
-          | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::ImagesResponse
-        | (
-          image: IO | StringIO,
-          model: OpenAI::Models::ImageCreateVariationParams::model?,
-          n: Integer?,
-          response_format: OpenAI::Models::ImageCreateVariationParams::response_format?,
-          size: OpenAI::Models::ImageCreateVariationParams::size?,
-          user: String,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::ImagesResponse
+      def create_variation: (
+        image: OpenAI::Internal::file_input,
+        ?model: OpenAI::Models::ImageCreateVariationParams::model?,
+        ?n: Integer?,
+        ?response_format: OpenAI::Models::ImageCreateVariationParams::response_format?,
+        ?size: OpenAI::Models::ImageCreateVariationParams::size?,
+        ?user: String,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::ImagesResponse

-      def edit:
-        (
-          OpenAI::Models::ImageEditParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::ImagesResponse
-        | (
-          image: IO | StringIO,
-          prompt: String,
-          mask: IO | StringIO,
-          model: OpenAI::Models::ImageEditParams::model?,
-          n: Integer?,
-          response_format: OpenAI::Models::ImageEditParams::response_format?,
-          size: OpenAI::Models::ImageEditParams::size?,
-          user: String,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::ImagesResponse
+      def edit: (
+        image: OpenAI::Models::ImageEditParams::image,
+        prompt: String,
+        ?background: OpenAI::Models::ImageEditParams::background?,
+        ?input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?,
+        ?mask: OpenAI::Internal::file_input,
+        ?model: OpenAI::Models::ImageEditParams::model?,
+        ?n: Integer?,
+        ?output_compression: Integer?,
+        ?output_format: OpenAI::Models::ImageEditParams::output_format?,
+        ?partial_images: Integer?,
+        ?quality: OpenAI::Models::ImageEditParams::quality?,
+        ?response_format: OpenAI::Models::ImageEditParams::response_format?,
+        ?size: OpenAI::Models::ImageEditParams::size?,
+        ?user: String,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::ImagesResponse

+      def edit_stream_raw: (
+        image: OpenAI::Models::ImageEditParams::image,
+        prompt: String,
+        ?background: OpenAI::Models::ImageEditParams::background?,
+        ?input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?,
+        ?mask: OpenAI::Internal::file_input,
+        ?model: OpenAI::Models::ImageEditParams::model?,
+        ?n: Integer?,
+        ?output_compression: Integer?,
+        ?output_format: OpenAI::Models::ImageEditParams::output_format?,
+        ?partial_images: Integer?,
+        ?quality: OpenAI::Models::ImageEditParams::quality?,
+        ?response_format: OpenAI::Models::ImageEditParams::response_format?,
+        ?size: OpenAI::Models::ImageEditParams::size?,
+        ?user: String,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Internal::Stream[OpenAI::Models::image_edit_stream_event]
+
-      def generate:
-        (
-          OpenAI::Models::ImageGenerateParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::ImagesResponse
-        | (
-          prompt: String,
-          model: OpenAI::Models::ImageGenerateParams::model?,
-          n: Integer?,
-          quality: OpenAI::Models::ImageGenerateParams::quality,
-          response_format: OpenAI::Models::ImageGenerateParams::response_format?,
-          size: OpenAI::Models::ImageGenerateParams::size?,
-          style: OpenAI::Models::ImageGenerateParams::style?,
-          user: String,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::ImagesResponse
+      def generate: (
+        prompt: String,
+        ?background: OpenAI::Models::ImageGenerateParams::background?,
+        ?model: OpenAI::Models::ImageGenerateParams::model?,
+        ?moderation: OpenAI::Models::ImageGenerateParams::moderation?,
+        ?n: Integer?,
+        ?output_compression: Integer?,
+        ?output_format: OpenAI::Models::ImageGenerateParams::output_format?,
+        ?partial_images: Integer?,
+        ?quality: OpenAI::Models::ImageGenerateParams::quality?,
+        ?response_format: OpenAI::Models::ImageGenerateParams::response_format?,
+        ?size: OpenAI::Models::ImageGenerateParams::size?,
+        ?style: OpenAI::Models::ImageGenerateParams::style?,
+        ?user: String,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::ImagesResponse
+
+      def generate_stream_raw: (
+        prompt: String,
+        ?background: OpenAI::Models::ImageGenerateParams::background?,
+        ?model: OpenAI::Models::ImageGenerateParams::model?,
+        ?moderation: OpenAI::Models::ImageGenerateParams::moderation?,
+        ?n: Integer?,
+        ?output_compression: Integer?,
+        ?output_format: OpenAI::Models::ImageGenerateParams::output_format?,
+        ?partial_images: Integer?,
+        ?quality: OpenAI::Models::ImageGenerateParams::quality?,
+        ?response_format: OpenAI::Models::ImageGenerateParams::response_format?,
+        ?size: OpenAI::Models::ImageGenerateParams::size?,
+        ?style: OpenAI::Models::ImageGenerateParams::style?,
+        ?user: String,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Internal::Stream[OpenAI::Models::image_gen_stream_event]

       def initialize: (client: OpenAI::Client) -> void
     end
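# Illustrative usage sketch (hypothetical, not part of the diff): image editing
# and generation gained *_stream_raw variants that emit partial-image events
# through OpenAI::Internal::Stream; partial_images and the other new keywords
# are optional. The prompt is a placeholder.
# (assumes the same `client` setup as the earlier sketch)
client.images.generate_stream_raw(
  prompt: "A watercolor lighthouse at dusk",
  partial_images: 2
).each { |event| puts event.class } # members of image_gen_stream_event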
diff --git a/sig/openai/resources/models.rbs b/sig/openai/resources/models.rbs
index c1ed007f..dfe23787 100644
--- a/sig/openai/resources/models.rbs
+++ b/sig/openai/resources/models.rbs
@@ -1,33 +1,19 @@ module OpenAI
   module Resources
     class Models
-      def retrieve:
-        (
-          String model,
-          ?OpenAI::Models::ModelRetrieveParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::Model
-        | (
-          String model,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::Model
+      def retrieve: (
+        String model,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Model

-      def list:
-        (
-          ?OpenAI::Models::ModelListParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::Page[OpenAI::Models::Model]
-        | (
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Page[OpenAI::Models::Model]
+      def list: (
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Internal::Page[OpenAI::Model]

-      def delete:
-        (
-          String model,
-          ?OpenAI::Models::ModelDeleteParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::ModelDeleted
-        | (
-          String model,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::ModelDeleted
+      def delete: (
+        String model,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::ModelDeleted

       def initialize: (client: OpenAI::Client) -> void
     end
diff --git a/sig/openai/resources/moderations.rbs b/sig/openai/resources/moderations.rbs
index a91a0487..1a10cff6 100644
--- a/sig/openai/resources/moderations.rbs
+++ b/sig/openai/resources/moderations.rbs
@@ -1,15 +1,11 @@ module OpenAI
   module Resources
     class Moderations
-      def create:
-        (
-          OpenAI::Models::ModerationCreateParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::ModerationCreateResponse
-        | (
-          input: OpenAI::Models::ModerationCreateParams::input,
-          model: OpenAI::Models::ModerationCreateParams::model,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::ModerationCreateResponse
+      def create: (
+        input: OpenAI::Models::ModerationCreateParams::input,
+        ?model: OpenAI::Models::ModerationCreateParams::model,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Models::ModerationCreateResponse

       def initialize: (client: OpenAI::Client) -> void
     end
diff --git a/sig/openai/resources/responses.rbs b/sig/openai/resources/responses.rbs
index bf3ce99c..aa09eca6 100644
--- a/sig/openai/resources/responses.rbs
+++ b/sig/openai/resources/responses.rbs
@@ -3,77 +3,91 @@ module OpenAI
     class Responses
       attr_reader input_items: OpenAI::Resources::Responses::InputItems

-      def create:
-        (
-          OpenAI::Models::Responses::ResponseCreateParams
-          | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::Responses::Response
-        | (
-          input: OpenAI::Models::Responses::ResponseCreateParams::input,
-          model: OpenAI::Models::Responses::ResponseCreateParams::model,
-          include: ::Array[OpenAI::Models::Responses::response_includable]?,
-          instructions: String?,
-          max_output_tokens: Integer?,
-          metadata: OpenAI::Models::metadata?,
-          parallel_tool_calls: bool?,
-          previous_response_id: String?,
-          reasoning: OpenAI::Models::Reasoning?,
-          store: bool?,
-          temperature: Float?,
-          text: OpenAI::Models::Responses::ResponseTextConfig,
-          tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice,
-          tools: ::Array[OpenAI::Models::Responses::tool],
-          top_p: Float?,
-          truncation: OpenAI::Models::Responses::ResponseCreateParams::truncation?,
-          user: String,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::Responses::Response
+      def create: (
+        ?background: bool?,
+        ?conversation: OpenAI::Models::Responses::ResponseCreateParams::conversation?,
+        ?include: ::Array[OpenAI::Models::Responses::response_includable]?,
+        ?input: OpenAI::Models::Responses::ResponseCreateParams::input,
+        ?instructions: String?,
+        ?max_output_tokens: Integer?,
+        ?max_tool_calls: Integer?,
+        ?metadata: OpenAI::Models::metadata?,
+        ?model: OpenAI::Models::responses_model,
+        ?parallel_tool_calls: bool?,
+        ?previous_response_id: String?,
+        ?prompt: OpenAI::Responses::ResponsePrompt?,
+        ?prompt_cache_key: String,
+        ?reasoning: OpenAI::Reasoning?,
+        ?safety_identifier: String,
+        ?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
+        ?store: bool?,
+        ?stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?,
+        ?temperature: Float?,
+        ?text: OpenAI::Responses::ResponseTextConfig,
+        ?tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice,
+        ?tools: ::Array[OpenAI::Models::Responses::tool],
+        ?top_logprobs: Integer?,
+        ?top_p: Float?,
+        ?truncation: OpenAI::Models::Responses::ResponseCreateParams::truncation?,
+        ?user: String,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Responses::Response

-      def create_streaming:
-        (
-          OpenAI::Models::Responses::ResponseCreateParams
-          | ::Hash[Symbol, top] params
-        ) -> OpenAI::Stream[OpenAI::Models::Responses::response_stream_event]
-        | (
-          input: OpenAI::Models::Responses::ResponseCreateParams::input,
-          model: OpenAI::Models::Responses::ResponseCreateParams::model,
-          include: ::Array[OpenAI::Models::Responses::response_includable]?,
-          instructions: String?,
-          max_output_tokens: Integer?,
-          metadata: OpenAI::Models::metadata?,
-          parallel_tool_calls: bool?,
-          previous_response_id: String?,
-          reasoning: OpenAI::Models::Reasoning?,
-          store: bool?,
-          temperature: Float?,
-          text: OpenAI::Models::Responses::ResponseTextConfig,
-          tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice,
-          tools: ::Array[OpenAI::Models::Responses::tool],
-          top_p: Float?,
-          truncation: OpenAI::Models::Responses::ResponseCreateParams::truncation?,
-          user: String,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Stream[OpenAI::Models::Responses::response_stream_event]
+      def stream_raw: (
+        ?background: bool?,
+        ?conversation: OpenAI::Models::Responses::ResponseCreateParams::conversation?,
+        ?include: ::Array[OpenAI::Models::Responses::response_includable]?,
+        ?input: OpenAI::Models::Responses::ResponseCreateParams::input,
+        ?instructions: String?,
+        ?max_output_tokens: Integer?,
+        ?max_tool_calls: Integer?,
+        ?metadata: OpenAI::Models::metadata?,
+        ?model: OpenAI::Models::responses_model,
+        ?parallel_tool_calls: bool?,
+        ?previous_response_id: String?,
+        ?prompt: OpenAI::Responses::ResponsePrompt?,
+        ?prompt_cache_key: String,
+        ?reasoning: OpenAI::Reasoning?,
+        ?safety_identifier: String,
+        ?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
+        ?store: bool?,
+        ?stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?,
+        ?temperature: Float?,
+        ?text: OpenAI::Responses::ResponseTextConfig,
+        ?tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice,
+        ?tools: ::Array[OpenAI::Models::Responses::tool],
+        ?top_logprobs: Integer?,
+        ?top_p: Float?,
+        ?truncation: OpenAI::Models::Responses::ResponseCreateParams::truncation?,
+        ?user: String,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Internal::Stream[OpenAI::Models::Responses::response_stream_event]

-      def retrieve:
-        (
-          String response_id,
-          ?OpenAI::Models::Responses::ResponseRetrieveParams
-          | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::Responses::Response
-        | (
-          String response_id,
-          include: ::Array[OpenAI::Models::Responses::response_includable],
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::Responses::Response
+      def retrieve: (
+        String response_id,
+        ?include: ::Array[OpenAI::Models::Responses::response_includable],
+        ?include_obfuscation: bool,
+        ?starting_after: Integer,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Responses::Response

-      def delete:
-        (
-          String response_id,
-          ?OpenAI::Models::Responses::ResponseDeleteParams
-          | ::Hash[Symbol, top] params
-        ) -> nil
-        | (String response_id, request_options: OpenAI::request_opts) -> nil
+      def retrieve_streaming: (
+        String response_id,
+        ?include: ::Array[OpenAI::Models::Responses::response_includable],
+        ?include_obfuscation: bool,
+        ?starting_after: Integer,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Internal::Stream[OpenAI::Models::Responses::response_stream_event]
+
+      def delete: (
+        String response_id,
+        ?request_options: OpenAI::request_opts
+      ) -> nil
+
+      def cancel: (
+        String response_id,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Responses::Response

       def initialize: (client: OpenAI::Client) -> void
     end
diff --git a/sig/openai/resources/responses/input_items.rbs b/sig/openai/resources/responses/input_items.rbs
index 34cc6a93..eb2a0262 100644
--- a/sig/openai/resources/responses/input_items.rbs
+++ b/sig/openai/resources/responses/input_items.rbs
@@ -2,20 +2,14 @@ module OpenAI
   module Resources
     class Responses
      class InputItems
-        def list:
-          (
-            String response_id,
-            ?OpenAI::Models::Responses::InputItemListParams
-            | ::Hash[Symbol, top] params
-          ) -> OpenAI::CursorPage[OpenAI::Models::Responses::ResponseItemList::data]
-          | (
-            String response_id,
-            after: String,
-            before: String,
-            limit: Integer,
-            order: OpenAI::Models::Responses::InputItemListParams::order,
-            request_options: OpenAI::request_opts
-          ) -> OpenAI::CursorPage[OpenAI::Models::Responses::ResponseItemList::data]
+        def list: (
+          String response_id,
+          ?after: String,
+          ?include: ::Array[OpenAI::Models::Responses::response_includable],
+          ?limit: Integer,
+          ?order: OpenAI::Models::Responses::InputItemListParams::order,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Internal::CursorPage[OpenAI::Models::Responses::response_item]

         def initialize: (client: OpenAI::Client) -> void
       end
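# Illustrative usage sketch (hypothetical, not part of the diff): Responses
# gained retrieve_streaming and cancel, and retrieve now accepts the optional
# include_obfuscation/starting_after keywords. The response ID is a
# placeholder.
# (assumes the same `client` setup as the earlier sketch)
client.responses.retrieve_streaming("resp_abc", starting_after: 0)
      .each { |event| puts event.class } # members of response_stream_event
client.responses.cancel("resp_abc")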
diff --git a/sig/openai/resources/uploads.rbs b/sig/openai/resources/uploads.rbs
index 116d8e02..28663206 100644
--- a/sig/openai/resources/uploads.rbs
+++ b/sig/openai/resources/uploads.rbs
@@ -3,39 +3,26 @@ module OpenAI
     class Uploads
       attr_reader parts: OpenAI::Resources::Uploads::Parts

-      def create:
-        (
-          OpenAI::Models::UploadCreateParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::Upload
-        | (
-          bytes: Integer,
-          filename: String,
-          mime_type: String,
-          purpose: OpenAI::Models::file_purpose,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::Upload
+      def create: (
+        bytes: Integer,
+        filename: String,
+        mime_type: String,
+        purpose: OpenAI::Models::file_purpose,
+        ?expires_after: OpenAI::UploadCreateParams::ExpiresAfter,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Upload

-      def cancel:
-        (
-          String upload_id,
-          ?OpenAI::Models::UploadCancelParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::Upload
-        | (
-          String upload_id,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::Upload
+      def cancel: (
+        String upload_id,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Upload

-      def complete:
-        (
-          String upload_id,
-          OpenAI::Models::UploadCompleteParams | ::Hash[Symbol, top] params
-        ) -> OpenAI::Models::Upload
-        | (
-          String upload_id,
-          part_ids: ::Array[String],
-          md5: String,
-          request_options: OpenAI::request_opts
-        ) -> OpenAI::Models::Upload
+      def complete: (
+        String upload_id,
+        part_ids: ::Array[String],
+        ?md5: String,
+        ?request_options: OpenAI::request_opts
+      ) -> OpenAI::Upload

       def initialize: (client: OpenAI::Client) -> void
     end
diff --git a/sig/openai/resources/uploads/parts.rbs b/sig/openai/resources/uploads/parts.rbs
index 40aa9015..41bfee05 100644
--- a/sig/openai/resources/uploads/parts.rbs
+++ b/sig/openai/resources/uploads/parts.rbs
@@ -2,17 +2,11 @@ module OpenAI
   module Resources
     class Uploads
       class Parts
-        def create:
-          (
-            String upload_id,
-            OpenAI::Models::Uploads::PartCreateParams
-            | ::Hash[Symbol, top] params
-          ) -> OpenAI::Models::Uploads::UploadPart
-          | (
-            String upload_id,
-            data: IO | StringIO,
-            request_options: OpenAI::request_opts
-          ) -> OpenAI::Models::Uploads::UploadPart
+        def create: (
+          String upload_id,
+          data: OpenAI::Internal::file_input,
+          ?request_options: OpenAI::request_opts
+        ) -> OpenAI::Uploads::UploadPart

         def initialize: (client: OpenAI::Client) -> void
       end
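# Illustrative usage sketch (hypothetical, not part of the diff): md5 on
# complete() is now optional, create() accepts the new expires_after keyword,
# and part data takes OpenAI::Internal::file_input. Sizes, IDs, and paths are
# placeholders.
# (assumes the same `client` setup as the earlier sketch)
upload = client.uploads.create(
  bytes: 1_048_576,
  filename: "training.jsonl",
  mime_type: "application/jsonl",
  purpose: :"fine-tune"
)
part = client.uploads.parts.create(upload.id, data: Pathname("chunk_0.jsonl"))
client.uploads.complete(upload.id, part_ids: [part.id]) # md5: may be omitted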
rewrite_query: bool, - request_options: OpenAI::request_opts - ) -> OpenAI::Page[OpenAI::Models::VectorStoreSearchResponse] + def create: ( + ?chunking_strategy: OpenAI::Models::file_chunking_strategy_param, + ?expires_after: OpenAI::VectorStoreCreateParams::ExpiresAfter, + ?file_ids: ::Array[String], + ?metadata: OpenAI::Models::metadata?, + ?name: String, + ?request_options: OpenAI::request_opts + ) -> OpenAI::VectorStore + + def retrieve: ( + String vector_store_id, + ?request_options: OpenAI::request_opts + ) -> OpenAI::VectorStore + + def update: ( + String vector_store_id, + ?expires_after: OpenAI::VectorStoreUpdateParams::ExpiresAfter?, + ?metadata: OpenAI::Models::metadata?, + ?name: String?, + ?request_options: OpenAI::request_opts + ) -> OpenAI::VectorStore + + def list: ( + ?after: String, + ?before: String, + ?limit: Integer, + ?order: OpenAI::Models::VectorStoreListParams::order, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Internal::CursorPage[OpenAI::VectorStore] + + def delete: ( + String vector_store_id, + ?request_options: OpenAI::request_opts + ) -> OpenAI::VectorStoreDeleted + + def search: ( + String vector_store_id, + query: OpenAI::Models::VectorStoreSearchParams::query, + ?filters: OpenAI::Models::VectorStoreSearchParams::filters, + ?max_num_results: Integer, + ?ranking_options: OpenAI::VectorStoreSearchParams::RankingOptions, + ?rewrite_query: bool, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Internal::Page[OpenAI::Models::VectorStoreSearchResponse] def initialize: (client: OpenAI::Client) -> void end diff --git a/sig/openai/resources/vector_stores/file_batches.rbs b/sig/openai/resources/vector_stores/file_batches.rbs index 39a8abf0..1228381c 100644 --- a/sig/openai/resources/vector_stores/file_batches.rbs +++ b/sig/openai/resources/vector_stores/file_batches.rbs @@ -2,60 +2,36 @@ module OpenAI module Resources class VectorStores class FileBatches - def create: - ( - String vector_store_id, - OpenAI::Models::VectorStores::FileBatchCreateParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::VectorStores::VectorStoreFileBatch - | ( - String vector_store_id, - file_ids: ::Array[String], - attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]?, - chunking_strategy: OpenAI::Models::file_chunking_strategy_param, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::VectorStores::VectorStoreFileBatch + def create: ( + String vector_store_id, + file_ids: ::Array[String], + ?attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]?, + ?chunking_strategy: OpenAI::Models::file_chunking_strategy_param, + ?request_options: OpenAI::request_opts + ) -> OpenAI::VectorStores::VectorStoreFileBatch - def retrieve: - ( - String batch_id, - OpenAI::Models::VectorStores::FileBatchRetrieveParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::VectorStores::VectorStoreFileBatch - | ( - String batch_id, - vector_store_id: String, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::VectorStores::VectorStoreFileBatch + def retrieve: ( + String batch_id, + vector_store_id: String, + ?request_options: OpenAI::request_opts + ) -> OpenAI::VectorStores::VectorStoreFileBatch - def cancel: - ( - String batch_id, - OpenAI::Models::VectorStores::FileBatchCancelParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::VectorStores::VectorStoreFileBatch - | ( - String batch_id, - vector_store_id: String, - request_options: OpenAI::request_opts - ) -> 
OpenAI::Models::VectorStores::VectorStoreFileBatch + def cancel: ( + String batch_id, + vector_store_id: String, + ?request_options: OpenAI::request_opts + ) -> OpenAI::VectorStores::VectorStoreFileBatch - def list_files: - ( - String batch_id, - OpenAI::Models::VectorStores::FileBatchListFilesParams - | ::Hash[Symbol, top] params - ) -> OpenAI::CursorPage[OpenAI::Models::VectorStores::VectorStoreFile] - | ( - String batch_id, - vector_store_id: String, - after: String, - before: String, - filter: OpenAI::Models::VectorStores::FileBatchListFilesParams::filter, - limit: Integer, - order: OpenAI::Models::VectorStores::FileBatchListFilesParams::order, - request_options: OpenAI::request_opts - ) -> OpenAI::CursorPage[OpenAI::Models::VectorStores::VectorStoreFile] + def list_files: ( + String batch_id, + vector_store_id: String, + ?after: String, + ?before: String, + ?filter: OpenAI::Models::VectorStores::FileBatchListFilesParams::filter, + ?limit: Integer, + ?order: OpenAI::Models::VectorStores::FileBatchListFilesParams::order, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Internal::CursorPage[OpenAI::VectorStores::VectorStoreFile] def initialize: (client: OpenAI::Client) -> void end diff --git a/sig/openai/resources/vector_stores/files.rbs b/sig/openai/resources/vector_stores/files.rbs index 5abfd68c..dfbccc37 100644 --- a/sig/openai/resources/vector_stores/files.rbs +++ b/sig/openai/resources/vector_stores/files.rbs @@ -2,84 +2,48 @@ module OpenAI module Resources class VectorStores class Files - def create: - ( - String vector_store_id, - OpenAI::Models::VectorStores::FileCreateParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::VectorStores::VectorStoreFile - | ( - String vector_store_id, - file_id: String, - attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileCreateParams::attribute]?, - chunking_strategy: OpenAI::Models::file_chunking_strategy_param, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::VectorStores::VectorStoreFile + def create: ( + String vector_store_id, + file_id: String, + ?attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileCreateParams::attribute]?, + ?chunking_strategy: OpenAI::Models::file_chunking_strategy_param, + ?request_options: OpenAI::request_opts + ) -> OpenAI::VectorStores::VectorStoreFile - def retrieve: - ( - String file_id, - OpenAI::Models::VectorStores::FileRetrieveParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::VectorStores::VectorStoreFile - | ( - String file_id, - vector_store_id: String, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::VectorStores::VectorStoreFile + def retrieve: ( + String file_id, + vector_store_id: String, + ?request_options: OpenAI::request_opts + ) -> OpenAI::VectorStores::VectorStoreFile - def update: - ( - String file_id, - OpenAI::Models::VectorStores::FileUpdateParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::VectorStores::VectorStoreFile - | ( - String file_id, - vector_store_id: String, - attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileUpdateParams::attribute]?, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::VectorStores::VectorStoreFile + def update: ( + String file_id, + vector_store_id: String, + attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileUpdateParams::attribute]?, + ?request_options: OpenAI::request_opts + ) -> OpenAI::VectorStores::VectorStoreFile - def list: - ( - String vector_store_id, - ?OpenAI::Models::VectorStores::FileListParams - | ::Hash[Symbol, top] params - ) -> 
OpenAI::CursorPage[OpenAI::Models::VectorStores::VectorStoreFile] - | ( - String vector_store_id, - after: String, - before: String, - filter: OpenAI::Models::VectorStores::FileListParams::filter, - limit: Integer, - order: OpenAI::Models::VectorStores::FileListParams::order, - request_options: OpenAI::request_opts - ) -> OpenAI::CursorPage[OpenAI::Models::VectorStores::VectorStoreFile] + def list: ( + String vector_store_id, + ?after: String, + ?before: String, + ?filter: OpenAI::Models::VectorStores::FileListParams::filter, + ?limit: Integer, + ?order: OpenAI::Models::VectorStores::FileListParams::order, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Internal::CursorPage[OpenAI::VectorStores::VectorStoreFile] - def delete: - ( - String file_id, - OpenAI::Models::VectorStores::FileDeleteParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Models::VectorStores::VectorStoreFileDeleted - | ( - String file_id, - vector_store_id: String, - request_options: OpenAI::request_opts - ) -> OpenAI::Models::VectorStores::VectorStoreFileDeleted + def delete: ( + String file_id, + vector_store_id: String, + ?request_options: OpenAI::request_opts + ) -> OpenAI::VectorStores::VectorStoreFileDeleted - def content: - ( - String file_id, - OpenAI::Models::VectorStores::FileContentParams - | ::Hash[Symbol, top] params - ) -> OpenAI::Page[OpenAI::Models::VectorStores::FileContentResponse] - | ( - String file_id, - vector_store_id: String, - request_options: OpenAI::request_opts - ) -> OpenAI::Page[OpenAI::Models::VectorStores::FileContentResponse] + def content: ( + String file_id, + vector_store_id: String, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Internal::Page[OpenAI::Models::VectorStores::FileContentResponse] def initialize: (client: OpenAI::Client) -> void end diff --git a/sig/openai/resources/webhooks.rbs b/sig/openai/resources/webhooks.rbs new file mode 100644 index 00000000..bf67f552 --- /dev/null +++ b/sig/openai/resources/webhooks.rbs @@ -0,0 +1,24 @@ +module OpenAI + module Resources + class Webhooks + def unwrap: ( + String payload + ) -> (OpenAI::Webhooks::BatchCancelledWebhookEvent + | OpenAI::Webhooks::BatchCompletedWebhookEvent + | OpenAI::Webhooks::BatchExpiredWebhookEvent + | OpenAI::Webhooks::BatchFailedWebhookEvent + | OpenAI::Webhooks::EvalRunCanceledWebhookEvent + | OpenAI::Webhooks::EvalRunFailedWebhookEvent + | OpenAI::Webhooks::EvalRunSucceededWebhookEvent + | OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent + | OpenAI::Webhooks::FineTuningJobFailedWebhookEvent + | OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent + | OpenAI::Webhooks::ResponseCancelledWebhookEvent + | OpenAI::Webhooks::ResponseCompletedWebhookEvent + | OpenAI::Webhooks::ResponseFailedWebhookEvent + | OpenAI::Webhooks::ResponseIncompleteWebhookEvent) + + def initialize: (client: OpenAI::Client) -> void + end + end +end diff --git a/sig/openai/stream.rbs b/sig/openai/stream.rbs deleted file mode 100644 index 78d58b92..00000000 --- a/sig/openai/stream.rbs +++ /dev/null @@ -1,5 +0,0 @@ -module OpenAI - class Stream[Elem] < OpenAI::BaseStream[Elem] - private def iterator: -> Enumerable[Elem] - end -end diff --git a/sig/openai/util.rbs b/sig/openai/util.rbs deleted file mode 100644 index 2781e634..00000000 --- a/sig/openai/util.rbs +++ /dev/null @@ -1,112 +0,0 @@ -module OpenAI - module Util - def self?.monotonic_secs: -> Float - - def self?.arch: -> String - - def self?.os: -> String - - def self?.primitive?: (top input) -> (bool | top) - - def self?.coerce_boolean: (top input) -> (bool | 
top) - - def self?.coerce_boolean!: (top input) -> bool? - - def self?.coerce_integer: (top input) -> (Integer | top) - - def self?.coerce_float: (top input) -> (Float | top) - - def self?.coerce_hash: (top input) -> (::Hash[top, top] | top) - - OMIT: top - - def self?.deep_merge_lr: (top lhs, top rhs, concat: bool) -> top - - def self?.deep_merge: ( - *::Array[top] values, - sentinel: top?, - concat: bool - ) -> top - - def self?.dig: ( - ::Hash[Symbol, top] | ::Array[top] | top data, - (Symbol | Integer | ::Array[(Symbol | Integer)])? pick, - ?top? sentinel - ) { - -> top? - } -> top? - - def self?.uri_origin: (URI::Generic uri) -> String - - def self?.interpolate_path: (String | ::Array[String] path) -> String - - def self?.decode_query: (String? query) -> ::Hash[String, ::Array[String]] - - def self?.encode_query: ( - ::Hash[String, (::Array[String] | String)?]? query - ) -> String? - - type parsed_uri = - { - scheme: String?, - host: String?, - port: Integer?, - path: String?, - query: ::Hash[String, ::Array[String]] - } - - def self?.parse_uri: (URI::Generic | String url) -> OpenAI::Util::parsed_uri - - def self?.unparse_uri: (OpenAI::Util::parsed_uri parsed) -> URI::Generic - - def self?.join_parsed_uri: ( - OpenAI::Util::parsed_uri lhs, - OpenAI::Util::parsed_uri rhs - ) -> URI::Generic - - def self?.normalized_headers: ( - *::Hash[String, (String - | Integer - | ::Array[(String | Integer)?])?] headers - ) -> ::Hash[String, String] - - def self?.encode_multipart_formdata: ( - StringIO io, - boundary: String, - key: Symbol | String, - val: top - ) -> void - - def self?.encode_content: (::Hash[String, String] headers, top body) -> top - - def self?.decode_content: ( - ::Hash[String, String] headers, - stream: Enumerable[String], - suppress_error: bool - ) -> top - - def self?.fused_enum: ( - Enumerable[top] enum, - external: bool - ) { - -> void - } -> Enumerable[top] - - def self?.close_fused!: (Enumerable[top]? enum) -> void - - def self?.chain_fused: ( - Enumerable[top]? enum - ) { - (Enumerator::Yielder arg0) -> void - } -> void - - type sse_message = - { event: String?, data: String?, id: String?, retry: Integer? 
} - - def self?.decode_lines: (Enumerable[String] enum) -> Enumerable[String] - - def self?.decode_sse: ( - Enumerable[String] lines - ) -> OpenAI::Util::sse_message - end -end diff --git a/sig/openai/version.rbs b/sig/openai/version.rbs index adde5d9f..9d3c6077 100644 --- a/sig/openai/version.rbs +++ b/sig/openai/version.rbs @@ -1,3 +1,3 @@ module OpenAI - VERSION: "0.0.1-alpha.0" + VERSION: String end diff --git a/sorbet/config b/sorbet/config new file mode 100644 index 00000000..6fe84ed8 --- /dev/null +++ b/sorbet/config @@ -0,0 +1,2 @@ +--dir=rbi/ +--ignore=test/ diff --git a/sorbet/rbi/.gitignore b/sorbet/rbi/.gitignore new file mode 100644 index 00000000..d6b7ef32 --- /dev/null +++ b/sorbet/rbi/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/test/openai/base_model_test.rb b/test/openai/base_model_test.rb deleted file mode 100644 index 0444fb0e..00000000 --- a/test/openai/base_model_test.rb +++ /dev/null @@ -1,341 +0,0 @@ -# frozen_string_literal: true - -require_relative "test_helper" - -class OpenAI::Test::BaseModelTest < Minitest::Test - class E1 < OpenAI::Enum - A = :a - B = :b - end - - A1 = OpenAI::ArrayOf[-> { Integer }] - A2 = OpenAI::ArrayOf[enum: -> { E1 }] - - def test_basic - assert(E1.is_a?(OpenAI::Converter)) - assert(A1.is_a?(OpenAI::Converter)) - end - - def test_basic_coerce - assert_pattern do - OpenAI::Converter.coerce(A1, [1.0, 2.0, 3.0]) => [1, 2, 3] - end - - assert_pattern do - OpenAI::Converter.coerce(A2, %w[a b c]) => [:a, :b, :c] - end - end - - def test_basic_dump - assert_pattern do - OpenAI::Converter.dump(A1, [1.0, 2.0, 3.0]) => [1, 2, 3] - end - - assert_pattern do - OpenAI::Converter.dump(A2, %w[a b c]) => %w[a b c] - end - end - - def test_primitive_try_strict_coerce - d_now = Date.today - t_now = Time.now - - cases = { - [NilClass, :a] => [true, nil, 0], - [NilClass, nil] => [true, nil, 1], - [Integer, 1.0] => [true, 1, 1], - [Float, 1] => [true, 1.0, 1], - [Date, d_now] => [true, d_now, 1], - [Time, t_now] => [true, t_now, 1] - } - - cases.each do |test, expect| - type, input = test - assert_pattern do - OpenAI::Converter.try_strict_coerce(type, input) => ^expect - end - end - end - - def test_basic_enum_try_strict_coerce - cases = { - :a => [true, :a, 1], - "a" => [true, :a, 1], - :c => [false, true, 0], - 1 => [false, false, 0] - } - - cases.each do |input, expect| - assert_pattern do - OpenAI::Converter.try_strict_coerce(E1, input) => ^expect - end - end - end - - def test_basic_array_try_strict_coerce - cases = { - [] => [true, [], 0], - nil => [false, false, 0], - [1, 2, 3] => [true, [1, 2, 3], 3], - [1.0, 2.0, 3.0] => [true, [1, 2, 3], 3], - [1, nil, 3] => [true, [1, nil, 3], 2], - [1, nil, nil] => [true, [1, nil, nil], 1], - [1, "two", 3] => [false, true, 2] - } - - cases.each do |input, expect| - assert_pattern do - OpenAI::Converter.try_strict_coerce(A1, input) => ^expect - end - end - end - - def test_nested_array_try_strict_coerce - cases = { - %w[a b] => [true, [:a, :b], 2], - %w[a b c] => [false, true, 2] - } - - cases.each do |input, expect| - assert_pattern do - OpenAI::Converter.try_strict_coerce(A2, input) => ^expect - end - end - end - - class M1 < OpenAI::BaseModel - required :a, Time - optional :b, E1, api_name: :renamed - required :c, A1 - - request_only do - required :w, Integer - optional :x, String - end - - response_only do - required :y, Integer - optional :z, String - end - end - - class M2 < M1 - required :c, M1 - end - - def test_model_accessors - now = Time.now.round(0) - model = M2.new(a: now.to_s, b: "b", 
renamed: "a", c: [1.0, 2.0, 3.0], w: 1, y: 1) - - cases = [ - [model.a, now], - [model.b, :a], - [model.c, [1, 2, 3]], - [model.w, 1], - [model.y, 1] - ] - - cases.each do |input, expect| - assert_pattern do - input => ^expect - end - end - end - - def test_model_conversion_accessor - model = M2.new(c: {}) - assert_pattern do - model.c => M1 - end - end - - def test_model_equality - now = Time.now - model1 = M2.new(a: now, b: "b", renamed: "a", c: M1.new, w: 1, y: 1) - model2 = M2.new(a: now, b: "b", renamed: "a", c: M1.new, w: 1, y: 1) - - assert_pattern do - model2 => ^model1 - end - end - - def test_basic_model_coerce - cases = { - {} => M2.new, - {a: nil, b: :a, c: [1.0, 2.0, 3.0], w: 1} => M2.new(a: nil, b: :a, c: [1.0, 2.0, 3.0], w: 1) - } - - cases.each do |input, expect| - assert_pattern do - OpenAI::Converter.coerce(M2, input) => ^expect - end - end - end - - def test_basic_model_dump - cases = { - nil => nil, - {} => {}, - {w: 1, x: "x", y: 1, z: "z"} => {w: 1, x: "x"}, - [1, 2, 3] => [1, 2, 3] - } - - cases.each do |input, expect| - assert_pattern do - OpenAI::Converter.dump(M2, input) => ^expect - end - end - end - - def test_basic_model_try_strict_coerce - raw = {a: Time.now, c: [2], y: 1} - addn = {x: "x", n: "n"} - expect_exact = M1.new(raw) - expect_addn = M1.new(**raw, **addn) - - cases = { - {} => [false, true, 0], - raw => [true, expect_exact, 3], - {**raw, **addn} => [true, expect_addn, 4] - } - - cases.each do |input, expect| - assert_pattern do - OpenAI::Converter.try_strict_coerce(M1, input) => ^expect - end - end - end - - def test_nested_model_dump - now = Time.now - models = [M1, M2] - inputs = [ - M1.new(a: now, b: "a", c: [1.0, 2.0, 3.0], y: 1), - {a: now, b: "a", c: [1.0, 2.0, 3.0], y: 1}, - {"a" => now, b: "", "b" => "a", "c" => [], :c => [1.0, 2.0, 3.0], "y" => 1} - ] - - models.product(inputs).each do |model, input| - assert_pattern do - OpenAI::Converter.dump(model, input) => {a: now, renamed: "a", c: [1, 2, 3]} - end - end - end - - A3 = OpenAI::ArrayOf[A1] - - class M3 < M1 - optional :b, E1, api_name: :renamed_again - end - - class U1 < OpenAI::Union - discriminator :type - variant :a, M1 - variant :b, M3 - end - - class U2 < OpenAI::Union - variant A1 - variant A3 - end - - def test_basic_union - assert(U1.is_a?(OpenAI::Converter)) - - assert_pattern do - M1.new => U1 - M3.new => U1 - end - end - - def test_basic_discriminated_union_coerce - common = {a: Time.now, c: [], w: 1} - cases = { - nil => nil, - {type: "a", **common} => M1.new(type: "a", **common), - {type: :b, **common} => M3.new(type: :b, **common), - {type: :c, xyz: 1} => {type: :c, xyz: 1} - } - - cases.each do |input, expect| - assert_pattern do - OpenAI::Converter.coerce(U1, input) => ^expect - end - end - end - - def test_basic_discriminated_union_dump - now = Time.now - cases = { - nil => nil, - M1.new(a: now, b: :a, c: [1.0, 2.0, 3.0], y: 1) => {a: now, renamed: :a, c: [1, 2, 3]}, - M3.new(b: "a", y: 1) => {renamed_again: "a"}, - {type: :a, b: "a", y: 1} => {type: :a, renamed: "a"}, - {type: "b", b: "a", y: 1} => {type: "b", renamed_again: "a"}, - {type: :c, xyz: 1} => {type: :c, xyz: 1} - } - - cases.each do |input, expect| - assert_pattern do - OpenAI::Converter.dump(U1, input) => ^expect - end - end - end - - def test_basic_undifferentiated_union_try_strict_coerce - cases = { - [] => [true, [], 0], - [[]] => [true, [[]], 0], - # [nil] => [false, true, 0], - [1, 2, 3] => [true, [1, 2, 3], 3], - [[1, 2, 3], [4, 5, 6]] => [true, [[1, 2, 3], [4, 5, 6]], 6] - } - - cases.each do |input, 
expect| - assert_pattern do - OpenAI::Converter.try_strict_coerce(U2, input) => ^expect - end - end - end - - class C1 < OpenAI::BaseModel - required :a, const: :a - required :b, const: :b, nil?: true - optional :c, const: :c - end - - def test_basic_const - assert_pattern do - C1.dump(C1.new) => {a: :a} - C1.new => {a: :a} - C1.new(a: "a") => {a: :a} - C1.new(b: 2) => {b: 2} - C1.new.a => :a - C1.new.b => nil - C1.new.c => nil - end - end - - class E2 < OpenAI::Enum - A = :a - B = :b - end - - class U3 < OpenAI::Union - discriminator :type - variant :a, M1 - variant :b, M3 - end - - def test_basic_eql - assert_equal(OpenAI::Unknown, OpenAI::Unknown) - refute_equal(OpenAI::Unknown, OpenAI::BooleanModel) - assert_equal(OpenAI::BooleanModel, OpenAI::BooleanModel) - - assert_equal(E1, E2) - assert_equal(E1, E2) - - refute_equal(U1, U2) - assert_equal(U1, U3) - end -end diff --git a/test/openai/client_test.rb b/test/openai/client_test.rb index ced591bb..8ba8bd2d 100644 --- a/test/openai/client_test.rb +++ b/test/openai/client_test.rb @@ -3,12 +3,28 @@ require_relative "test_helper" class OpenAITest < Minitest::Test + extend Minitest::Serial + include WebMock::API + + def before_all + super + WebMock.enable! + end + def setup + super Thread.current.thread_variable_set(:mock_sleep, []) end def teardown Thread.current.thread_variable_set(:mock_sleep, nil) + WebMock.reset! + super + end + + def after_all + WebMock.disable! + super end def test_raises_on_missing_non_nullable_opts @@ -18,332 +34,292 @@ def test_raises_on_missing_non_nullable_opts assert_match(/is required/, e.message) end - class MockResponse - # @return [Integer] - attr_reader :code - - # @param code [Integer] - # @param headers [Hash{String=>String}] - # - def initialize(code, headers) - @code = code - @headers = {"content-type" => "application/json", **headers} - end - - # @param header [String] - # - # @return [String, nil] - # - def [](header) - @headers[header] - end - - # @param header [String] - # - # @return [Boolean] - # - def key?(header) - @headers.key?(header) - end - end - - class MockRequester - # @return [Integer] - attr_reader :response_code - - # @return [Hash{String=>String}] - attr_reader :response_headers - - # @return [Object] - attr_reader :response_data - - # @return [Array<Hash{Symbol=>Object}>] - attr_accessor :attempts - - # @param response_code [Integer] - # @param response_headers [Hash{String=>String}] - # @param response_data [Object] - # - def initialize(response_code, response_headers, response_data) - @response_code = response_code - @response_headers = response_headers - @response_data = JSON.fast_generate(response_data) - @attempts = [] - end - - # @param req [Hash{Symbol=>Object}] - # - def execute(req) - # Deep copy the request because it is mutated on each retry.
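# Marshal.load(Marshal.dump(req)) on the next line is the stock Ruby
# deep-copy idiom: retries mutate the request hash in place, so a shallow
# copy would let a later attempt overwrite what an earlier one recorded.
# The rewritten tests below drop this hand-rolled recorder and count
# attempts through WebMock's request registry instead, e.g.
# assert_requested(:any, /./, times: 3).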
- attempts.push(Marshal.load(Marshal.dump(req))) - [MockResponse.new(response_code, response_headers), response_data.grapheme_clusters] - end - end - def test_client_default_request_default_retry_attempts - openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key") - requester = MockRequester.new(500, {}, {}) - openai.requester = requester + stub_request(:post, "http://localhost/chat/completions").to_return_json(status: 500, body: {}) - assert_raises(OpenAI::InternalServerError) do - openai.chat.completions.create( - messages: [{content: "string", role: :developer}], - model: :"gpt-4o", - stream: true - ) + openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") + + assert_raises(OpenAI::Errors::InternalServerError) do + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") end - assert_equal(3, requester.attempts.length) + assert_requested(:any, /./, times: 3) end def test_client_given_request_default_retry_attempts - openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key", max_retries: 3) - requester = MockRequester.new(500, {}, {}) - openai.requester = requester + stub_request(:post, "http://localhost/chat/completions").to_return_json(status: 500, body: {}) - assert_raises(OpenAI::InternalServerError) do - openai.chat.completions.create( - messages: [{content: "string", role: :developer}], - model: :"gpt-4o", - stream: true - ) + openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key", max_retries: 3) + + assert_raises(OpenAI::Errors::InternalServerError) do + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") end - assert_equal(4, requester.attempts.length) + assert_requested(:any, /./, times: 4) end def test_client_default_request_given_retry_attempts - openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key") - requester = MockRequester.new(500, {}, {}) - openai.requester = requester + stub_request(:post, "http://localhost/chat/completions").to_return_json(status: 500, body: {}) - assert_raises(OpenAI::InternalServerError) do + openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") + + assert_raises(OpenAI::Errors::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", - stream: true, + model: :"gpt-5", request_options: {max_retries: 3} ) end - assert_equal(4, requester.attempts.length) + assert_requested(:any, /./, times: 4) end def test_client_given_request_given_retry_attempts - openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key", max_retries: 3) - requester = MockRequester.new(500, {}, {}) - openai.requester = requester + stub_request(:post, "http://localhost/chat/completions").to_return_json(status: 500, body: {}) + + openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key", max_retries: 3) - assert_raises(OpenAI::InternalServerError) do + assert_raises(OpenAI::Errors::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", - stream: true, + model: :"gpt-5", request_options: {max_retries: 4} ) end - assert_equal(5, requester.attempts.length) + assert_requested(:any, /./, times: 5) end def test_client_retry_after_seconds - openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key", max_retries: 1) - requester = 
MockRequester.new(500, {"retry-after" => "1.3"}, {}) - openai.requester = requester + stub_request(:post, "http://localhost/chat/completions").to_return_json( + status: 500, + headers: {"retry-after" => "1.3"}, + body: {} + ) - assert_raises(OpenAI::InternalServerError) do - openai.chat.completions.create( - messages: [{content: "string", role: :developer}], - model: :"gpt-4o", - stream: true - ) + openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key", max_retries: 1) + + assert_raises(OpenAI::Errors::InternalServerError) do + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") end - assert_equal(2, requester.attempts.length) + assert_requested(:any, /./, times: 2) assert_equal(1.3, Thread.current.thread_variable_get(:mock_sleep).last) end def test_client_retry_after_date - openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key", max_retries: 1) - requester = MockRequester.new(500, {"retry-after" => (Time.now + 10).httpdate}, {}) - openai.requester = requester + stub_request(:post, "http://localhost/chat/completions").to_return_json( + status: 500, + headers: {"retry-after" => (Time.now + 10).httpdate}, + body: {} + ) - assert_raises(OpenAI::InternalServerError) do + openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key", max_retries: 1) + + assert_raises(OpenAI::Errors::InternalServerError) do Thread.current.thread_variable_set(:time_now, Time.now) - openai.chat.completions.create( - messages: [{content: "string", role: :developer}], - model: :"gpt-4o", - stream: true - ) + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") Thread.current.thread_variable_set(:time_now, nil) end - assert_equal(2, requester.attempts.length) + assert_requested(:any, /./, times: 2) assert_in_delta(10, Thread.current.thread_variable_get(:mock_sleep).last, 1.0) end def test_client_retry_after_ms - openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key", max_retries: 1) - requester = MockRequester.new(500, {"retry-after-ms" => "1300"}, {}) - openai.requester = requester + stub_request(:post, "http://localhost/chat/completions").to_return_json( + status: 500, + headers: {"retry-after-ms" => "1300"}, + body: {} + ) - assert_raises(OpenAI::InternalServerError) do - openai.chat.completions.create( - messages: [{content: "string", role: :developer}], - model: :"gpt-4o", - stream: true - ) + openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key", max_retries: 1) + + assert_raises(OpenAI::Errors::InternalServerError) do + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") end - assert_equal(2, requester.attempts.length) + assert_requested(:any, /./, times: 2) assert_equal(1.3, Thread.current.thread_variable_get(:mock_sleep).last) end def test_retry_count_header - openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key") - requester = MockRequester.new(500, {}, {}) - openai.requester = requester + stub_request(:post, "http://localhost/chat/completions").to_return_json(status: 500, body: {}) - assert_raises(OpenAI::InternalServerError) do - openai.chat.completions.create( - messages: [{content: "string", role: :developer}], - model: :"gpt-4o", - stream: true - ) + openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") + + assert_raises(OpenAI::Errors::InternalServerError) do + 
openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") end - retry_count_headers = requester.attempts.map { _1[:headers]["x-stainless-retry-count"] } - assert_equal(%w[0 1 2], retry_count_headers) + 3.times do + assert_requested(:any, /./, headers: {"x-stainless-retry-count" => _1}) + end end def test_omit_retry_count_header - openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key") - requester = MockRequester.new(500, {}, {}) - openai.requester = requester + stub_request(:post, "http://localhost/chat/completions").to_return_json(status: 500, body: {}) + + openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") - assert_raises(OpenAI::InternalServerError) do + assert_raises(OpenAI::Errors::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", - stream: true, + model: :"gpt-5", request_options: {extra_headers: {"x-stainless-retry-count" => nil}} ) end - retry_count_headers = requester.attempts.map { _1[:headers]["x-stainless-retry-count"] } - assert_equal([nil, nil, nil], retry_count_headers) + assert_requested(:any, /./, times: 3) do + refute_includes(_1.headers.keys.map(&:downcase), "x-stainless-retry-count") + end end def test_overwrite_retry_count_header - openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key") - requester = MockRequester.new(500, {}, {}) - openai.requester = requester + stub_request(:post, "http://localhost/chat/completions").to_return_json(status: 500, body: {}) + + openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") - assert_raises(OpenAI::InternalServerError) do + assert_raises(OpenAI::Errors::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", - stream: true, + model: :"gpt-5", request_options: {extra_headers: {"x-stainless-retry-count" => "42"}} ) end - retry_count_headers = requester.attempts.map { _1[:headers]["x-stainless-retry-count"] } - assert_equal(%w[42 42 42], retry_count_headers) + assert_requested(:any, /./, headers: {"x-stainless-retry-count" => "42"}, times: 3) end def test_client_redirect_307 - openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key") - requester = MockRequester.new(307, {"location" => "/redirected"}, {}) - openai.requester = requester + stub_request(:post, "http://localhost/chat/completions").to_return_json( + status: 307, + headers: {"location" => "/redirected"}, + body: {} + ) + stub_request(:any, "http://localhost/redirected").to_return( + status: 307, + headers: {"location" => "/redirected"} + ) + + openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") - assert_raises(OpenAI::APIConnectionError) do + assert_raises(OpenAI::Errors::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", - stream: true, + model: :"gpt-5", request_options: {extra_headers: {}} ) end - assert_equal("/redirected", requester.attempts.last[:url].path) - assert_equal(requester.attempts.first[:method], requester.attempts.last[:method]) - assert_equal(requester.attempts.first[:body], requester.attempts.last[:body]) - assert_equal( - requester.attempts.first[:headers]["content-type"], - requester.attempts.last[:headers]["content-type"] - ) + recorded, = WebMock::RequestRegistry.instance.requested_signatures.hash.first + + 
assert_requested(:any, "http://localhost/redirected", times: OpenAI::Client::MAX_REDIRECTS) do + assert_equal(recorded.method, _1.method) + assert_equal(recorded.body, _1.body) + assert_equal( + recorded.headers.transform_keys(&:downcase).fetch("content-type"), + _1.headers.transform_keys(&:downcase).fetch("content-type") + ) + end end def test_client_redirect_303 - openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key") - requester = MockRequester.new(303, {"location" => "/redirected"}, {}) - openai.requester = requester + stub_request(:post, "http://localhost/chat/completions").to_return_json( + status: 303, + headers: {"location" => "/redirected"}, + body: {} + ) + stub_request(:get, "http://localhost/redirected").to_return( + status: 303, + headers: {"location" => "/redirected"} + ) + + openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") - assert_raises(OpenAI::APIConnectionError) do + assert_raises(OpenAI::Errors::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", - stream: true, + model: :"gpt-5", request_options: {extra_headers: {}} ) end - assert_equal("/redirected", requester.attempts.last[:url].path) - assert_equal(:get, requester.attempts.last[:method]) - assert_nil(requester.attempts.last[:body]) - assert_nil(requester.attempts.last[:headers]["Content-Type"]) + assert_requested(:get, "http://localhost/redirected", times: OpenAI::Client::MAX_REDIRECTS) do + headers = _1.headers.keys.map(&:downcase) + refute_includes(headers, "content-type") + assert_nil(_1.body) + end end def test_client_redirect_auth_keep_same_origin - openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key") - requester = MockRequester.new(307, {"location" => "/redirected"}, {}) - openai.requester = requester + stub_request(:post, "http://localhost/chat/completions").to_return_json( + status: 307, + headers: {"location" => "/redirected"}, + body: {} + ) + stub_request(:any, "http://localhost/redirected").to_return( + status: 307, + headers: {"location" => "/redirected"} + ) - assert_raises(OpenAI::APIConnectionError) do + openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") + + assert_raises(OpenAI::Errors::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", - stream: true, - request_options: {extra_headers: {"Authorization" => "Bearer xyz"}} + model: :"gpt-5", + request_options: {extra_headers: {"authorization" => "Bearer xyz"}} ) end - assert_equal( - requester.attempts.first[:headers]["authorization"], - requester.attempts.last[:headers]["authorization"] - ) + recorded, = WebMock::RequestRegistry.instance.requested_signatures.hash.first + auth_header = recorded.headers.transform_keys(&:downcase).fetch("authorization") + + assert_equal("Bearer xyz", auth_header) + assert_requested(:any, "http://localhost/redirected", times: OpenAI::Client::MAX_REDIRECTS) do + auth_header = _1.headers.transform_keys(&:downcase).fetch("authorization") + assert_equal("Bearer xyz", auth_header) + end end def test_client_redirect_auth_strip_cross_origin - openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key") - requester = MockRequester.new(307, {"location" => "https://example.com/redirected"}, {}) - openai.requester = requester + stub_request(:post, "http://localhost/chat/completions").to_return_json( + status: 307, + headers: {"location" 
=> "https://example.com/redirected"}, + body: {} + ) + stub_request(:any, "https://example.com/redirected").to_return( + status: 307, + headers: {"location" => "https://example.com/redirected"} + ) + + openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") - assert_raises(OpenAI::APIConnectionError) do + assert_raises(OpenAI::Errors::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4o", - stream: true, - request_options: {extra_headers: {"Authorization" => "Bearer xyz"}} + model: :"gpt-5", + request_options: {extra_headers: {"authorization" => "Bearer xyz"}} ) end - assert_nil(requester.attempts.last[:headers]["Authorization"]) + assert_requested(:any, "https://example.com/redirected", times: OpenAI::Client::MAX_REDIRECTS) do + headers = _1.headers.keys.map(&:downcase) + refute_includes(headers, "authorization") + end end def test_default_headers - openai = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key") - requester = MockRequester.new(200, {}, {}) - openai.requester = requester - openai.chat.completions.create( - messages: [{content: "string", role: :developer}], - model: :"gpt-4o", - stream: true - ) - headers = requester.attempts.first[:headers] + stub_request(:post, "http://localhost/chat/completions").to_return_json(status: 200, body: {}) + + openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") - refute_empty(headers["accept"]) - refute_empty(headers["content-type"]) + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") + + assert_requested(:any, /./) do |req| + headers = req.headers.transform_keys(&:downcase).fetch_values("accept", "content-type") + headers.each { refute_empty(_1) } + end end end diff --git a/test/openai/file_part_test.rb b/test/openai/file_part_test.rb new file mode 100644 index 00000000..96abf7cf --- /dev/null +++ b/test/openai/file_part_test.rb @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +require_relative "test_helper" + +class OpenAI::Test::FilePartTest < Minitest::Test + def test_to_json + text = "gray" + filepart = OpenAI::FilePart.new(StringIO.new(text)) + + assert_equal(text.to_json, filepart.to_json) + assert_equal(text.to_yaml, filepart.to_yaml) + end +end diff --git a/test/openai/internal/sorbet_runtime_support_test.rb b/test/openai/internal/sorbet_runtime_support_test.rb new file mode 100644 index 00000000..a3ad6c5a --- /dev/null +++ b/test/openai/internal/sorbet_runtime_support_test.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +require_relative "../test_helper" + +class OpenAI::Test::SorbetRuntimeSupportTest < Minitest::Test + extend Minitest::Serial + + i_suck_and_my_tests_are_order_dependent! 
+ + module E + extend OpenAI::Internal::Type::Enum + + define_sorbet_constant!(:TaggedSymbol) { 1 } + end + + module U + extend OpenAI::Internal::Type::Union + + define_sorbet_constant!(:Variants) { 2 } + end + + class M < OpenAI::Internal::Type::BaseModel + define_sorbet_constant!(:OrHash) { 3 } + end + + def test_nil_aliases + err = OpenAI::Internal::Util::SorbetRuntimeSupport::MissingSorbetRuntimeError + + assert_raises(err) { OpenAI::Internal::AnyHash } + assert_raises(err) { OpenAI::Internal::FileInput } + assert_raises(err) { OpenAI::Internal::Type::Converter::Input } + assert_raises(err) { OpenAI::Internal::Type::Converter::CoerceState } + assert_raises(err) { OpenAI::Internal::Type::Converter::DumpState } + assert_raises(err) { OpenAI::Internal::Type::BaseModel::KnownField } + assert_raises(err) { OpenAI::Internal::Util::ParsedUri } + assert_raises(err) { OpenAI::Internal::Util::ServerSentEvent } + assert_raises(err) { OpenAI::Internal::Transport::BaseClient::RequestComponents } + assert_raises(err) { OpenAI::Internal::Transport::BaseClient::RequestInput } + assert_raises(err) { OpenAI::Internal::Transport::PooledNetRequester::Request } + assert_raises(err) { E::TaggedSymbol } + assert_raises(err) { U::Variants } + assert_raises(err) { M::OrHash } + end + + def test_stubbed_aliases + Kernel.instance_eval { const_set(:T, nil) } + + assert_equal(1, E::TaggedSymbol) + assert_equal(2, U::Variants) + assert_equal(3, M::OrHash) + end +end diff --git a/test/openai/internal/type/base_model_test.rb b/test/openai/internal/type/base_model_test.rb new file mode 100644 index 00000000..438037e4 --- /dev/null +++ b/test/openai/internal/type/base_model_test.rb @@ -0,0 +1,727 @@ +# frozen_string_literal: true + +require_relative "../../test_helper" + +class OpenAI::Test::PrimitiveModelTest < Minitest::Test + A = OpenAI::Internal::Type::ArrayOf[-> { Integer }] + H = OpenAI::Internal::Type::HashOf[-> { Integer }, nil?: true] + + module E + extend OpenAI::Internal::Type::Enum + end + + module U + extend OpenAI::Internal::Type::Union + end + + class B < OpenAI::Internal::Type::BaseModel + optional :a, Integer + optional :b, B + end + + def test_typing + converters = [ + OpenAI::Internal::Type::Unknown, + OpenAI::Internal::Type::Boolean, + A, + H, + E, + U, + B + ] + + converters.each do |conv| + assert_pattern do + conv => OpenAI::Internal::Type::Converter + end + end + end + + def test_coerce + cases = { + [OpenAI::Internal::Type::Unknown, :a] => [{yes: 1}, :a], + [NilClass, :a] => [{maybe: 1}, nil], + [NilClass, nil] => [{yes: 1}, nil], + [OpenAI::Internal::Type::Boolean, true] => [{yes: 1}, true], + [OpenAI::Internal::Type::Boolean, "true"] => [{no: 1}, "true"], + [Integer, 1] => [{yes: 1}, 1], + [Integer, 1.0] => [{maybe: 1}, 1], + [Integer, "1"] => [{maybe: 1}, 1], + [Integer, "one"] => [{no: 1}, "one"], + [Float, 1] => [{yes: 1}, 1.0], + [Float, "1"] => [{maybe: 1}, 1.0], + [Float, :one] => [{no: 1}, :one], + [String, :str] => [{yes: 1}, "str"], + [String, "str"] => [{yes: 1}, "str"], + [String, 1] => [{maybe: 1}, "1"], + [:a, "a"] => [{yes: 1}, :a], + [Date, "1990-09-19"] => [{yes: 1}, Date.new(1990, 9, 19)], + [Date, Date.new(1990, 9, 19)] => [{yes: 1}, Date.new(1990, 9, 19)], + [Date, "one"] => [{no: 1}, "one"], + [Time, "1990-09-19"] => [{yes: 1}, Time.new(1990, 9, 19)], + [Time, Time.new(1990, 9, 19)] => [{yes: 1}, Time.new(1990, 9, 19)], + [Time, "one"] => [{no: 1}, "one"] + } + + cases.each do |lhs, rhs| + target, input = lhs + exactness, expect = rhs + state = 
OpenAI::Internal::Type::Converter.new_coerce_state + assert_pattern do + OpenAI::Internal::Type::Converter.coerce(target, input, state: state) => ^expect + state.fetch(:exactness).filter { _2.nonzero? }.to_h => ^exactness + end + end + end + + def test_dump + cases = { + [OpenAI::Internal::Type::Unknown, B.new(a: "one", b: B.new(a: 1.0))] => {a: "one", b: {a: 1}}, + [A, B.new(a: "one", b: B.new(a: 1.0))] => {a: "one", b: {a: 1}}, + [H, B.new(a: "one", b: B.new(a: 1.0))] => {a: "one", b: {a: 1}}, + [E, B.new(a: "one", b: B.new(a: 1.0))] => {a: "one", b: {a: 1}}, + [U, B.new(a: "one", b: B.new(a: 1.0))] => {a: "one", b: {a: 1}}, + [B, B.new(a: "one", b: B.new(a: 1.0))] => {a: "one", b: {a: 1}}, + [String, B.new(a: "one", b: B.new(a: 1.0))] => {a: "one", b: {a: 1}}, + [:b, B.new(a: "one", b: B.new(a: 1.0))] => {a: "one", b: {a: 1}}, + [nil, B.new(a: "one", b: B.new(a: 1.0))] => {a: "one", b: {a: 1}}, + [OpenAI::Internal::Type::Boolean, true] => true, + [OpenAI::Internal::Type::Boolean, "true"] => "true", + [Integer, "1"] => "1", + [Float, 1] => 1, + [String, "one"] => "one", + [String, :one] => :one, + [:a, :b] => :b, + [:a, "a"] => "a", + [String, StringIO.new("one")] => "one", + [String, Pathname(__FILE__)] => OpenAI::FilePart + } + + cases.each do + target, input = _1 + expect = _2 + assert_pattern do + OpenAI::Internal::Type::Converter.dump(target, input) => ^expect + end + end + end + + def test_coerce_errors + cases = { + [Integer, "one"] => ArgumentError, + [Float, "one"] => ArgumentError, + [String, Time] => TypeError, + [Date, "one"] => ArgumentError, + [Time, "one"] => ArgumentError + } + + cases.each do |testcase, expect| + target, input = testcase + state = OpenAI::Internal::Type::Converter.new_coerce_state + OpenAI::Internal::Type::Converter.coerce(target, input, state: state) + assert_pattern do + state => {error: ^expect} + end + end + end + + def test_dump_retry + types = [ + OpenAI::Internal::Type::Unknown, + OpenAI::Internal::Type::Boolean, + A, + H, + E, + U, + B + ] + Pathname(__FILE__).open do |fd| + cases = [ + fd, + [fd], + {a: fd}, + {a: {b: fd}} + ] + types.product(cases).each do |target, input| + state = {can_retry: true} + OpenAI::Internal::Type::Converter.dump(target, input, state: state) + + assert_pattern do + state => {can_retry: false} + end + end + end + end +end + +class OpenAI::Test::EnumModelTest < Minitest::Test + class E0 + include OpenAI::Internal::Type::Enum + + attr_reader :values + + def initialize(*values) = (@values = values) + end + + module E1 + extend OpenAI::Internal::Type::Enum + + TRUE = true + end + + module E2 + extend OpenAI::Internal::Type::Enum + + ONE = 1 + TWO = 2 + end + + module E3 + extend OpenAI::Internal::Type::Enum + + ONE = 1.0 + TWO = 2.0 + end + + module E4 + extend OpenAI::Internal::Type::Enum + + ONE = :one + TWO = :two + end + + def test_coerce + cases = { + [E0.new, "one"] => [{no: 1}, "one"], + [E0.new(:one), "one"] => [{yes: 1}, :one], + [E0.new(:two), "one"] => [{maybe: 1}, "one"], + + [E1, true] => [{yes: 1}, true], + [E1, false] => [{no: 1}, false], + [E1, :true] => [{no: 1}, :true], + + [E2, 1] => [{yes: 1}, 1], + [E2, 1.0] => [{yes: 1}, 1], + [E2, 1.2] => [{no: 1}, 1.2], + [E2, "1"] => [{no: 1}, "1"], + + [E3, 1.0] => [{yes: 1}, 1.0], + [E3, 1] => [{yes: 1}, 1.0], + [E3, "one"] => [{no: 1}, "one"], + + [E4, :one] => [{yes: 1}, :one], + [E4, "one"] => [{yes: 1}, :one], + [E4, "1"] => [{maybe: 1}, "1"], + [E4, :"1"] => [{maybe: 1}, :"1"], + [E4, 1] => [{no: 1}, 1] + } + + cases.each do |lhs, rhs| + target, input = lhs 
+ exactness, expect = rhs + state = OpenAI::Internal::Type::Converter.new_coerce_state + assert_pattern do + OpenAI::Internal::Type::Converter.coerce(target, input, state: state) => ^expect + state.fetch(:exactness).filter { _2.nonzero? }.to_h => ^exactness + end + end + end + + def test_dump + cases = { + [E1, true] => true, + [E1, "true"] => "true", + + [E2, 1.0] => 1.0, + [E2, 3] => 3, + [E2, "1.0"] => "1.0", + + [E3, 1.0] => 1.0, + [E3, 3] => 3, + [E3, "1.0"] => "1.0", + + [E4, :one] => :one, + [E4, "one"] => "one", + [E4, "1.0"] => "1.0" + } + + cases.each do + target, input = _1 + expect = _2 + assert_pattern do + OpenAI::Internal::Type::Converter.dump(target, input) => ^expect + end + end + end +end + +class OpenAI::Test::CollectionModelTest < Minitest::Test + A1 = OpenAI::Internal::Type::ArrayOf[-> { Integer }] + H1 = OpenAI::Internal::Type::HashOf[Integer] + + A2 = OpenAI::Internal::Type::ArrayOf[H1] + H2 = OpenAI::Internal::Type::HashOf[-> { A1 }] + + A3 = OpenAI::Internal::Type::ArrayOf[Integer, nil?: true] + H3 = OpenAI::Internal::Type::HashOf[Integer, nil?: true] + + def test_coerce + cases = { + [A1, []] => [{yes: 1}, []], + [A1, {}] => [{no: 1}, {}], + [A1, [1, 2.0]] => [{yes: 2, maybe: 1}, [1, 2]], + [A1, ["1", 2.0]] => [{yes: 1, maybe: 2}, [1, 2]], + [H1, {}] => [{yes: 1}, {}], + [H1, []] => [{no: 1}, []], + [H1, {a: 1, b: 2}] => [{yes: 3}, {a: 1, b: 2}], + [H1, {"a" => 1, "b" => 2}] => [{yes: 3}, {a: 1, b: 2}], + [H1, {[] => 1}] => [{yes: 2, no: 1}, {[] => 1}], + [H1, {a: 1.5}] => [{yes: 1, maybe: 1}, {a: 1}], + + [A2, [{}, {"a" => 1}]] => [{yes: 4}, [{}, {a: 1}]], + [A2, [{"a" => "1"}]] => [{yes: 2, maybe: 1}, [{a: 1}]], + [H2, {a: [1, 2]}] => [{yes: 4}, {a: [1, 2]}], + [H2, {"a" => ["1", 2]}] => [{yes: 3, maybe: 1}, {a: [1, 2]}], + [H2, {"a" => ["one", 2]}] => [{yes: 3, no: 1}, {a: ["one", 2]}], + + [A3, [nil, 1]] => [{yes: 3}, [nil, 1]], + [A3, [nil, "1"]] => [{yes: 2, maybe: 1}, [nil, 1]], + [H3, {a: nil, b: "1"}] => [{yes: 2, maybe: 1}, {a: nil, b: 1}], + [H3, {a: nil}] => [{yes: 2}, {a: nil}] + } + + cases.each do |lhs, rhs| + target, input = lhs + exactness, expect = rhs + state = OpenAI::Internal::Type::Converter.new_coerce_state + assert_pattern do + OpenAI::Internal::Type::Converter.coerce(target, input, state: state) => ^expect + state.fetch(:exactness).filter { _2.nonzero? 
}.to_h => ^exactness + end + end + end +end + +class OpenAI::Test::BaseModelTest < Minitest::Test + class M1 < OpenAI::Internal::Type::BaseModel + required :a, Integer + end + + class M2 < M1 + required :a, Time + required :b, Integer, nil?: true + optional :c, String + end + + class M3 < OpenAI::Internal::Type::BaseModel + optional :c, const: :c + required :d, const: :d + end + + class M4 < M1 + request_only do + required :a, Integer + optional :b, String + end + + response_only do + required :c, Integer + optional :d, String + end + end + + class M5 < OpenAI::Internal::Type::BaseModel + request_only do + required :c, const: :c + end + + response_only do + required :d, const: :d + end + end + + class M6 < M1 + required :a, OpenAI::Internal::Type::ArrayOf[M6] + optional :b, M6 + end + + def test_coerce + cases = { + [M1, {}] => [{yes: 1, no: 1}, {}], + [M1, :m1] => [{no: 1}, :m1], + + [M2, {}] => [{yes: 2, no: 1, maybe: 1}, {}], + [M2, {a: "1990-09-19", b: nil}] => [{yes: 4}, {a: "1990-09-19", b: nil}], + [M2, {a: "1990-09-19", b: "1"}] => [{yes: 3, maybe: 1}, {a: "1990-09-19", b: "1"}], + [M2, {a: "1990-09-19"}] => [{yes: 3, maybe: 1}, {a: "1990-09-19"}], + [M2, {a: "1990-09-19", c: nil}] => [{yes: 2, maybe: 2}, {a: "1990-09-19", c: nil}], + + [M3, {c: "c", d: "d"}] => [{yes: 3}, {c: :c, d: :d}], + [M3, {c: "d", d: "c"}] => [{yes: 1, maybe: 2}, {c: "d", d: "c"}], + + [M4, {c: 2}] => [{yes: 5}, {c: 2}], + [M4, {a: "1", c: 2}] => [{yes: 4, maybe: 1}, {a: "1", c: 2}], + [M4, {b: nil, c: 2}] => [{yes: 4, maybe: 1}, {b: nil, c: 2}], + + [M5, {}] => [{yes: 3}, {}], + [M5, {c: "c"}] => [{yes: 3}, {c: :c}], + [M5, {d: "d"}] => [{yes: 3}, {d: :d}], + [M5, {d: nil}] => [{yes: 2, no: 1}, {d: nil}], + + [M6, {a: [{a: []}]}] => [{yes: 6}, -> { _1 in {a: [M6]} }], + [M6, {b: {a: []}}] => [{yes: 4, no: 1}, -> { _1 in {b: M6} }] + } + + cases.each do |lhs, rhs| + target, input = lhs + exactness, expect = rhs + state = OpenAI::Internal::Type::Converter.new_coerce_state + assert_pattern do + coerced = OpenAI::Internal::Type::Converter.coerce(target, input, state: state) + assert_equal(coerced, coerced) + if coerced.is_a?(OpenAI::Internal::Type::BaseModel) + coerced.to_h => ^expect + else + coerced => ^expect + end + state.fetch(:exactness).filter { _2.nonzero? 
}.to_h => ^exactness + end + end + end + + def test_dump + cases = { + [M3, M3.new] => {d: :d}, + [M3, {}] => {d: :d}, + [M3, {d: 1}] => {d: 1}, + + [M4, M4.new(a: 1, b: "b", c: 2, d: "d")] => {a: 1, b: "b"}, + [M4, {a: 1, b: "b", c: 2, d: "d"}] => {a: 1, b: "b"}, + + [M5, M5.new] => {c: :c}, + [M5, {}] => {c: :c}, + [M5, {c: 1}] => {c: 1} + } + + cases.each do + target, input = _1 + expect = _2 + assert_pattern do + OpenAI::Internal::Type::Converter.dump(target, input) => ^expect + end + end + end + + def test_accessors + cases = { + M2.new({a: "1990-09-19", b: "1"}) => [{a: "1990-09-19", b: "1"}, {a: Time.new(1990, 9, 19), b: 1}], + M2.new(a: "one", b: "one") => [{a: "one", b: "one"}, {a: ArgumentError, b: ArgumentError}], + M2.new(a: nil, b: 2.0) => [{a: nil, b: 2.0}, {a: TypeError}], + M2.new(a: nil, b: 2.2) => [{a: nil, b: 2.2}, {a: TypeError, b: 2}], + + M3.new => [{}, {d: :d}], + M3.new(d: 1) => [{d: 1}, {d: ArgumentError}], + + M5.new => [{}, {c: :c, d: :d}] + } + + cases.each do + target = _1 + data, attributes = _2 + + assert_pattern do + target.to_h => ^data + end + + attributes.each do |accessor, expect| + case expect + in Class if expect <= StandardError + tap do + target.public_send(accessor) + flunk + rescue OpenAI::Errors::ConversionError => e + assert_kind_of(expect, e.cause) + end + else + assert_pattern { target.public_send(accessor) => ^expect } + end + end + end + end + + def test_inplace_modification + m1 = M6.new(a: []) + m1.a << M6.new(a: []) + + m2 = M6.new(b: M6.new(a: [])) + m2.b.a << M6.new(a: []) + + m3 = M6.new(a: []) + m4 = M6.new(b: m3) + m3.a << M6.new(a: []) + + assert_pattern do + m1 => {a: [{a: []}]} + m2 => {b: {a: [{a: []}]}} + m4 => {b: {a: [{a: []}]}} + end + end +end + +class OpenAI::Test::UnionTest < Minitest::Test + class U0 + include OpenAI::Internal::Type::Union + + def initialize(*variants) = variants.each { variant(_1) } + end + + module U1 + extend OpenAI::Internal::Type::Union + + variant const: :a + variant const: 2 + end + + class M1 < OpenAI::Internal::Type::BaseModel + required :t, const: :a, api_name: :type + optional :c, String + end + + class M2 < OpenAI::Internal::Type::BaseModel + required :type, const: :b + optional :c, String + end + + module U2 + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant :a, M1 + variant :b, M2 + end + + module U3 + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant :a, M1 + variant String + end + + module U4 + extend OpenAI::Internal::Type::Union + + discriminator :type + + variant String + variant :a, M1 + end + + class M3 < OpenAI::Internal::Type::BaseModel + optional :recur, -> { U5 } + required :a, Integer + end + + class M4 < OpenAI::Internal::Type::BaseModel + optional :recur, -> { U5 } + required :a, OpenAI::Internal::Type::ArrayOf[-> { U5 }] + end + + class M5 < OpenAI::Internal::Type::BaseModel + optional :recur, -> { U5 } + required :b, OpenAI::Internal::Type::ArrayOf[-> { U5 }] + end + + module U5 + extend OpenAI::Internal::Type::Union + + variant -> { M3 } + variant -> { M4 } + end + + module U6 + extend OpenAI::Internal::Type::Union + + variant -> { M3 } + variant -> { M5 } + end + + def test_accessors + model = M3.new(recur: []) + tap do + model.recur + flunk + rescue OpenAI::Errors::ConversionError => e + assert_kind_of(ArgumentError, e.cause) + end + end + + def test_coerce + cases = { + [U0, :""] => [{no: 1}, 0, :""], + + [U0.new(Integer, Float), "one"] => [{no: 1}, 2, "one"], + [U0.new(Integer, Float), 1.0] => [{yes: 1}, 2, 1.0], + 
[U0.new({const: :a}), "a"] => [{yes: 1}, 1, :a], + [U0.new({const: :a}), "2"] => [{maybe: 1}, 1, "2"], + + [U1, "a"] => [{yes: 1}, 1, :a], + [U1, "2"] => [{maybe: 1}, 2, "2"], + [U1, :b] => [{maybe: 1}, 2, :b], + + [U2, {type: :a}] => [{yes: 3}, 0, {t: :a}], + [U2, {type: "b"}] => [{yes: 3}, 0, {type: :b}], + + [U3, "one"] => [{yes: 1}, 2, "one"], + [U4, "one"] => [{yes: 1}, 1, "one"], + + [U5, {a: []}] => [{yes: 3}, 2, {a: []}], + [U6, {b: []}] => [{yes: 3}, 2, {b: []}], + + [U5, {a: [{a: []}]}] => [{yes: 6}, 4, {a: [M4.new(a: [])]}], + [U5, {a: [{a: [{a: []}]}]}] => [{yes: 9}, 6, {a: [M4.new(a: [M4.new(a: [])])]}] + } + + cases.each do |lhs, rhs| + target, input = lhs + exactness, branched, expect = rhs + state = OpenAI::Internal::Type::Converter.new_coerce_state + assert_pattern do + coerced = OpenAI::Internal::Type::Converter.coerce(target, input, state: state) + assert_equal(coerced, coerced) + if coerced.is_a?(OpenAI::Internal::Type::BaseModel) + coerced.to_h => ^expect + else + coerced => ^expect + end + state.fetch(:exactness).filter { _2.nonzero? }.to_h => ^exactness + state => {branched: ^branched} + end + end + end +end + +class OpenAI::Test::BaseModelQoLTest < Minitest::Test + class E0 + include OpenAI::Internal::Type::Enum + + attr_reader :values + + def initialize(*values) = (@values = values) + end + + module E1 + extend OpenAI::Internal::Type::Enum + + A = 1 + end + + module E2 + extend OpenAI::Internal::Type::Enum + + A = 1 + end + + module E3 + extend OpenAI::Internal::Type::Enum + + A = 2 + B = 3 + end + + class U0 + include OpenAI::Internal::Type::Union + + def initialize(*variants) = variants.each { variant(_1) } + end + + module U1 + extend OpenAI::Internal::Type::Union + + variant String + variant Integer + end + + module U2 + extend OpenAI::Internal::Type::Union + + variant String + variant Integer + end + + class M1 < OpenAI::Internal::Type::BaseModel + required :a, Integer + end + + class M2 < OpenAI::Internal::Type::BaseModel + required :a, Integer, nil?: true + end + + class M3 < M2 + required :a, Integer + end + + def test_equality + cases = { + [OpenAI::Internal::Type::Unknown, OpenAI::Internal::Type::Unknown] => true, + [OpenAI::Internal::Type::Boolean, OpenAI::Internal::Type::Boolean] => true, + [OpenAI::Internal::Type::Unknown, OpenAI::Internal::Type::Boolean] => false, + [E0.new(:a, :b), E0.new(:a, :b)] => true, + [E0.new(:a, :b), E0.new(:b, :a)] => true, + [E0.new(:a, :b), E0.new(:b, :c)] => false, + [E1, E2] => true, + [E1, E3] => false, + [U0.new(String, Integer), U0.new(String, Integer)] => true, + [U0.new(String, Integer), U0.new(Integer, String)] => false, + [U0.new(String, Float), U0.new(String, Integer)] => false, + [U1, U2] => true, + [M1, M2] => false, + [M1, M3] => true, + [M1.new(a: 1), M1.new(a: 1)] => true + } + + cases.each do + if _2 + assert_equal(*_1) + assert_equal(*_1.map(&:hash)) + else + refute_equal(*_1) + refute_equal(*_1.map(&:hash)) + end + end + end +end + +class OpenAI::Test::MetaInfoTest < Minitest::Test + A1 = OpenAI::Internal::Type::ArrayOf[Integer, nil?: true, doc: "dog"] + H1 = OpenAI::Internal::Type::HashOf[-> { String }, nil?: true, doc: "dawg"] + + class M1 < OpenAI::Internal::Type::BaseModel + required :a, Integer, doc: "dog" + optional :b, -> { String }, nil?: true, doc: "dawg" + end + + module U1 + extend OpenAI::Internal::Type::Union + + variant -> { Integer }, const: 2, doc: "dog" + variant -> { String }, doc: "dawg" + end + + def test_meta_retrieval + m1 = A1.instance_variable_get(:@meta) + m2 = 
H1.instance_variable_get(:@meta) + assert_equal({doc: "dog"}, m1) + assert_equal({doc: "dawg"}, m2) + + ma, mb = M1.fields.fetch_values(:a, :b) + assert_equal({doc: "dog"}, ma.fetch(:meta)) + assert_equal({doc: "dawg"}, mb.fetch(:meta)) + + ua, ub = U1.send(:known_variants).map(&:last) + assert_equal({doc: "dog"}, ua) + assert_equal({doc: "dawg"}, ub) + end +end diff --git a/test/openai/internal/util_test.rb b/test/openai/internal/util_test.rb new file mode 100644 index 00000000..fc91db45 --- /dev/null +++ b/test/openai/internal/util_test.rb @@ -0,0 +1,594 @@ +# frozen_string_literal: true + +require_relative "../test_helper" + +class OpenAI::Test::UtilDataHandlingTest < Minitest::Test + def test_left_map + assert_pattern do + OpenAI::Internal::Util.deep_merge({a: 1}, nil) => nil + end + end + + def test_right_map + assert_pattern do + OpenAI::Internal::Util.deep_merge(nil, {a: 1}) => {a: 1} + end + end + + def test_disjoint_maps + assert_pattern do + OpenAI::Internal::Util.deep_merge({b: 2}, {a: 1}) => {a: 1, b: 2} + end + end + + def test_overlapping_maps + assert_pattern do + OpenAI::Internal::Util.deep_merge({b: 2, c: 3}, {a: 1, c: 4}) => {a: 1, b: 2, c: 4} + end + end + + def test_nested + assert_pattern do + OpenAI::Internal::Util.deep_merge({b: {b2: 1}}, {b: {b2: 2}}) => {b: {b2: 2}} + end + end + + def test_nested_left_map + assert_pattern do + OpenAI::Internal::Util.deep_merge({b: {b2: 1}}, {b: 6}) => {b: 6} + end + end + + def test_omission + merged = OpenAI::Internal::Util.deep_merge( + {b: {b2: 1, b3: {c: 4, d: 5}}}, + {b: {b2: 1, b3: {c: OpenAI::Internal::OMIT, d: 5}}} + ) + + assert_pattern do + merged => {b: {b2: 1, b3: {d: 5}}} + end + end + + def test_concat + merged = OpenAI::Internal::Util.deep_merge( + {a: {b: [1, 2]}}, + {a: {b: [3, 4]}}, + concat: true + ) + + assert_pattern do + merged => {a: {b: [1, 2, 3, 4]}} + end + end + + def test_concat_false + merged = OpenAI::Internal::Util.deep_merge( + {a: {b: [1, 2]}}, + {a: {b: [3, 4]}}, + concat: false + ) + + assert_pattern do + merged => {a: {b: [3, 4]}} + end + end + + def test_dig + assert_pattern do + OpenAI::Internal::Util.dig(1, nil) => 1 + OpenAI::Internal::Util.dig({a: 1}, :b) => nil + OpenAI::Internal::Util.dig({a: 1}, :a) => 1 + OpenAI::Internal::Util.dig({a: {b: 1}}, [:a, :b]) => 1 + + OpenAI::Internal::Util.dig([], 1) => nil + OpenAI::Internal::Util.dig([nil, [nil, 1]], [1, 1]) => 1 + OpenAI::Internal::Util.dig({a: [nil, 1]}, [:a, 1]) => 1 + OpenAI::Internal::Util.dig([], 1.0) => nil + + OpenAI::Internal::Util.dig(Object, 1) => nil + OpenAI::Internal::Util.dig([], 1.0) { 2 } => 2 + OpenAI::Internal::Util.dig([], ->(_) { 2 }) => 2 + OpenAI::Internal::Util.dig([1], -> { _1 in [1] }) => true + end + end +end + +class OpenAI::Test::UtilUriHandlingTest < Minitest::Test + def test_parsing + %w[ + http://example.com + https://example.com/ + https://example.com:443/example?e1=e1&e2=e2&e= + ].each do |url| + parsed = OpenAI::Internal::Util.parse_uri(url) + unparsed = OpenAI::Internal::Util.unparse_uri(parsed).to_s + + assert_equal(url, unparsed) + assert_equal(parsed, OpenAI::Internal::Util.parse_uri(unparsed)) + end + end + + def test_joining + cases = [ + [ + "h://a.b/c?d=e", + "h://nope/ignored", + OpenAI::Internal::Util.parse_uri("h://a.b/c?d=e") + ], + [ + "h://a.b/c?d=e", + "h://nope", + { + host: "a.b", + path: "/c", + query: {"d" => ["e"]} + } + ] + ] + + cases.each do |expect, lhs, rhs| + assert_equal( + URI.parse(expect), + OpenAI::Internal::Util.join_parsed_uri( + OpenAI::Internal::Util.parse_uri(lhs), + 
rhs + ) + ) + end + end + + def test_joining_queries + base_url = "h://a.b/c?d=e" + cases = { + "c2" => "h://a.b/c/c2", + "/c2?f=g" => "h://a.b/c2?f=g", + "/c?f=g" => "h://a.b/c?d=e&f=g" + } + + cases.each do |path, expected| + assert_equal( + URI.parse(expected), + OpenAI::Internal::Util.join_parsed_uri( + OpenAI::Internal::Util.parse_uri(base_url), + {path: path} + ) + ) + end + end +end + +class OpenAI::Test::RegexMatchTest < Minitest::Test + def test_json_content + cases = { + "application/json" => true, + "application/jsonl" => false, + "application/vnd.github.v3+json" => true, + "application/vnd.api+json" => true + } + cases.each do |header, verdict| + assert_pattern do + OpenAI::Internal::Util::JSON_CONTENT.match?(header) => ^verdict + end + end + end + + def test_jsonl_content + cases = { + "application/x-ndjson" => true, + "application/x-ldjson" => true, + "application/jsonl" => true, + "application/x-jsonl" => true, + "application/json" => false, + "application/vnd.api+json" => false + } + cases.each do |header, verdict| + assert_pattern do + OpenAI::Internal::Util::JSONL_CONTENT.match?(header) => ^verdict + end + end + end +end + +class OpenAI::Test::UtilFormDataEncodingTest < Minitest::Test + class FakeCGI < CGI + def initialize(headers, io) + encoded = io.to_a + @ctype = headers["content-type"] + # rubocop:disable Lint/EmptyBlock + @io = OpenAI::Internal::Util::ReadIOAdapter.new(encoded.to_enum) {} + # rubocop:enable Lint/EmptyBlock + @c_len = encoded.join.bytesize.to_s + super() + end + + def stdinput = @io + + def env_table + { + "REQUEST_METHOD" => "POST", + "CONTENT_TYPE" => @ctype, + "CONTENT_LENGTH" => @c_len + } + end + end + + def test_file_encode + file = Pathname(__FILE__) + headers = {"content-type" => "multipart/form-data"} + cases = { + "abc" => "abc", + StringIO.new("abc") => "abc", + OpenAI::FilePart.new("abc") => "abc", + OpenAI::FilePart.new(StringIO.new("abc")) => "abc", + file => /^class OpenAI/, + OpenAI::FilePart.new(file) => /^class OpenAI/ + } + cases.each do |body, val| + encoded = OpenAI::Internal::Util.encode_content(headers, body) + cgi = FakeCGI.new(*encoded) + assert_pattern do + cgi[""].read => ^val + end + end + end + + def test_hash_encode + headers = {"content-type" => "multipart/form-data"} + cases = { + {a: 2, b: 3} => {"a" => "2", "b" => "3"}, + {a: 2, b: nil} => {"a" => "2", "b" => "null"}, + {a: 2, b: [1, 2, 3]} => {"a" => "2", "b" => "1"}, + {strio: StringIO.new("a")} => {"strio" => "a"}, + {strio: OpenAI::FilePart.new("a")} => {"strio" => "a"}, + {pathname: Pathname(__FILE__)} => {"pathname" => -> { _1.read in /^class OpenAI/ }}, + {pathname: OpenAI::FilePart.new(Pathname(__FILE__))} => {"pathname" => -> { _1.read in /^class OpenAI/ }} + } + cases.each do |body, testcase| + encoded = OpenAI::Internal::Util.encode_content(headers, body) + cgi = FakeCGI.new(*encoded) + testcase.each do |key, val| + assert_pattern do + cgi[key] => ^val + end + end + end + end +end + +class OpenAI::Test::UtilIOAdapterTest < Minitest::Test + def test_copy_read + cases = { + StringIO.new("abc") => "abc", + Enumerator.new { _1 << "abc" } => "abc" + } + cases.each do |input, expected| + io = StringIO.new + # rubocop:disable Lint/EmptyBlock + adapter = OpenAI::Internal::Util::ReadIOAdapter.new(input) {} + # rubocop:enable Lint/EmptyBlock + IO.copy_stream(adapter, io) + assert_equal(expected, io.string) + end + end + + def test_copy_write + cases = { + StringIO.new => "", + StringIO.new("abc") => "abc" + } + cases.each do |input, expected| + enum = 
OpenAI::Internal::Util.writable_enum do |y| + IO.copy_stream(input, y) + end + assert_equal(expected, enum.to_a.join) + end + end +end + +class OpenAI::Test::UtilFusedEnumTest < Minitest::Test + def test_closing + arr = [1, 2, 3] + once = 0 + fused = OpenAI::Internal::Util.fused_enum(arr.to_enum) do + once = once.succ + end + + enumerated_1 = fused.to_a + assert_equal(arr, enumerated_1) + assert_equal(1, once) + + enumerated_2 = fused.to_a + assert_equal([], enumerated_2) + assert_equal(1, once) + end + + def test_rewind_chain + once = 0 + fused = OpenAI::Internal::Util.fused_enum([1, 2, 3].to_enum) do + once = once.succ + end + .lazy + .map(&:succ) + .filter(&:odd?) + first = fused.next + + assert_equal(3, first) + assert_equal(0, once) + assert_raises(StopIteration) { fused.rewind.next } + assert_equal(1, once) + end + + def test_external_iteration + iter = [1, 2, 3].to_enum + first = iter.next + fused = OpenAI::Internal::Util.fused_enum(iter, external: true) + + assert_equal(1, first) + assert_equal([2, 3], fused.to_a) + end + + def test_close_fused + once = 0 + fused = OpenAI::Internal::Util.fused_enum([1, 2, 3].to_enum) do + once = once.succ + end + + OpenAI::Internal::Util.close_fused!(fused) + + assert_equal(1, once) + assert_equal([], fused.to_a) + assert_equal(1, once) + end + + def test_closed_fused_extern_iteration + taken = 0 + enum = [1, 2, 3].to_enum.lazy.map do + taken = taken.succ + _1 + end + fused = OpenAI::Internal::Util.fused_enum(enum) + first = fused.next + + assert_equal(1, first) + OpenAI::Internal::Util.close_fused!(fused) + assert_equal(1, taken) + end + + def test_closed_fused_taken_count + taken = 0 + enum = [1, 2, 3].to_enum.lazy.map do + taken = taken.succ + _1 + end + .map(&:succ) + .filter(&:odd?) + fused = OpenAI::Internal::Util.fused_enum(enum) + + assert_equal(0, taken) + OpenAI::Internal::Util.close_fused!(fused) + assert_equal(0, taken) + end + + def test_closed_fused_extern_iter_taken_count + taken = 0 + enum = [1, 2, 3].to_enum.lazy.map do + taken = taken.succ + _1 + end + .map(&:succ) + .filter(&:itself) + first = enum.next + assert_equal(2, first) + assert_equal(1, taken) + + fused = OpenAI::Internal::Util.fused_enum(enum) + OpenAI::Internal::Util.close_fused!(fused) + assert_equal(1, taken) + end + + def test_close_fused_sse_chain + taken = 0 + enum = [1, 2, 3].to_enum.lazy.map do + taken = taken.succ + _1 + end + .map(&:succ) + .filter(&:odd?) 
+ .map(&:to_s) + + fused_1 = OpenAI::Internal::Util.fused_enum(enum) + fused_2 = OpenAI::Internal::Util.decode_lines(fused_1) + fused_3 = OpenAI::Internal::Util.decode_sse(fused_2) + + assert_equal(0, taken) + OpenAI::Internal::Util.close_fused!(fused_3) + assert_equal(0, taken) + end +end + +class OpenAI::Test::UtilContentDecodingTest < Minitest::Test + def test_charset + cases = { + "application/json" => Encoding::BINARY, + "application/json; charset=utf-8" => Encoding::UTF_8, + "charset=uTf-8 application/json; " => Encoding::UTF_8, + "charset=UTF-8; application/json; " => Encoding::UTF_8, + "charset=ISO-8859-1 ;application/json; " => Encoding::ISO_8859_1, + "charset=EUC-KR ;application/json; " => Encoding::EUC_KR + } + text = String.new.force_encoding(Encoding::BINARY) + cases.each do |content_type, encoding| + OpenAI::Internal::Util.force_charset!(content_type, text: text) + assert_equal(encoding, text.encoding) + end + end +end + +class OpenAI::Test::UtilSseTest < Minitest::Test + def test_decode_lines + cases = { + %w[] => %w[], + %W[\n\n] => %W[\n \n], + %W[\n \n] => %W[\n \n], + %w[a] => %w[a], + %W[a\nb] => %W[a\n b], + %W[a\nb\n] => %W[a\n b\n], + %W[\na b\n] => %W[\n ab\n], + %W[\na b\n\n] => %W[\n ab\n \n], + %W[\na b] => %W[\n ab], + %W[\u1F62E\u200D\u1F4A8] => %W[\u1F62E\u200D\u1F4A8], + %W[\u1F62E \u200D \u1F4A8] => %W[\u1F62E\u200D\u1F4A8], + ["\xf0\x9f".b, "\xa5\xba".b] => ["\xf0\x9f\xa5\xba".b], + ["\xf0".b, "\x9f".b, "\xa5".b, "\xba".b] => ["\xf0\x9f\xa5\xba".b] + } + eols = %W[\n \r \r\n] + cases.each do |enum, expected| + eols.each do |eol| + lines = OpenAI::Internal::Util.decode_lines(enum.map { _1.gsub("\n", eol) }) + assert_equal(expected.map { _1.gsub("\n", eol) }, lines.to_a, "eol=#{JSON.generate(eol)}") + end + end + end + + def test_mixed_decode_lines + cases = { + %w[] => %w[], + %W[\r\r] => %W[\r \r], + %W[\r \r] => %W[\r \r], + %W[\r\r\r] => %W[\r \r \r], + %W[\r\r \r] => %W[\r \r \r], + %W[\r \n] => %W[\r\n], + %W[\r\r\n] => %W[\r \r\n], + %W[\n\r] => %W[\n \r] + } + cases.each do |enum, expected| + lines = OpenAI::Internal::Util.decode_lines(enum) + assert_equal(expected, lines.to_a) + end + end + + def test_decode_sse + cases = { + "empty input" => { + [] => [] + }, + "single data event" => { + [ + "data: hello world\n", + "\n" + ] => [ + {data: "hello world\n"} + ] + }, + "multiple data lines" => { + [ + "data: line 1\n", + "data: line 2\n", + "\n" + ] => [ + {data: "line 1\nline 2\n"} + ] + }, + "complete event" => { + [ + "id: 123\n", + "event: update\n", + "data: hello world\n", + "retry: 5000\n", + "\n" + ] => [ + { + event: "update", + id: "123", + data: "hello world\n", + retry: 5000 + } + ] + }, + "multiple events" => { + [ + "event: update\n", + "data: first\n", + "\n", + "event: message\n", + "data: second\n", + "\n" + ] => [ + {event: "update", data: "first\n"}, + {event: "message", data: "second\n"} + ] + }, + "comments" => { + [ + ": this is a comment\n", + "data: actual data\n", + "\n" + ] => [ + {data: "actual data\n"} + ] + }, + "invalid retry" => { + [ + "retry: not a number\n", + "data: hello\n", + "\n" + ] => [ + {data: "hello\n"} + ] + }, + "invalid id with null" => { + [ + "id: bad\0id\n", + "data: hello\n", + "\n" + ] => [ + {data: "hello\n"} + ] + }, + "leading space in value" => { + [ + "data: hello world\n", + "data: leading space\n", + "\n" + ] => [ + {data: "hello world\n leading space\n"} + ] + }, + "no final newline" => { + [ + "data: hello\n", + "id: 1" + ] => [ + {data: "hello\n", id: "1"} + ] + }, + "multiple empty lines" => 
{ + [ + "data: first\n", + "\n", + "\n", + "data: second\n", + "\n" + ] => [ + {data: "first\n"}, + {data: "second\n"} + ] + }, + "multibyte unicode" => { + [ + "data: \u1F62E\u200D\u1F4A8\n" + ] => [ + {data: "\u1F62E\u200D\u1F4A8\n"} + ] + } + } + + cases.each do |name, test_cases| + test_cases.each do |input, expected| + actual = OpenAI::Internal::Util.decode_sse(input).map(&:compact) + assert_equal(expected, actual, name) + end + end + end +end diff --git a/test/openai/resource_namespaces.rb b/test/openai/resource_namespaces.rb index 2da61a74..db0dcbc7 100644 --- a/test/openai/resource_namespaces.rb +++ b/test/openai/resource_namespaces.rb @@ -3,6 +3,9 @@ module OpenAI module Test module Resources + module Alpha + end + module Audio end @@ -18,14 +21,42 @@ module Completions end end + module Checkpoints + end + module Completions end + module Containers + module Files + end + end + + module Conversations + end + + module Evals + module Runs + end + end + + module Files + end + module FineTuning + module Alpha + end + + module Checkpoints + end + module Jobs end end + module Graders + end + module Jobs end diff --git a/test/openai/resources/audio/speech_test.rb b/test/openai/resources/audio/speech_test.rb index e2d6de20..af4189d5 100644 --- a/test/openai/resources/audio/speech_test.rb +++ b/test/openai/resources/audio/speech_test.rb @@ -4,10 +4,12 @@ class OpenAI::Test::Resources::Audio::SpeechTest < OpenAI::Test::ResourceTest def test_create_required_params - response = @openai.audio.speech.create(input: "input", model: "string", voice: :alloy) + skip("Prism doesn't support application/octet-stream responses") + + response = @openai.audio.speech.create(input: "input", model: :"tts-1", voice: :alloy) assert_pattern do - response => OpenAI::Unknown + response => StringIO end end end diff --git a/test/openai/resources/audio/transcriptions_test.rb b/test/openai/resources/audio/transcriptions_test.rb index ca797160..f4f5b94b 100644 --- a/test/openai/resources/audio/transcriptions_test.rb +++ b/test/openai/resources/audio/transcriptions_test.rb @@ -4,10 +4,7 @@ class OpenAI::Test::Resources::Audio::TranscriptionsTest < OpenAI::Test::ResourceTest def test_create_required_params - response = @openai.audio.transcriptions.create( - file: StringIO.new("some file contents"), - model: :"whisper-1" - ) + response = @openai.audio.transcriptions.create(file: Pathname(__FILE__), model: :"whisper-1") assert_pattern do response => OpenAI::Models::Audio::TranscriptionCreateResponse @@ -15,8 +12,8 @@ def test_create_required_params assert_pattern do case response - in OpenAI::Models::Audio::Transcription - in OpenAI::Models::Audio::TranscriptionVerbose + in OpenAI::Audio::Transcription + in OpenAI::Audio::TranscriptionVerbose end end end diff --git a/test/openai/resources/audio/translations_test.rb b/test/openai/resources/audio/translations_test.rb index f0b69d4f..feb7ccc7 100644 --- a/test/openai/resources/audio/translations_test.rb +++ b/test/openai/resources/audio/translations_test.rb @@ -4,10 +4,7 @@ class OpenAI::Test::Resources::Audio::TranslationsTest < OpenAI::Test::ResourceTest def test_create_required_params - response = @openai.audio.translations.create( - file: StringIO.new("some file contents"), - model: :"whisper-1" - ) + response = @openai.audio.translations.create(file: Pathname(__FILE__), model: :"whisper-1") assert_pattern do response => OpenAI::Models::Audio::TranslationCreateResponse @@ -15,8 +12,8 @@ def test_create_required_params assert_pattern do case response - in 
OpenAI::Models::Audio::Translation - in OpenAI::Models::Audio::TranslationVerbose + in OpenAI::Audio::Translation + in OpenAI::Audio::TranslationVerbose end end end diff --git a/test/openai/resources/batches_test.rb b/test/openai/resources/batches_test.rb index 50ae1244..37bc1861 100644 --- a/test/openai/resources/batches_test.rb +++ b/test/openai/resources/batches_test.rb @@ -4,14 +4,15 @@ class OpenAI::Test::Resources::BatchesTest < OpenAI::Test::ResourceTest def test_create_required_params - response = @openai.batches.create( - completion_window: :"24h", - endpoint: :"/v1/chat/completions", - input_file_id: "input_file_id" - ) + response = + @openai.batches.create( + completion_window: :"24h", + endpoint: :"/v1/responses", + input_file_id: "input_file_id" + ) assert_pattern do - response => OpenAI::Models::Batch + response => OpenAI::Batch end assert_pattern do @@ -22,20 +23,20 @@ def test_create_required_params endpoint: String, input_file_id: String, object: Symbol, - status: OpenAI::Models::Batch::Status, + status: OpenAI::Batch::Status, cancelled_at: Integer | nil, cancelling_at: Integer | nil, completed_at: Integer | nil, error_file_id: String | nil, - errors: OpenAI::Models::Batch::Errors | nil, + errors: OpenAI::Batch::Errors | nil, expired_at: Integer | nil, expires_at: Integer | nil, failed_at: Integer | nil, finalizing_at: Integer | nil, in_progress_at: Integer | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, output_file_id: String | nil, - request_counts: OpenAI::Models::BatchRequestCounts | nil + request_counts: OpenAI::BatchRequestCounts | nil } end end @@ -44,7 +45,7 @@ def test_retrieve response = @openai.batches.retrieve("batch_id") assert_pattern do - response => OpenAI::Models::Batch + response => OpenAI::Batch end assert_pattern do @@ -55,20 +56,20 @@ def test_retrieve endpoint: String, input_file_id: String, object: Symbol, - status: OpenAI::Models::Batch::Status, + status: OpenAI::Batch::Status, cancelled_at: Integer | nil, cancelling_at: Integer | nil, completed_at: Integer | nil, error_file_id: String | nil, - errors: OpenAI::Models::Batch::Errors | nil, + errors: OpenAI::Batch::Errors | nil, expired_at: Integer | nil, expires_at: Integer | nil, failed_at: Integer | nil, finalizing_at: Integer | nil, in_progress_at: Integer | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, output_file_id: String | nil, - request_counts: OpenAI::Models::BatchRequestCounts | nil + request_counts: OpenAI::BatchRequestCounts | nil } end end @@ -77,12 +78,14 @@ def test_list response = @openai.batches.list assert_pattern do - response => OpenAI::CursorPage + response => OpenAI::Internal::CursorPage end row = response.to_enum.first + return if row.nil? 
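+ # Prism (the mock API server) may serve an empty list page; when a first row exists, it is matched against the typed Batch model and its full field shape below. The same nil guard recurs in every list test.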
+ assert_pattern do - row => OpenAI::Models::Batch + row => OpenAI::Batch end assert_pattern do @@ -93,20 +96,20 @@ def test_list endpoint: String, input_file_id: String, object: Symbol, - status: OpenAI::Models::Batch::Status, + status: OpenAI::Batch::Status, cancelled_at: Integer | nil, cancelling_at: Integer | nil, completed_at: Integer | nil, error_file_id: String | nil, - errors: OpenAI::Models::Batch::Errors | nil, + errors: OpenAI::Batch::Errors | nil, expired_at: Integer | nil, expires_at: Integer | nil, failed_at: Integer | nil, finalizing_at: Integer | nil, in_progress_at: Integer | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, output_file_id: String | nil, - request_counts: OpenAI::Models::BatchRequestCounts | nil + request_counts: OpenAI::BatchRequestCounts | nil } end end @@ -115,7 +118,7 @@ def test_cancel response = @openai.batches.cancel("batch_id") assert_pattern do - response => OpenAI::Models::Batch + response => OpenAI::Batch end assert_pattern do @@ -126,20 +129,20 @@ def test_cancel endpoint: String, input_file_id: String, object: Symbol, - status: OpenAI::Models::Batch::Status, + status: OpenAI::Batch::Status, cancelled_at: Integer | nil, cancelling_at: Integer | nil, completed_at: Integer | nil, error_file_id: String | nil, - errors: OpenAI::Models::Batch::Errors | nil, + errors: OpenAI::Batch::Errors | nil, expired_at: Integer | nil, expires_at: Integer | nil, failed_at: Integer | nil, finalizing_at: Integer | nil, in_progress_at: Integer | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, output_file_id: String | nil, - request_counts: OpenAI::Models::BatchRequestCounts | nil + request_counts: OpenAI::BatchRequestCounts | nil } end end diff --git a/test/openai/resources/beta/assistants_test.rb b/test/openai/resources/beta/assistants_test.rb index 175fb0d5..d10b07fd 100644 --- a/test/openai/resources/beta/assistants_test.rb +++ b/test/openai/resources/beta/assistants_test.rb @@ -4,10 +4,10 @@ class OpenAI::Test::Resources::Beta::AssistantsTest < OpenAI::Test::ResourceTest def test_create_required_params - response = @openai.beta.assistants.create(model: :"gpt-4o") + response = @openai.beta.assistants.create(model: :"gpt-5") assert_pattern do - response => OpenAI::Models::Beta::Assistant + response => OpenAI::Beta::Assistant end assert_pattern do @@ -16,14 +16,14 @@ def test_create_required_params created_at: Integer, description: String | nil, instructions: String | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, model: String, name: String | nil, object: Symbol, - tools: ^(OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool]), - response_format: OpenAI::Models::Beta::AssistantResponseFormatOption | nil, + tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool]), + response_format: OpenAI::Beta::AssistantResponseFormatOption | nil, temperature: Float | nil, - tool_resources: OpenAI::Models::Beta::Assistant::ToolResources | nil, + tool_resources: OpenAI::Beta::Assistant::ToolResources | nil, top_p: Float | nil } end @@ -33,7 +33,7 @@ def test_retrieve response = @openai.beta.assistants.retrieve("assistant_id") assert_pattern do - response => OpenAI::Models::Beta::Assistant + response => OpenAI::Beta::Assistant end assert_pattern do @@ -42,14 +42,14 @@ def test_retrieve created_at: Integer, description: String | nil, instructions: String | nil, - 
metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, model: String, name: String | nil, object: Symbol, - tools: ^(OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool]), - response_format: OpenAI::Models::Beta::AssistantResponseFormatOption | nil, + tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool]), + response_format: OpenAI::Beta::AssistantResponseFormatOption | nil, temperature: Float | nil, - tool_resources: OpenAI::Models::Beta::Assistant::ToolResources | nil, + tool_resources: OpenAI::Beta::Assistant::ToolResources | nil, top_p: Float | nil } end @@ -59,7 +59,7 @@ def test_update response = @openai.beta.assistants.update("assistant_id") assert_pattern do - response => OpenAI::Models::Beta::Assistant + response => OpenAI::Beta::Assistant end assert_pattern do @@ -68,14 +68,14 @@ def test_update created_at: Integer, description: String | nil, instructions: String | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, model: String, name: String | nil, object: Symbol, - tools: ^(OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool]), - response_format: OpenAI::Models::Beta::AssistantResponseFormatOption | nil, + tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool]), + response_format: OpenAI::Beta::AssistantResponseFormatOption | nil, temperature: Float | nil, - tool_resources: OpenAI::Models::Beta::Assistant::ToolResources | nil, + tool_resources: OpenAI::Beta::Assistant::ToolResources | nil, top_p: Float | nil } end @@ -85,12 +85,14 @@ def test_list response = @openai.beta.assistants.list assert_pattern do - response => OpenAI::CursorPage + response => OpenAI::Internal::CursorPage end row = response.to_enum.first + return if row.nil? 
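+ # Each assistant row is additionally checked below for the tools array-of-union and the nilable tool_resources shape.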
+ assert_pattern do - row => OpenAI::Models::Beta::Assistant + row => OpenAI::Beta::Assistant end assert_pattern do @@ -99,14 +101,14 @@ def test_list created_at: Integer, description: String | nil, instructions: String | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, model: String, name: String | nil, object: Symbol, - tools: ^(OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool]), - response_format: OpenAI::Models::Beta::AssistantResponseFormatOption | nil, + tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool]), + response_format: OpenAI::Beta::AssistantResponseFormatOption | nil, temperature: Float | nil, - tool_resources: OpenAI::Models::Beta::Assistant::ToolResources | nil, + tool_resources: OpenAI::Beta::Assistant::ToolResources | nil, top_p: Float | nil } end @@ -116,13 +118,13 @@ def test_delete response = @openai.beta.assistants.delete("assistant_id") assert_pattern do - response => OpenAI::Models::Beta::AssistantDeleted + response => OpenAI::Beta::AssistantDeleted end assert_pattern do response => { id: String, - deleted: OpenAI::BooleanModel, + deleted: OpenAI::Internal::Type::Boolean, object: Symbol } end diff --git a/test/openai/resources/beta/threads/messages_test.rb b/test/openai/resources/beta/threads/messages_test.rb index 42f89d03..c2904ac0 100644 --- a/test/openai/resources/beta/threads/messages_test.rb +++ b/test/openai/resources/beta/threads/messages_test.rb @@ -7,24 +7,24 @@ def test_create_required_params response = @openai.beta.threads.messages.create("thread_id", content: "string", role: :user) assert_pattern do - response => OpenAI::Models::Beta::Threads::Message + response => OpenAI::Beta::Threads::Message end assert_pattern do response => { id: String, assistant_id: String | nil, - attachments: ^(OpenAI::ArrayOf[OpenAI::Models::Beta::Threads::Message::Attachment]) | nil, + attachments: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::Message::Attachment]) | nil, completed_at: Integer | nil, - content: ^(OpenAI::ArrayOf[union: OpenAI::Models::Beta::Threads::MessageContent]), + content: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::MessageContent]), created_at: Integer, incomplete_at: Integer | nil, - incomplete_details: OpenAI::Models::Beta::Threads::Message::IncompleteDetails | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + incomplete_details: OpenAI::Beta::Threads::Message::IncompleteDetails | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, object: Symbol, - role: OpenAI::Models::Beta::Threads::Message::Role, + role: OpenAI::Beta::Threads::Message::Role, run_id: String | nil, - status: OpenAI::Models::Beta::Threads::Message::Status, + status: OpenAI::Beta::Threads::Message::Status, thread_id: String } end @@ -34,24 +34,24 @@ def test_retrieve_required_params response = @openai.beta.threads.messages.retrieve("message_id", thread_id: "thread_id") assert_pattern do - response => OpenAI::Models::Beta::Threads::Message + response => OpenAI::Beta::Threads::Message end assert_pattern do response => { id: String, assistant_id: String | nil, - attachments: ^(OpenAI::ArrayOf[OpenAI::Models::Beta::Threads::Message::Attachment]) | nil, + attachments: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::Message::Attachment]) | nil, completed_at: Integer | nil, - content: ^(OpenAI::ArrayOf[union: OpenAI::Models::Beta::Threads::MessageContent]), + content: ^(OpenAI::Internal::Type::ArrayOf[union: 
OpenAI::Beta::Threads::MessageContent]), created_at: Integer, incomplete_at: Integer | nil, - incomplete_details: OpenAI::Models::Beta::Threads::Message::IncompleteDetails | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + incomplete_details: OpenAI::Beta::Threads::Message::IncompleteDetails | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, object: Symbol, - role: OpenAI::Models::Beta::Threads::Message::Role, + role: OpenAI::Beta::Threads::Message::Role, run_id: String | nil, - status: OpenAI::Models::Beta::Threads::Message::Status, + status: OpenAI::Beta::Threads::Message::Status, thread_id: String } end @@ -61,24 +61,24 @@ def test_update_required_params response = @openai.beta.threads.messages.update("message_id", thread_id: "thread_id") assert_pattern do - response => OpenAI::Models::Beta::Threads::Message + response => OpenAI::Beta::Threads::Message end assert_pattern do response => { id: String, assistant_id: String | nil, - attachments: ^(OpenAI::ArrayOf[OpenAI::Models::Beta::Threads::Message::Attachment]) | nil, + attachments: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::Message::Attachment]) | nil, completed_at: Integer | nil, - content: ^(OpenAI::ArrayOf[union: OpenAI::Models::Beta::Threads::MessageContent]), + content: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::MessageContent]), created_at: Integer, incomplete_at: Integer | nil, - incomplete_details: OpenAI::Models::Beta::Threads::Message::IncompleteDetails | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + incomplete_details: OpenAI::Beta::Threads::Message::IncompleteDetails | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, object: Symbol, - role: OpenAI::Models::Beta::Threads::Message::Role, + role: OpenAI::Beta::Threads::Message::Role, run_id: String | nil, - status: OpenAI::Models::Beta::Threads::Message::Status, + status: OpenAI::Beta::Threads::Message::Status, thread_id: String } end @@ -88,29 +88,31 @@ def test_list response = @openai.beta.threads.messages.list("thread_id") assert_pattern do - response => OpenAI::CursorPage + response => OpenAI::Internal::CursorPage end row = response.to_enum.first + return if row.nil? 
+ assert_pattern do - row => OpenAI::Models::Beta::Threads::Message + row => OpenAI::Beta::Threads::Message end assert_pattern do row => { id: String, assistant_id: String | nil, - attachments: ^(OpenAI::ArrayOf[OpenAI::Models::Beta::Threads::Message::Attachment]) | nil, + attachments: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::Message::Attachment]) | nil, completed_at: Integer | nil, - content: ^(OpenAI::ArrayOf[union: OpenAI::Models::Beta::Threads::MessageContent]), + content: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::MessageContent]), created_at: Integer, incomplete_at: Integer | nil, - incomplete_details: OpenAI::Models::Beta::Threads::Message::IncompleteDetails | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + incomplete_details: OpenAI::Beta::Threads::Message::IncompleteDetails | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, object: Symbol, - role: OpenAI::Models::Beta::Threads::Message::Role, + role: OpenAI::Beta::Threads::Message::Role, run_id: String | nil, - status: OpenAI::Models::Beta::Threads::Message::Status, + status: OpenAI::Beta::Threads::Message::Status, thread_id: String } end @@ -120,13 +122,13 @@ def test_delete_required_params response = @openai.beta.threads.messages.delete("message_id", thread_id: "thread_id") assert_pattern do - response => OpenAI::Models::Beta::Threads::MessageDeleted + response => OpenAI::Beta::Threads::MessageDeleted end assert_pattern do response => { id: String, - deleted: OpenAI::BooleanModel, + deleted: OpenAI::Internal::Type::Boolean, object: Symbol } end diff --git a/test/openai/resources/beta/threads/runs/steps_test.rb b/test/openai/resources/beta/threads/runs/steps_test.rb index e18e9336..bb9c7355 100644 --- a/test/openai/resources/beta/threads/runs/steps_test.rb +++ b/test/openai/resources/beta/threads/runs/steps_test.rb @@ -7,7 +7,7 @@ def test_retrieve_required_params response = @openai.beta.threads.runs.steps.retrieve("step_id", thread_id: "thread_id", run_id: "run_id") assert_pattern do - response => OpenAI::Models::Beta::Threads::Runs::RunStep + response => OpenAI::Beta::Threads::Runs::RunStep end assert_pattern do @@ -19,15 +19,15 @@ def test_retrieve_required_params created_at: Integer, expired_at: Integer | nil, failed_at: Integer | nil, - last_error: OpenAI::Models::Beta::Threads::Runs::RunStep::LastError | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + last_error: OpenAI::Beta::Threads::Runs::RunStep::LastError | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, object: Symbol, run_id: String, - status: OpenAI::Models::Beta::Threads::Runs::RunStep::Status, - step_details: OpenAI::Models::Beta::Threads::Runs::RunStep::StepDetails, + status: OpenAI::Beta::Threads::Runs::RunStep::Status, + step_details: OpenAI::Beta::Threads::Runs::RunStep::StepDetails, thread_id: String, - type: OpenAI::Models::Beta::Threads::Runs::RunStep::Type, - usage: OpenAI::Models::Beta::Threads::Runs::RunStep::Usage | nil + type: OpenAI::Beta::Threads::Runs::RunStep::Type, + usage: OpenAI::Beta::Threads::Runs::RunStep::Usage | nil } end end @@ -36,12 +36,14 @@ def test_list_required_params response = @openai.beta.threads.runs.steps.list("run_id", thread_id: "thread_id") assert_pattern do - response => OpenAI::CursorPage + response => OpenAI::Internal::CursorPage end row = response.to_enum.first + return if row.nil? 
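+ # Run step rows carry typed step_details and a nilable usage block, both asserted below.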
+ assert_pattern do - row => OpenAI::Models::Beta::Threads::Runs::RunStep + row => OpenAI::Beta::Threads::Runs::RunStep end assert_pattern do @@ -53,15 +55,15 @@ def test_list_required_params created_at: Integer, expired_at: Integer | nil, failed_at: Integer | nil, - last_error: OpenAI::Models::Beta::Threads::Runs::RunStep::LastError | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + last_error: OpenAI::Beta::Threads::Runs::RunStep::LastError | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, object: Symbol, run_id: String, - status: OpenAI::Models::Beta::Threads::Runs::RunStep::Status, - step_details: OpenAI::Models::Beta::Threads::Runs::RunStep::StepDetails, + status: OpenAI::Beta::Threads::Runs::RunStep::Status, + step_details: OpenAI::Beta::Threads::Runs::RunStep::StepDetails, thread_id: String, - type: OpenAI::Models::Beta::Threads::Runs::RunStep::Type, - usage: OpenAI::Models::Beta::Threads::Runs::RunStep::Usage | nil + type: OpenAI::Beta::Threads::Runs::RunStep::Type, + usage: OpenAI::Beta::Threads::Runs::RunStep::Usage | nil } end end diff --git a/test/openai/resources/beta/threads/runs_test.rb b/test/openai/resources/beta/threads/runs_test.rb index 98123435..bed1e829 100644 --- a/test/openai/resources/beta/threads/runs_test.rb +++ b/test/openai/resources/beta/threads/runs_test.rb @@ -4,10 +4,10 @@ class OpenAI::Test::Resources::Beta::Threads::RunsTest < OpenAI::Test::ResourceTest def test_create_required_params - response = @openai.beta.threads.runs.create("thread_id", assistant_id: "assistant_id", stream: true) + response = @openai.beta.threads.runs.create("thread_id", assistant_id: "assistant_id") assert_pattern do - response => OpenAI::Models::Beta::Threads::Run + response => OpenAI::Beta::Threads::Run end assert_pattern do @@ -19,24 +19,24 @@ def test_create_required_params created_at: Integer, expires_at: Integer | nil, failed_at: Integer | nil, - incomplete_details: OpenAI::Models::Beta::Threads::Run::IncompleteDetails | nil, + incomplete_details: OpenAI::Beta::Threads::Run::IncompleteDetails | nil, instructions: String, - last_error: OpenAI::Models::Beta::Threads::Run::LastError | nil, + last_error: OpenAI::Beta::Threads::Run::LastError | nil, max_completion_tokens: Integer | nil, max_prompt_tokens: Integer | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, model: String, object: Symbol, - parallel_tool_calls: OpenAI::BooleanModel, - required_action: OpenAI::Models::Beta::Threads::Run::RequiredAction | nil, - response_format: OpenAI::Models::Beta::AssistantResponseFormatOption | nil, + parallel_tool_calls: OpenAI::Internal::Type::Boolean, + required_action: OpenAI::Beta::Threads::Run::RequiredAction | nil, + response_format: OpenAI::Beta::AssistantResponseFormatOption | nil, started_at: Integer | nil, - status: OpenAI::Models::Beta::Threads::RunStatus, + status: OpenAI::Beta::Threads::RunStatus, thread_id: String, - tool_choice: OpenAI::Models::Beta::AssistantToolChoiceOption | nil, - tools: ^(OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool]), - truncation_strategy: OpenAI::Models::Beta::Threads::Run::TruncationStrategy | nil, - usage: OpenAI::Models::Beta::Threads::Run::Usage | nil, + tool_choice: OpenAI::Beta::AssistantToolChoiceOption | nil, + tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool]), + truncation_strategy: OpenAI::Beta::Threads::Run::TruncationStrategy | nil, + usage: OpenAI::Beta::Threads::Run::Usage | nil, temperature: Float | nil, top_p: 
Float | nil } @@ -47,7 +47,7 @@ def test_retrieve_required_params response = @openai.beta.threads.runs.retrieve("run_id", thread_id: "thread_id") assert_pattern do - response => OpenAI::Models::Beta::Threads::Run + response => OpenAI::Beta::Threads::Run end assert_pattern do @@ -59,24 +59,24 @@ def test_retrieve_required_params created_at: Integer, expires_at: Integer | nil, failed_at: Integer | nil, - incomplete_details: OpenAI::Models::Beta::Threads::Run::IncompleteDetails | nil, + incomplete_details: OpenAI::Beta::Threads::Run::IncompleteDetails | nil, instructions: String, - last_error: OpenAI::Models::Beta::Threads::Run::LastError | nil, + last_error: OpenAI::Beta::Threads::Run::LastError | nil, max_completion_tokens: Integer | nil, max_prompt_tokens: Integer | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, model: String, object: Symbol, - parallel_tool_calls: OpenAI::BooleanModel, - required_action: OpenAI::Models::Beta::Threads::Run::RequiredAction | nil, - response_format: OpenAI::Models::Beta::AssistantResponseFormatOption | nil, + parallel_tool_calls: OpenAI::Internal::Type::Boolean, + required_action: OpenAI::Beta::Threads::Run::RequiredAction | nil, + response_format: OpenAI::Beta::AssistantResponseFormatOption | nil, started_at: Integer | nil, - status: OpenAI::Models::Beta::Threads::RunStatus, + status: OpenAI::Beta::Threads::RunStatus, thread_id: String, - tool_choice: OpenAI::Models::Beta::AssistantToolChoiceOption | nil, - tools: ^(OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool]), - truncation_strategy: OpenAI::Models::Beta::Threads::Run::TruncationStrategy | nil, - usage: OpenAI::Models::Beta::Threads::Run::Usage | nil, + tool_choice: OpenAI::Beta::AssistantToolChoiceOption | nil, + tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool]), + truncation_strategy: OpenAI::Beta::Threads::Run::TruncationStrategy | nil, + usage: OpenAI::Beta::Threads::Run::Usage | nil, temperature: Float | nil, top_p: Float | nil } @@ -87,7 +87,7 @@ def test_update_required_params response = @openai.beta.threads.runs.update("run_id", thread_id: "thread_id") assert_pattern do - response => OpenAI::Models::Beta::Threads::Run + response => OpenAI::Beta::Threads::Run end assert_pattern do @@ -99,24 +99,24 @@ def test_update_required_params created_at: Integer, expires_at: Integer | nil, failed_at: Integer | nil, - incomplete_details: OpenAI::Models::Beta::Threads::Run::IncompleteDetails | nil, + incomplete_details: OpenAI::Beta::Threads::Run::IncompleteDetails | nil, instructions: String, - last_error: OpenAI::Models::Beta::Threads::Run::LastError | nil, + last_error: OpenAI::Beta::Threads::Run::LastError | nil, max_completion_tokens: Integer | nil, max_prompt_tokens: Integer | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, model: String, object: Symbol, - parallel_tool_calls: OpenAI::BooleanModel, - required_action: OpenAI::Models::Beta::Threads::Run::RequiredAction | nil, - response_format: OpenAI::Models::Beta::AssistantResponseFormatOption | nil, + parallel_tool_calls: OpenAI::Internal::Type::Boolean, + required_action: OpenAI::Beta::Threads::Run::RequiredAction | nil, + response_format: OpenAI::Beta::AssistantResponseFormatOption | nil, started_at: Integer | nil, - status: OpenAI::Models::Beta::Threads::RunStatus, + status: OpenAI::Beta::Threads::RunStatus, thread_id: String, - tool_choice: 
OpenAI::Models::Beta::AssistantToolChoiceOption | nil, - tools: ^(OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool]), - truncation_strategy: OpenAI::Models::Beta::Threads::Run::TruncationStrategy | nil, - usage: OpenAI::Models::Beta::Threads::Run::Usage | nil, + tool_choice: OpenAI::Beta::AssistantToolChoiceOption | nil, + tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool]), + truncation_strategy: OpenAI::Beta::Threads::Run::TruncationStrategy | nil, + usage: OpenAI::Beta::Threads::Run::Usage | nil, temperature: Float | nil, top_p: Float | nil } @@ -127,12 +127,14 @@ def test_list response = @openai.beta.threads.runs.list("thread_id") assert_pattern do - response => OpenAI::CursorPage + response => OpenAI::Internal::CursorPage end row = response.to_enum.first + return if row.nil? + assert_pattern do - row => OpenAI::Models::Beta::Threads::Run + row => OpenAI::Beta::Threads::Run end assert_pattern do @@ -144,24 +146,24 @@ def test_list created_at: Integer, expires_at: Integer | nil, failed_at: Integer | nil, - incomplete_details: OpenAI::Models::Beta::Threads::Run::IncompleteDetails | nil, + incomplete_details: OpenAI::Beta::Threads::Run::IncompleteDetails | nil, instructions: String, - last_error: OpenAI::Models::Beta::Threads::Run::LastError | nil, + last_error: OpenAI::Beta::Threads::Run::LastError | nil, max_completion_tokens: Integer | nil, max_prompt_tokens: Integer | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, model: String, object: Symbol, - parallel_tool_calls: OpenAI::BooleanModel, - required_action: OpenAI::Models::Beta::Threads::Run::RequiredAction | nil, - response_format: OpenAI::Models::Beta::AssistantResponseFormatOption | nil, + parallel_tool_calls: OpenAI::Internal::Type::Boolean, + required_action: OpenAI::Beta::Threads::Run::RequiredAction | nil, + response_format: OpenAI::Beta::AssistantResponseFormatOption | nil, started_at: Integer | nil, - status: OpenAI::Models::Beta::Threads::RunStatus, + status: OpenAI::Beta::Threads::RunStatus, thread_id: String, - tool_choice: OpenAI::Models::Beta::AssistantToolChoiceOption | nil, - tools: ^(OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool]), - truncation_strategy: OpenAI::Models::Beta::Threads::Run::TruncationStrategy | nil, - usage: OpenAI::Models::Beta::Threads::Run::Usage | nil, + tool_choice: OpenAI::Beta::AssistantToolChoiceOption | nil, + tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool]), + truncation_strategy: OpenAI::Beta::Threads::Run::TruncationStrategy | nil, + usage: OpenAI::Beta::Threads::Run::Usage | nil, temperature: Float | nil, top_p: Float | nil } @@ -172,7 +174,7 @@ def test_cancel_required_params response = @openai.beta.threads.runs.cancel("run_id", thread_id: "thread_id") assert_pattern do - response => OpenAI::Models::Beta::Threads::Run + response => OpenAI::Beta::Threads::Run end assert_pattern do @@ -184,24 +186,24 @@ def test_cancel_required_params created_at: Integer, expires_at: Integer | nil, failed_at: Integer | nil, - incomplete_details: OpenAI::Models::Beta::Threads::Run::IncompleteDetails | nil, + incomplete_details: OpenAI::Beta::Threads::Run::IncompleteDetails | nil, instructions: String, - last_error: OpenAI::Models::Beta::Threads::Run::LastError | nil, + last_error: OpenAI::Beta::Threads::Run::LastError | nil, max_completion_tokens: Integer | nil, max_prompt_tokens: Integer | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: 
^(OpenAI::Internal::Type::HashOf[String]) | nil, model: String, object: Symbol, - parallel_tool_calls: OpenAI::BooleanModel, - required_action: OpenAI::Models::Beta::Threads::Run::RequiredAction | nil, - response_format: OpenAI::Models::Beta::AssistantResponseFormatOption | nil, + parallel_tool_calls: OpenAI::Internal::Type::Boolean, + required_action: OpenAI::Beta::Threads::Run::RequiredAction | nil, + response_format: OpenAI::Beta::AssistantResponseFormatOption | nil, started_at: Integer | nil, - status: OpenAI::Models::Beta::Threads::RunStatus, + status: OpenAI::Beta::Threads::RunStatus, thread_id: String, - tool_choice: OpenAI::Models::Beta::AssistantToolChoiceOption | nil, - tools: ^(OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool]), - truncation_strategy: OpenAI::Models::Beta::Threads::Run::TruncationStrategy | nil, - usage: OpenAI::Models::Beta::Threads::Run::Usage | nil, + tool_choice: OpenAI::Beta::AssistantToolChoiceOption | nil, + tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool]), + truncation_strategy: OpenAI::Beta::Threads::Run::TruncationStrategy | nil, + usage: OpenAI::Beta::Threads::Run::Usage | nil, temperature: Float | nil, top_p: Float | nil } @@ -209,15 +211,11 @@ def test_cancel_required_params end def test_submit_tool_outputs_required_params - response = @openai.beta.threads.runs.submit_tool_outputs( - "run_id", - thread_id: "thread_id", - stream: true, - tool_outputs: [{}] - ) + response = + @openai.beta.threads.runs.submit_tool_outputs("run_id", thread_id: "thread_id", tool_outputs: [{}]) assert_pattern do - response => OpenAI::Models::Beta::Threads::Run + response => OpenAI::Beta::Threads::Run end assert_pattern do @@ -229,24 +227,24 @@ def test_submit_tool_outputs_required_params created_at: Integer, expires_at: Integer | nil, failed_at: Integer | nil, - incomplete_details: OpenAI::Models::Beta::Threads::Run::IncompleteDetails | nil, + incomplete_details: OpenAI::Beta::Threads::Run::IncompleteDetails | nil, instructions: String, - last_error: OpenAI::Models::Beta::Threads::Run::LastError | nil, + last_error: OpenAI::Beta::Threads::Run::LastError | nil, max_completion_tokens: Integer | nil, max_prompt_tokens: Integer | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, model: String, object: Symbol, - parallel_tool_calls: OpenAI::BooleanModel, - required_action: OpenAI::Models::Beta::Threads::Run::RequiredAction | nil, - response_format: OpenAI::Models::Beta::AssistantResponseFormatOption | nil, + parallel_tool_calls: OpenAI::Internal::Type::Boolean, + required_action: OpenAI::Beta::Threads::Run::RequiredAction | nil, + response_format: OpenAI::Beta::AssistantResponseFormatOption | nil, started_at: Integer | nil, - status: OpenAI::Models::Beta::Threads::RunStatus, + status: OpenAI::Beta::Threads::RunStatus, thread_id: String, - tool_choice: OpenAI::Models::Beta::AssistantToolChoiceOption | nil, - tools: ^(OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool]), - truncation_strategy: OpenAI::Models::Beta::Threads::Run::TruncationStrategy | nil, - usage: OpenAI::Models::Beta::Threads::Run::Usage | nil, + tool_choice: OpenAI::Beta::AssistantToolChoiceOption | nil, + tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool]), + truncation_strategy: OpenAI::Beta::Threads::Run::TruncationStrategy | nil, + usage: OpenAI::Beta::Threads::Run::Usage | nil, temperature: Float | nil, top_p: Float | nil } diff --git 
a/test/openai/resources/beta/threads_test.rb b/test/openai/resources/beta/threads_test.rb index 4203a6f3..903a5185 100644 --- a/test/openai/resources/beta/threads_test.rb +++ b/test/openai/resources/beta/threads_test.rb @@ -7,16 +7,16 @@ def test_create response = @openai.beta.threads.create assert_pattern do - response => OpenAI::Models::Beta::Thread + response => OpenAI::Beta::Thread end assert_pattern do response => { id: String, created_at: Integer, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, object: Symbol, - tool_resources: OpenAI::Models::Beta::Thread::ToolResources | nil + tool_resources: OpenAI::Beta::Thread::ToolResources | nil } end end @@ -25,16 +25,16 @@ def test_retrieve response = @openai.beta.threads.retrieve("thread_id") assert_pattern do - response => OpenAI::Models::Beta::Thread + response => OpenAI::Beta::Thread end assert_pattern do response => { id: String, created_at: Integer, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, object: Symbol, - tool_resources: OpenAI::Models::Beta::Thread::ToolResources | nil + tool_resources: OpenAI::Beta::Thread::ToolResources | nil } end end @@ -43,16 +43,16 @@ def test_update response = @openai.beta.threads.update("thread_id") assert_pattern do - response => OpenAI::Models::Beta::Thread + response => OpenAI::Beta::Thread end assert_pattern do response => { id: String, created_at: Integer, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, object: Symbol, - tool_resources: OpenAI::Models::Beta::Thread::ToolResources | nil + tool_resources: OpenAI::Beta::Thread::ToolResources | nil } end end @@ -61,23 +61,23 @@ def test_delete response = @openai.beta.threads.delete("thread_id") assert_pattern do - response => OpenAI::Models::Beta::ThreadDeleted + response => OpenAI::Beta::ThreadDeleted end assert_pattern do response => { id: String, - deleted: OpenAI::BooleanModel, + deleted: OpenAI::Internal::Type::Boolean, object: Symbol } end end def test_create_and_run_required_params - response = @openai.beta.threads.create_and_run(assistant_id: "assistant_id", stream: true) + response = @openai.beta.threads.create_and_run(assistant_id: "assistant_id") assert_pattern do - response => OpenAI::Models::Beta::Threads::Run + response => OpenAI::Beta::Threads::Run end assert_pattern do @@ -89,24 +89,24 @@ def test_create_and_run_required_params created_at: Integer, expires_at: Integer | nil, failed_at: Integer | nil, - incomplete_details: OpenAI::Models::Beta::Threads::Run::IncompleteDetails | nil, + incomplete_details: OpenAI::Beta::Threads::Run::IncompleteDetails | nil, instructions: String, - last_error: OpenAI::Models::Beta::Threads::Run::LastError | nil, + last_error: OpenAI::Beta::Threads::Run::LastError | nil, max_completion_tokens: Integer | nil, max_prompt_tokens: Integer | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, model: String, object: Symbol, - parallel_tool_calls: OpenAI::BooleanModel, - required_action: OpenAI::Models::Beta::Threads::Run::RequiredAction | nil, - response_format: OpenAI::Models::Beta::AssistantResponseFormatOption | nil, + parallel_tool_calls: OpenAI::Internal::Type::Boolean, + required_action: OpenAI::Beta::Threads::Run::RequiredAction | nil, + response_format: OpenAI::Beta::AssistantResponseFormatOption | nil, started_at: Integer | nil, - status: 
OpenAI::Models::Beta::Threads::RunStatus,
+        status: OpenAI::Beta::Threads::RunStatus,
         thread_id: String,
-        tool_choice: OpenAI::Models::Beta::AssistantToolChoiceOption | nil,
-        tools: ^(OpenAI::ArrayOf[union: OpenAI::Models::Beta::AssistantTool]),
-        truncation_strategy: OpenAI::Models::Beta::Threads::Run::TruncationStrategy | nil,
-        usage: OpenAI::Models::Beta::Threads::Run::Usage | nil,
+        tool_choice: OpenAI::Beta::AssistantToolChoiceOption | nil,
+        tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool]),
+        truncation_strategy: OpenAI::Beta::Threads::Run::TruncationStrategy | nil,
+        usage: OpenAI::Beta::Threads::Run::Usage | nil,
         temperature: Float | nil,
         top_p: Float | nil
       }
diff --git a/test/openai/resources/chat/completions/messages_test.rb b/test/openai/resources/chat/completions/messages_test.rb
index 72c10f96..99f3a865 100644
--- a/test/openai/resources/chat/completions/messages_test.rb
+++ b/test/openai/resources/chat/completions/messages_test.rb
@@ -7,12 +7,14 @@ def test_list
     response = @openai.chat.completions.messages.list("completion_id")

     assert_pattern do
-      response => OpenAI::CursorPage
+      response => OpenAI::Internal::CursorPage
     end

     row = response.to_enum.first
+    return if row.nil?
+
     assert_pattern do
-      row => OpenAI::Models::Chat::ChatCompletionStoreMessage
+      row => OpenAI::Chat::ChatCompletionStoreMessage
     end
   end
 end
diff --git a/test/openai/resources/chat/completions_test.rb b/test/openai/resources/chat/completions_test.rb
index c45ef70f..e4e742e8 100644
--- a/test/openai/resources/chat/completions_test.rb
+++ b/test/openai/resources/chat/completions_test.rb
@@ -4,26 +4,23 @@
 class OpenAI::Test::Resources::Chat::CompletionsTest < OpenAI::Test::ResourceTest
   def test_create_required_params
-    response = @openai.chat.completions.create(
-      messages: [{content: "string", role: :developer}],
-      model: :"gpt-4o",
-      stream: true
-    )
+    response =
+      @openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5")

     assert_pattern do
-      response => OpenAI::Models::Chat::ChatCompletion
+      response => OpenAI::Chat::ChatCompletion
     end

     assert_pattern do
       response => {
         id: String,
-        choices: ^(OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletion::Choice]),
+        choices: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletion::Choice]),
         created: Integer,
         model: String,
         object: Symbol,
-        service_tier: OpenAI::Models::Chat::ChatCompletion::ServiceTier | nil,
+        service_tier: OpenAI::Chat::ChatCompletion::ServiceTier | nil,
         system_fingerprint: String | nil,
-        usage: OpenAI::Models::CompletionUsage | nil
+        usage: OpenAI::CompletionUsage | nil
       }
     end
   end
@@ -32,19 +29,19 @@ def test_retrieve
     response = @openai.chat.completions.retrieve("completion_id")

     assert_pattern do
-      response => OpenAI::Models::Chat::ChatCompletion
+      response => OpenAI::Chat::ChatCompletion
     end

     assert_pattern do
       response => {
         id: String,
-        choices: ^(OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletion::Choice]),
+        choices: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletion::Choice]),
         created: Integer,
         model: String,
         object: Symbol,
-        service_tier: OpenAI::Models::Chat::ChatCompletion::ServiceTier | nil,
+        service_tier: OpenAI::Chat::ChatCompletion::ServiceTier | nil,
         system_fingerprint: String | nil,
-        usage: OpenAI::Models::CompletionUsage | nil
+        usage: OpenAI::CompletionUsage | nil
       }
     end
   end
@@ -53,19 +50,19 @@ def test_update_required_params
     response = @openai.chat.completions.update("completion_id", metadata: {foo: "string"})

     assert_pattern do
-      response => OpenAI::Models::Chat::ChatCompletion
+      response => OpenAI::Chat::ChatCompletion
     end

     assert_pattern do
       response => {
         id: String,
-        choices: ^(OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletion::Choice]),
+        choices: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletion::Choice]),
         created: Integer,
         model: String,
         object: Symbol,
-        service_tier: OpenAI::Models::Chat::ChatCompletion::ServiceTier | nil,
+        service_tier: OpenAI::Chat::ChatCompletion::ServiceTier | nil,
         system_fingerprint: String | nil,
-        usage: OpenAI::Models::CompletionUsage | nil
+        usage: OpenAI::CompletionUsage | nil
       }
     end
   end
@@ -74,24 +71,26 @@ def test_list
     response = @openai.chat.completions.list

     assert_pattern do
-      response => OpenAI::CursorPage
+      response => OpenAI::Internal::CursorPage
     end

     row = response.to_enum.first
+    return if row.nil?
+
     assert_pattern do
-      row => OpenAI::Models::Chat::ChatCompletion
+      row => OpenAI::Chat::ChatCompletion
     end

     assert_pattern do
       row => {
         id: String,
-        choices: ^(OpenAI::ArrayOf[OpenAI::Models::Chat::ChatCompletion::Choice]),
+        choices: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletion::Choice]),
         created: Integer,
         model: String,
         object: Symbol,
-        service_tier: OpenAI::Models::Chat::ChatCompletion::ServiceTier | nil,
+        service_tier: OpenAI::Chat::ChatCompletion::ServiceTier | nil,
         system_fingerprint: String | nil,
-        usage: OpenAI::Models::CompletionUsage | nil
+        usage: OpenAI::CompletionUsage | nil
       }
     end
   end
@@ -100,13 +99,13 @@ def test_delete
     response = @openai.chat.completions.delete("completion_id")

     assert_pattern do
-      response => OpenAI::Models::Chat::ChatCompletionDeleted
+      response => OpenAI::Chat::ChatCompletionDeleted
     end

     assert_pattern do
       response => {
         id: String,
-        deleted: OpenAI::BooleanModel,
+        deleted: OpenAI::Internal::Type::Boolean,
         object: Symbol
       }
     end
diff --git a/test/openai/resources/completions_test.rb b/test/openai/resources/completions_test.rb
index 0ff1c63c..349c682e 100644
--- a/test/openai/resources/completions_test.rb
+++ b/test/openai/resources/completions_test.rb
@@ -4,21 +4,21 @@
 class OpenAI::Test::Resources::CompletionsTest < OpenAI::Test::ResourceTest
   def test_create_required_params
-    response = @openai.completions.create(model: "string", prompt: "This is a test.", stream: true)
+    response = @openai.completions.create(model: :"gpt-3.5-turbo-instruct", prompt: "This is a test.")

     assert_pattern do
-      response => OpenAI::Models::Completion
+      response => OpenAI::Completion
     end

     assert_pattern do
       response => {
         id: String,
-        choices: ^(OpenAI::ArrayOf[OpenAI::Models::CompletionChoice]),
+        choices: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::CompletionChoice]),
         created: Integer,
         model: String,
         object: Symbol,
         system_fingerprint: String | nil,
-        usage: OpenAI::Models::CompletionUsage | nil
+        usage: OpenAI::CompletionUsage | nil
       }
     end
   end
diff --git a/test/openai/resources/containers/files/content_test.rb b/test/openai/resources/containers/files/content_test.rb
new file mode 100644
index 00000000..0a57b6cb
--- /dev/null
+++ b/test/openai/resources/containers/files/content_test.rb
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+require_relative "../../../test_helper"
+
+class OpenAI::Test::Resources::Containers::Files::ContentTest < OpenAI::Test::ResourceTest
+  def test_retrieve_required_params
+    skip("Prism doesn't support application/binary responses")
+
+    response = @openai.containers.files.content.retrieve("file_id", container_id: "container_id")
+
+    assert_pattern do
+      response => StringIO
+    end
+  end
+end
diff --git a/test/openai/resources/containers/files_test.rb b/test/openai/resources/containers/files_test.rb
new file mode 100644
index 00000000..d2522cc7
--- /dev/null
+++ b/test/openai/resources/containers/files_test.rb
@@ -0,0 +1,80 @@
+# frozen_string_literal: true
+
+require_relative "../../test_helper"
+
+class OpenAI::Test::Resources::Containers::FilesTest < OpenAI::Test::ResourceTest
+  def test_create
+    response = @openai.containers.files.create("container_id")
+
+    assert_pattern do
+      response => OpenAI::Models::Containers::FileCreateResponse
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        bytes: Integer,
+        container_id: String,
+        created_at: Integer,
+        object: Symbol,
+        path: String,
+        source: String
+      }
+    end
+  end
+
+  def test_retrieve_required_params
+    response = @openai.containers.files.retrieve("file_id", container_id: "container_id")
+
+    assert_pattern do
+      response => OpenAI::Models::Containers::FileRetrieveResponse
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        bytes: Integer,
+        container_id: String,
+        created_at: Integer,
+        object: Symbol,
+        path: String,
+        source: String
+      }
+    end
+  end
+
+  def test_list
+    response = @openai.containers.files.list("container_id")
+
+    assert_pattern do
+      response => OpenAI::Internal::CursorPage
+    end
+
+    row = response.to_enum.first
+    return if row.nil?
+
+    assert_pattern do
+      row => OpenAI::Models::Containers::FileListResponse
+    end
+
+    assert_pattern do
+      row => {
+        id: String,
+        bytes: Integer,
+        container_id: String,
+        created_at: Integer,
+        object: Symbol,
+        path: String,
+        source: String
+      }
+    end
+  end
+
+  def test_delete_required_params
+    response = @openai.containers.files.delete("file_id", container_id: "container_id")
+
+    assert_pattern do
+      response => nil
+    end
+  end
+end
diff --git a/test/openai/resources/containers_test.rb b/test/openai/resources/containers_test.rb
new file mode 100644
index 00000000..a04742ee
--- /dev/null
+++ b/test/openai/resources/containers_test.rb
@@ -0,0 +1,77 @@
+# frozen_string_literal: true
+
+require_relative "../test_helper"
+
+class OpenAI::Test::Resources::ContainersTest < OpenAI::Test::ResourceTest
+  def test_create_required_params
+    response = @openai.containers.create(name: "name")
+
+    assert_pattern do
+      response => OpenAI::Models::ContainerCreateResponse
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Integer,
+        name: String,
+        object: String,
+        status: String,
+        expires_after: OpenAI::Models::ContainerCreateResponse::ExpiresAfter | nil
+      }
+    end
+  end
+
+  def test_retrieve
+    response = @openai.containers.retrieve("container_id")
+
+    assert_pattern do
+      response => OpenAI::Models::ContainerRetrieveResponse
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Integer,
+        name: String,
+        object: String,
+        status: String,
+        expires_after: OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter | nil
+      }
+    end
+  end
+
+  def test_list
+    response = @openai.containers.list
+
+    assert_pattern do
+      response => OpenAI::Internal::CursorPage
+    end
+
+    row = response.to_enum.first
+    return if row.nil?
+
+    assert_pattern do
+      row => OpenAI::Models::ContainerListResponse
+    end
+
+    assert_pattern do
+      row => {
+        id: String,
+        created_at: Integer,
+        name: String,
+        object: String,
+        status: String,
+        expires_after: OpenAI::Models::ContainerListResponse::ExpiresAfter | nil
+      }
+    end
+  end
+
+  def test_delete
+    response = @openai.containers.delete("container_id")
+
+    assert_pattern do
+      response => nil
+    end
+  end
+end
diff --git a/test/openai/resources/conversations/items_test.rb b/test/openai/resources/conversations/items_test.rb
new file mode 100644
index 00000000..bb012cd0
--- /dev/null
+++ b/test/openai/resources/conversations/items_test.rb
@@ -0,0 +1,327 @@
+# frozen_string_literal: true
+
+require_relative "../../test_helper"
+
+class OpenAI::Test::Resources::Conversations::ItemsTest < OpenAI::Test::ResourceTest
+  def test_create_required_params
+    response = @openai.conversations.items.create("conv_123", items: [{content: "string", role: :user}])
+
+    assert_pattern do
+      response => OpenAI::Conversations::ConversationItemList
+    end
+
+    assert_pattern do
+      response => {
+        data: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Conversations::ConversationItem]),
+        first_id: String,
+        has_more: OpenAI::Internal::Type::Boolean,
+        last_id: String,
+        object: Symbol
+      }
+    end
+  end
+
+  def test_retrieve_required_params
+    response = @openai.conversations.items.retrieve("msg_abc", conversation_id: "conv_123")
+
+    assert_pattern do
+      response => OpenAI::Conversations::ConversationItem
+    end
+
+    assert_pattern do
+      case response
+      in OpenAI::Conversations::Message
+      in OpenAI::Responses::ResponseFunctionToolCallItem
+      in OpenAI::Responses::ResponseFunctionToolCallOutputItem
+      in OpenAI::Responses::ResponseFileSearchToolCall
+      in OpenAI::Responses::ResponseFunctionWebSearch
+      in OpenAI::Conversations::ConversationItem::ImageGenerationCall
+      in OpenAI::Responses::ResponseComputerToolCall
+      in OpenAI::Responses::ResponseComputerToolCallOutputItem
+      in OpenAI::Responses::ResponseReasoningItem
+      in OpenAI::Responses::ResponseCodeInterpreterToolCall
+      in OpenAI::Conversations::ConversationItem::LocalShellCall
+      in OpenAI::Conversations::ConversationItem::LocalShellCallOutput
+      in OpenAI::Conversations::ConversationItem::McpListTools
+      in OpenAI::Conversations::ConversationItem::McpApprovalRequest
+      in OpenAI::Conversations::ConversationItem::McpApprovalResponse
+      in OpenAI::Conversations::ConversationItem::McpCall
+      in OpenAI::Responses::ResponseCustomToolCall
+      in OpenAI::Responses::ResponseCustomToolCallOutput
+      end
+    end
+
+    assert_pattern do
+      case response
+      in {
+        type: :message,
+        id: String,
+        content: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Conversations::Message::Content]),
+        role: OpenAI::Conversations::Message::Role,
+        status: OpenAI::Conversations::Message::Status
+      }
+      in {
+        type: :function_call_output,
+        id: String,
+        call_id: String,
+        output: String,
+        status: OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status | nil
+      }
+      in {
+        type: :file_search_call,
+        id: String,
+        queries: ^(OpenAI::Internal::Type::ArrayOf[String]),
+        status: OpenAI::Responses::ResponseFileSearchToolCall::Status,
+        results: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseFileSearchToolCall::Result]) | nil
+      }
+      in {
+        type: :web_search_call,
+        id: String,
+        action: OpenAI::Responses::ResponseFunctionWebSearch::Action,
+        status: OpenAI::Responses::ResponseFunctionWebSearch::Status
+      }
+      in {
+        type: :image_generation_call,
+        id: String,
+        result: String | nil,
+        status: OpenAI::Conversations::ConversationItem::ImageGenerationCall::Status
+      }
+      in {
+        type: :computer_call,
+        id: String,
+        action: OpenAI::Responses::ResponseComputerToolCall::Action,
+        call_id: String,
+        pending_safety_checks: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck]),
+        status: OpenAI::Responses::ResponseComputerToolCall::Status
+      }
+      in {
+        type: :computer_call_output,
+        id: String,
+        call_id: String,
+        output: OpenAI::Responses::ResponseComputerToolCallOutputScreenshot,
+        acknowledged_safety_checks: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck]) | nil,
+        status: OpenAI::Responses::ResponseComputerToolCallOutputItem::Status | nil
+      }
+      in {
+        type: :reasoning,
+        id: String,
+        summary: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseReasoningItem::Summary]),
+        content: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseReasoningItem::Content]) | nil,
+        encrypted_content: String | nil,
+        status: OpenAI::Responses::ResponseReasoningItem::Status | nil
+      }
+      in {
+        type: :code_interpreter_call,
+        id: String,
+        code: String | nil,
+        container_id: String,
+        outputs: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseCodeInterpreterToolCall::Output]) | nil,
+        status: OpenAI::Responses::ResponseCodeInterpreterToolCall::Status
+      }
+      in {
+        type: :local_shell_call,
+        id: String,
+        action: OpenAI::Conversations::ConversationItem::LocalShellCall::Action,
+        call_id: String,
+        status: OpenAI::Conversations::ConversationItem::LocalShellCall::Status
+      }
+      in {
+        type: :local_shell_call_output,
+        id: String,
+        output: String,
+        status: OpenAI::Conversations::ConversationItem::LocalShellCallOutput::Status | nil
+      }
+      in {
+        type: :mcp_list_tools,
+        id: String,
+        server_label: String,
+        tools: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Conversations::ConversationItem::McpListTools::Tool]),
+        error: String | nil
+      }
+      in {type: :mcp_approval_request, id: String, arguments: String, name: String, server_label: String}
+      in {
+        type: :mcp_approval_response,
+        id: String,
+        approval_request_id: String,
+        approve: OpenAI::Internal::Type::Boolean,
+        reason: String | nil
+      }
+      in {
+        type: :mcp_call,
+        id: String,
+        arguments: String,
+        name: String,
+        server_label: String,
+        error: String | nil,
+        output: String | nil
+      }
+      in {type: :custom_tool_call, call_id: String, input: String, name: String, id: String | nil}
+      in {type: :custom_tool_call_output, call_id: String, output: String, id: String | nil}
+      end
+    end
+  end
+
+  def test_list
+    response = @openai.conversations.items.list("conv_123")
+
+    assert_pattern do
+      response => OpenAI::Internal::ConversationCursorPage
+    end
+
+    row = response.to_enum.first
+    return if row.nil?
+
+    assert_pattern do
+      row => OpenAI::Conversations::ConversationItem
+    end
+
+    assert_pattern do
+      case row
+      in OpenAI::Conversations::Message
+      in OpenAI::Responses::ResponseFunctionToolCallItem
+      in OpenAI::Responses::ResponseFunctionToolCallOutputItem
+      in OpenAI::Responses::ResponseFileSearchToolCall
+      in OpenAI::Responses::ResponseFunctionWebSearch
+      in OpenAI::Conversations::ConversationItem::ImageGenerationCall
+      in OpenAI::Responses::ResponseComputerToolCall
+      in OpenAI::Responses::ResponseComputerToolCallOutputItem
+      in OpenAI::Responses::ResponseReasoningItem
+      in OpenAI::Responses::ResponseCodeInterpreterToolCall
+      in OpenAI::Conversations::ConversationItem::LocalShellCall
+      in OpenAI::Conversations::ConversationItem::LocalShellCallOutput
+      in OpenAI::Conversations::ConversationItem::McpListTools
+      in OpenAI::Conversations::ConversationItem::McpApprovalRequest
+      in OpenAI::Conversations::ConversationItem::McpApprovalResponse
+      in OpenAI::Conversations::ConversationItem::McpCall
+      in OpenAI::Responses::ResponseCustomToolCall
+      in OpenAI::Responses::ResponseCustomToolCallOutput
+      end
+    end
+
+    assert_pattern do
+      case row
+      in {
+        type: :message,
+        id: String,
+        content: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Conversations::Message::Content]),
+        role: OpenAI::Conversations::Message::Role,
+        status: OpenAI::Conversations::Message::Status
+      }
+      in {
+        type: :function_call_output,
+        id: String,
+        call_id: String,
+        output: String,
+        status: OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status | nil
+      }
+      in {
+        type: :file_search_call,
+        id: String,
+        queries: ^(OpenAI::Internal::Type::ArrayOf[String]),
+        status: OpenAI::Responses::ResponseFileSearchToolCall::Status,
+        results: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseFileSearchToolCall::Result]) | nil
+      }
+      in {
+        type: :web_search_call,
+        id: String,
+        action: OpenAI::Responses::ResponseFunctionWebSearch::Action,
+        status: OpenAI::Responses::ResponseFunctionWebSearch::Status
+      }
+      in {
+        type: :image_generation_call,
+        id: String,
+        result: String | nil,
+        status: OpenAI::Conversations::ConversationItem::ImageGenerationCall::Status
+      }
+      in {
+        type: :computer_call,
+        id: String,
+        action: OpenAI::Responses::ResponseComputerToolCall::Action,
+        call_id: String,
+        pending_safety_checks: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck]),
+        status: OpenAI::Responses::ResponseComputerToolCall::Status
+      }
+      in {
+        type: :computer_call_output,
+        id: String,
+        call_id: String,
+        output: OpenAI::Responses::ResponseComputerToolCallOutputScreenshot,
+        acknowledged_safety_checks: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck]) | nil,
+        status: OpenAI::Responses::ResponseComputerToolCallOutputItem::Status | nil
+      }
+      in {
+        type: :reasoning,
+        id: String,
+        summary: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseReasoningItem::Summary]),
+        content: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseReasoningItem::Content]) | nil,
+        encrypted_content: String | nil,
+        status: OpenAI::Responses::ResponseReasoningItem::Status | nil
+      }
+      in {
+        type: :code_interpreter_call,
+        id: String,
+        code: String | nil,
+        container_id: String,
+        outputs: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseCodeInterpreterToolCall::Output]) | nil,
+        status: OpenAI::Responses::ResponseCodeInterpreterToolCall::Status
+      }
+      in {
+        type: :local_shell_call,
+        id: String,
+        action: OpenAI::Conversations::ConversationItem::LocalShellCall::Action,
+        call_id: String,
+        status: OpenAI::Conversations::ConversationItem::LocalShellCall::Status
+      }
+      in {
+        type: :local_shell_call_output,
+        id: String,
+        output: String,
+        status: OpenAI::Conversations::ConversationItem::LocalShellCallOutput::Status | nil
+      }
+      in {
+        type: :mcp_list_tools,
+        id: String,
+        server_label: String,
+        tools: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Conversations::ConversationItem::McpListTools::Tool]),
+        error: String | nil
+      }
+      in {type: :mcp_approval_request, id: String, arguments: String, name: String, server_label: String}
+      in {
+        type: :mcp_approval_response,
+        id: String,
+        approval_request_id: String,
+        approve: OpenAI::Internal::Type::Boolean,
+        reason: String | nil
+      }
+      in {
+        type: :mcp_call,
+        id: String,
+        arguments: String,
+        name: String,
+        server_label: String,
+        error: String | nil,
+        output: String | nil
+      }
+      in {type: :custom_tool_call, call_id: String, input: String, name: String, id: String | nil}
+      in {type: :custom_tool_call_output, call_id: String, output: String, id: String | nil}
+      end
+    end
+  end
+
+  def test_delete_required_params
+    response = @openai.conversations.items.delete("msg_abc", conversation_id: "conv_123")
+
+    assert_pattern do
+      response => OpenAI::Conversations::Conversation
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Integer,
+        metadata: OpenAI::Internal::Type::Unknown,
+        object: Symbol
+      }
+    end
+  end
+end
diff --git a/test/openai/resources/conversations_test.rb b/test/openai/resources/conversations_test.rb
new file mode 100644
index 00000000..3064e2af
--- /dev/null
+++ b/test/openai/resources/conversations_test.rb
@@ -0,0 +1,72 @@
+# frozen_string_literal: true
+
+require_relative "../test_helper"
+
+class OpenAI::Test::Resources::ConversationsTest < OpenAI::Test::ResourceTest
+  def test_create
+    response = @openai.conversations.create
+
+    assert_pattern do
+      response => OpenAI::Conversations::Conversation
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Integer,
+        metadata: OpenAI::Internal::Type::Unknown,
+        object: Symbol
+      }
+    end
+  end
+
+  def test_retrieve
+    response = @openai.conversations.retrieve("conv_123")
+
+    assert_pattern do
+      response => OpenAI::Conversations::Conversation
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Integer,
+        metadata: OpenAI::Internal::Type::Unknown,
+        object: Symbol
+      }
+    end
+  end
+
+  def test_update_required_params
+    response = @openai.conversations.update("conv_123", metadata: {foo: "string"})
+
+    assert_pattern do
+      response => OpenAI::Conversations::Conversation
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Integer,
+        metadata: OpenAI::Internal::Type::Unknown,
+        object: Symbol
+      }
+    end
+  end
+
+  def test_delete
+    response = @openai.conversations.delete("conv_123")
+
+    assert_pattern do
+      response => OpenAI::Conversations::ConversationDeletedResource
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        deleted: OpenAI::Internal::Type::Boolean,
+        object: Symbol
+      }
+    end
+  end
+end
diff --git a/test/openai/resources/embeddings_test.rb b/test/openai/resources/embeddings_test.rb
index 59218d06..b5539280 100644
--- a/test/openai/resources/embeddings_test.rb
+++ b/test/openai/resources/embeddings_test.rb
@@ -4,21 +4,22 @@
 class OpenAI::Test::Resources::EmbeddingsTest < OpenAI::Test::ResourceTest
   def test_create_required_params
-    response = @openai.embeddings.create(
-      input: "The quick brown fox jumped over the lazy dog",
-      model: :"text-embedding-3-small"
-    )
+    response =
+      @openai.embeddings.create(
+        input: "The quick brown fox jumped over the lazy dog",
+        model: :"text-embedding-ada-002"
+      )

     assert_pattern do
-      response => OpenAI::Models::CreateEmbeddingResponse
+      response => OpenAI::CreateEmbeddingResponse
     end

     assert_pattern do
       response => {
-        data: ^(OpenAI::ArrayOf[OpenAI::Models::Embedding]),
+        data: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Embedding]),
         model: String,
         object: Symbol,
-        usage: OpenAI::Models::CreateEmbeddingResponse::Usage
+        usage: OpenAI::CreateEmbeddingResponse::Usage
       }
     end
   end
diff --git a/test/openai/resources/evals/runs/output_items_test.rb b/test/openai/resources/evals/runs/output_items_test.rb
new file mode 100644
index 00000000..344b8e1c
--- /dev/null
+++ b/test/openai/resources/evals/runs/output_items_test.rb
@@ -0,0 +1,59 @@
+# frozen_string_literal: true
+
+require_relative "../../../test_helper"
+
+class OpenAI::Test::Resources::Evals::Runs::OutputItemsTest < OpenAI::Test::ResourceTest
+  def test_retrieve_required_params
+    response =
+      @openai.evals.runs.output_items.retrieve("output_item_id", eval_id: "eval_id", run_id: "run_id")
+
+    assert_pattern do
+      response => OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Integer,
+        datasource_item: ^(OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]),
+        datasource_item_id: Integer,
+        eval_id: String,
+        object: Symbol,
+        results: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]]),
+        run_id: String,
+        sample: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample,
+        status: String
+      }
+    end
+  end
+
+  def test_list_required_params
+    response = @openai.evals.runs.output_items.list("run_id", eval_id: "eval_id")
+
+    assert_pattern do
+      response => OpenAI::Internal::CursorPage
+    end
+
+    row = response.to_enum.first
+    return if row.nil?
+
+    assert_pattern do
+      row => OpenAI::Models::Evals::Runs::OutputItemListResponse
+    end
+
+    assert_pattern do
+      row => {
+        id: String,
+        created_at: Integer,
+        datasource_item: ^(OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]),
+        datasource_item_id: Integer,
+        eval_id: String,
+        object: Symbol,
+        results: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]]),
+        run_id: String,
+        sample: OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample,
+        status: String
+      }
+    end
+  end
+end
diff --git a/test/openai/resources/evals/runs_test.rb b/test/openai/resources/evals/runs_test.rb
new file mode 100644
index 00000000..cb839c51
--- /dev/null
+++ b/test/openai/resources/evals/runs_test.rb
@@ -0,0 +1,140 @@
+# frozen_string_literal: true
+
+require_relative "../../test_helper"
+
+class OpenAI::Test::Resources::Evals::RunsTest < OpenAI::Test::ResourceTest
+  def test_create_required_params
+    response =
+      @openai.evals.runs.create(
+        "eval_id",
+        data_source: {source: {content: [{item: {foo: "bar"}}], type: :file_content}, type: :jsonl}
+      )
+
+    assert_pattern do
+      response => OpenAI::Models::Evals::RunCreateResponse
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Integer,
+        data_source: OpenAI::Models::Evals::RunCreateResponse::DataSource,
+        error: OpenAI::Evals::EvalAPIError,
+        eval_id: String,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        model: String,
+        name: String,
+        object: Symbol,
+        per_model_usage: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCreateResponse::PerModelUsage]),
+        per_testing_criteria_results: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCreateResponse::PerTestingCriteriaResult]),
+        report_url: String,
+        result_counts: OpenAI::Models::Evals::RunCreateResponse::ResultCounts,
+        status: String
+      }
+    end
+  end
+
+  def test_retrieve_required_params
+    response = @openai.evals.runs.retrieve("run_id", eval_id: "eval_id")
+
+    assert_pattern do
+      response => OpenAI::Models::Evals::RunRetrieveResponse
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Integer,
+        data_source: OpenAI::Models::Evals::RunRetrieveResponse::DataSource,
+        error: OpenAI::Evals::EvalAPIError,
+        eval_id: String,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        model: String,
+        name: String,
+        object: Symbol,
+        per_model_usage: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunRetrieveResponse::PerModelUsage]),
+        per_testing_criteria_results: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunRetrieveResponse::PerTestingCriteriaResult]),
+        report_url: String,
+        result_counts: OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts,
+        status: String
+      }
+    end
+  end
+
+  def test_list
+    response = @openai.evals.runs.list("eval_id")
+
+    assert_pattern do
+      response => OpenAI::Internal::CursorPage
+    end
+
+    row = response.to_enum.first
+    return if row.nil?
+
+    assert_pattern do
+      row => OpenAI::Models::Evals::RunListResponse
+    end
+
+    assert_pattern do
+      row => {
+        id: String,
+        created_at: Integer,
+        data_source: OpenAI::Models::Evals::RunListResponse::DataSource,
+        error: OpenAI::Evals::EvalAPIError,
+        eval_id: String,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        model: String,
+        name: String,
+        object: Symbol,
+        per_model_usage: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunListResponse::PerModelUsage]),
+        per_testing_criteria_results: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunListResponse::PerTestingCriteriaResult]),
+        report_url: String,
+        result_counts: OpenAI::Models::Evals::RunListResponse::ResultCounts,
+        status: String
+      }
+    end
+  end
+
+  def test_delete_required_params
+    response = @openai.evals.runs.delete("run_id", eval_id: "eval_id")
+
+    assert_pattern do
+      response => OpenAI::Models::Evals::RunDeleteResponse
+    end
+
+    assert_pattern do
+      response => {
+        deleted: OpenAI::Internal::Type::Boolean | nil,
+        object: String | nil,
+        run_id: String | nil
+      }
+    end
+  end
+
+  def test_cancel_required_params
+    response = @openai.evals.runs.cancel("run_id", eval_id: "eval_id")
+
+    assert_pattern do
+      response => OpenAI::Models::Evals::RunCancelResponse
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Integer,
+        data_source: OpenAI::Models::Evals::RunCancelResponse::DataSource,
+        error: OpenAI::Evals::EvalAPIError,
+        eval_id: String,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        model: String,
+        name: String,
+        object: Symbol,
+        per_model_usage: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCancelResponse::PerModelUsage]),
+        per_testing_criteria_results: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCancelResponse::PerTestingCriteriaResult]),
+        report_url: String,
+        result_counts: OpenAI::Models::Evals::RunCancelResponse::ResultCounts,
+        status: String
+      }
+    end
+  end
+end
diff --git a/test/openai/resources/evals_test.rb b/test/openai/resources/evals_test.rb
new file mode 100644
index 00000000..d4cdc458
--- /dev/null
+++ b/test/openai/resources/evals_test.rb
@@ -0,0 +1,121 @@
+# frozen_string_literal: true
+
+require_relative "../test_helper"
+
+class OpenAI::Test::Resources::EvalsTest < OpenAI::Test::ResourceTest
+  def test_create_required_params
+    response =
+      @openai.evals.create(
+        data_source_config: {item_schema: {foo: "bar"}, type: :custom},
+        testing_criteria: [
+          {
+            input: [{content: "content", role: "role"}],
+            labels: ["string"],
+            model: "model",
+            name: "name",
+            passing_labels: ["string"],
+            type: :label_model
+          }
+        ]
+      )
+
+    assert_pattern do
+      response => OpenAI::Models::EvalCreateResponse
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Integer,
+        data_source_config: OpenAI::Models::EvalCreateResponse::DataSourceConfig,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        name: String,
+        object: Symbol,
+        testing_criteria: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::EvalCreateResponse::TestingCriterion])
+      }
+    end
+  end
+
+  def test_retrieve
+    response = @openai.evals.retrieve("eval_id")
+
+    assert_pattern do
+      response => OpenAI::Models::EvalRetrieveResponse
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Integer,
+        data_source_config: OpenAI::Models::EvalRetrieveResponse::DataSourceConfig,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        name: String,
+        object: Symbol,
+        testing_criteria: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::EvalRetrieveResponse::TestingCriterion])
+      }
+    end
+  end
+
+  def test_update
+    response = @openai.evals.update("eval_id")
+
+    assert_pattern do
+      response => OpenAI::Models::EvalUpdateResponse
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Integer,
+        data_source_config: OpenAI::Models::EvalUpdateResponse::DataSourceConfig,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        name: String,
+        object: Symbol,
+        testing_criteria: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::EvalUpdateResponse::TestingCriterion])
+      }
+    end
+  end
+
+  def test_list
+    response = @openai.evals.list
+
+    assert_pattern do
+      response => OpenAI::Internal::CursorPage
+    end
+
+    row = response.to_enum.first
+    return if row.nil?
+
+    assert_pattern do
+      row => OpenAI::Models::EvalListResponse
+    end
+
+    assert_pattern do
+      row => {
+        id: String,
+        created_at: Integer,
+        data_source_config: OpenAI::Models::EvalListResponse::DataSourceConfig,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        name: String,
+        object: Symbol,
+        testing_criteria: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::EvalListResponse::TestingCriterion])
+      }
+    end
+  end
+
+  def test_delete
+    response = @openai.evals.delete("eval_id")
+
+    assert_pattern do
+      response => OpenAI::Models::EvalDeleteResponse
+    end
+
+    assert_pattern do
+      response => {
+        deleted: OpenAI::Internal::Type::Boolean,
+        eval_id: String,
+        object: String
+      }
+    end
+  end
+end
diff --git a/test/openai/resources/files_test.rb b/test/openai/resources/files_test.rb
index c517f42f..5833751a 100644
--- a/test/openai/resources/files_test.rb
+++ b/test/openai/resources/files_test.rb
@@ -4,10 +4,10 @@
 class OpenAI::Test::Resources::FilesTest < OpenAI::Test::ResourceTest
   def test_create_required_params
-    response = @openai.files.create(file: StringIO.new("some file contents"), purpose: :assistants)
+    response = @openai.files.create(file: Pathname(__FILE__), purpose: :assistants)

     assert_pattern do
-      response => OpenAI::Models::FileObject
+      response => OpenAI::FileObject
     end

     assert_pattern do
@@ -17,8 +17,8 @@ def test_create_required_params
       response => {
         id: String,
         created_at: Integer,
         filename: String,
         object: Symbol,
-        purpose: OpenAI::Models::FileObject::Purpose,
-        status: OpenAI::Models::FileObject::Status,
+        purpose: OpenAI::FileObject::Purpose,
+        status: OpenAI::FileObject::Status,
         expires_at: Integer | nil,
         status_details: String | nil
       }
@@ -29,7 +29,7 @@ def test_retrieve
     response = @openai.files.retrieve("file_id")

     assert_pattern do
-      response => OpenAI::Models::FileObject
+      response => OpenAI::FileObject
     end

     assert_pattern do
@@ -39,8 +39,8 @@ def test_retrieve
         created_at: Integer,
         filename: String,
         object: Symbol,
-        purpose: OpenAI::Models::FileObject::Purpose,
-        status: OpenAI::Models::FileObject::Status,
+        purpose: OpenAI::FileObject::Purpose,
+        status: OpenAI::FileObject::Status,
         expires_at: Integer | nil,
         status_details: String | nil
       }
@@ -51,12 +51,14 @@ def test_list
     response = @openai.files.list

     assert_pattern do
-      response => OpenAI::CursorPage
+      response => OpenAI::Internal::CursorPage
     end

     row = response.to_enum.first
+    return if row.nil?
+
     assert_pattern do
-      row => OpenAI::Models::FileObject
+      row => OpenAI::FileObject
     end

     assert_pattern do
@@ -66,8 +68,8 @@ def test_list
       created_at: Integer,
       filename: String,
       object: Symbol,
-      purpose: OpenAI::Models::FileObject::Purpose,
-      status: OpenAI::Models::FileObject::Status,
+      purpose: OpenAI::FileObject::Purpose,
+      status: OpenAI::FileObject::Status,
       expires_at: Integer | nil,
       status_details: String | nil
     }
@@ -78,25 +80,25 @@ def test_delete
     response = @openai.files.delete("file_id")

     assert_pattern do
-      response => OpenAI::Models::FileDeleted
+      response => OpenAI::FileDeleted
     end

     assert_pattern do
       response => {
         id: String,
-        deleted: OpenAI::BooleanModel,
+        deleted: OpenAI::Internal::Type::Boolean,
         object: Symbol
       }
     end
   end

   def test_content
-    skip("skipped: test server currently has no support for method content-type")
+    skip("Prism doesn't support application/binary responses")

     response = @openai.files.content("file_id")

     assert_pattern do
-      response => OpenAI::Unknown
+      response => StringIO
     end
   end
 end
diff --git a/test/openai/resources/fine_tuning/alpha/graders_test.rb b/test/openai/resources/fine_tuning/alpha/graders_test.rb
new file mode 100644
index 00000000..9e2f659c
--- /dev/null
+++ b/test/openai/resources/fine_tuning/alpha/graders_test.rb
@@ -0,0 +1,43 @@
+# frozen_string_literal: true
+
+require_relative "../../../test_helper"
+
+class OpenAI::Test::Resources::FineTuning::Alpha::GradersTest < OpenAI::Test::ResourceTest
+  def test_run_required_params
+    response =
+      @openai.fine_tuning.alpha.graders.run(
+        grader: {input: "input", name: "name", operation: :eq, reference: "reference", type: :string_check},
+        model_sample: "model_sample"
+      )
+
+    assert_pattern do
+      response => OpenAI::Models::FineTuning::Alpha::GraderRunResponse
+    end
+
+    assert_pattern do
+      response => {
+        metadata: OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata,
+        model_grader_token_usage_per_model: ^(OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]),
+        reward: Float,
+        sub_rewards: ^(OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown])
+      }
+    end
+  end
+
+  def test_validate_required_params
+    response =
+      @openai.fine_tuning.alpha.graders.validate(
+        grader: {input: "input", name: "name", operation: :eq, reference: "reference", type: :string_check}
+      )
+
+    assert_pattern do
+      response => OpenAI::Models::FineTuning::Alpha::GraderValidateResponse
+    end
+
+    assert_pattern do
+      response => {
+        grader: OpenAI::Models::FineTuning::Alpha::GraderValidateResponse::Grader | nil
+      }
+    end
+  end
+end
diff --git a/test/openai/resources/fine_tuning/alpha_test.rb b/test/openai/resources/fine_tuning/alpha_test.rb
new file mode 100644
index 00000000..53389b81
--- /dev/null
+++ b/test/openai/resources/fine_tuning/alpha_test.rb
@@ -0,0 +1,6 @@
+# frozen_string_literal: true
+
+require_relative "../../test_helper"
+
+class OpenAI::Test::Resources::FineTuning::AlphaTest < OpenAI::Test::ResourceTest
+end
diff --git a/test/openai/resources/fine_tuning/checkpoints/permissions_test.rb b/test/openai/resources/fine_tuning/checkpoints/permissions_test.rb
new file mode 100644
index 00000000..0ed0d4c1
--- /dev/null
+++ b/test/openai/resources/fine_tuning/checkpoints/permissions_test.rb
@@ -0,0 +1,71 @@
+# frozen_string_literal: true
+
+require_relative "../../../test_helper"
+
+class OpenAI::Test::Resources::FineTuning::Checkpoints::PermissionsTest < OpenAI::Test::ResourceTest
+  def test_create_required_params
+    response =
+      @openai.fine_tuning.checkpoints.permissions.create(
+        "ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
+        project_ids: ["string"]
+      )
+
+    assert_pattern do
+      response => OpenAI::Internal::Page
+    end
+
+    row = response.to_enum.first
+    return if row.nil?
+
+    assert_pattern do
+      row => OpenAI::Models::FineTuning::Checkpoints::PermissionCreateResponse
+    end
+
+    assert_pattern do
+      row => {
+        id: String,
+        created_at: Integer,
+        object: Symbol,
+        project_id: String
+      }
+    end
+  end
+
+  def test_retrieve
+    response = @openai.fine_tuning.checkpoints.permissions.retrieve("ft-AF1WoRqd3aJAHsqc9NY7iL8F")
+
+    assert_pattern do
+      response => OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse
+    end
+
+    assert_pattern do
+      response => {
+        data: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data]),
+        has_more: OpenAI::Internal::Type::Boolean,
+        object: Symbol,
+        first_id: String | nil,
+        last_id: String | nil
+      }
+    end
+  end
+
+  def test_delete_required_params
+    response =
+      @openai.fine_tuning.checkpoints.permissions.delete(
+        "cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+        fine_tuned_model_checkpoint: "ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd"
+      )
+
+    assert_pattern do
+      response => OpenAI::Models::FineTuning::Checkpoints::PermissionDeleteResponse
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        deleted: OpenAI::Internal::Type::Boolean,
+        object: Symbol
+      }
+    end
+  end
+end
diff --git a/test/openai/resources/fine_tuning/checkpoints_test.rb b/test/openai/resources/fine_tuning/checkpoints_test.rb
new file mode 100644
index 00000000..acbf41a8
--- /dev/null
+++ b/test/openai/resources/fine_tuning/checkpoints_test.rb
@@ -0,0 +1,6 @@
+# frozen_string_literal: true
+
+require_relative "../../test_helper"
+
+class OpenAI::Test::Resources::FineTuning::CheckpointsTest < OpenAI::Test::ResourceTest
+end
diff --git a/test/openai/resources/fine_tuning/jobs/checkpoints_test.rb b/test/openai/resources/fine_tuning/jobs/checkpoints_test.rb
index 96ee1164..ab4136ce 100644
--- a/test/openai/resources/fine_tuning/jobs/checkpoints_test.rb
+++ b/test/openai/resources/fine_tuning/jobs/checkpoints_test.rb
@@ -7,12 +7,14 @@ def test_list
     response = @openai.fine_tuning.jobs.checkpoints.list("ft-AF1WoRqd3aJAHsqc9NY7iL8F")

     assert_pattern do
-      response => OpenAI::CursorPage
+      response => OpenAI::Internal::CursorPage
     end

     row = response.to_enum.first
+    return if row.nil?
+
     assert_pattern do
-      row => OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint
+      row => OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint
     end

     assert_pattern do
@@ -21,7 +23,7 @@ def test_list
       created_at: Integer,
       fine_tuned_model_checkpoint: String,
       fine_tuning_job_id: String,
-      metrics: OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics,
+      metrics: OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics,
       object: Symbol,
       step_number: Integer
     }
diff --git a/test/openai/resources/fine_tuning/jobs_test.rb b/test/openai/resources/fine_tuning/jobs_test.rb
index c085d073..3047ffd4 100644
--- a/test/openai/resources/fine_tuning/jobs_test.rb
+++ b/test/openai/resources/fine_tuning/jobs_test.rb
@@ -4,33 +4,33 @@
 class OpenAI::Test::Resources::FineTuning::JobsTest < OpenAI::Test::ResourceTest
   def test_create_required_params
-    response = @openai.fine_tuning.jobs.create(model: :"gpt-4o-mini", training_file: "file-abc123")
+    response = @openai.fine_tuning.jobs.create(model: :"babbage-002", training_file: "file-abc123")

     assert_pattern do
-      response => OpenAI::Models::FineTuning::FineTuningJob
+      response => OpenAI::FineTuning::FineTuningJob
     end

     assert_pattern do
       response => {
         id: String,
         created_at: Integer,
-        error: OpenAI::Models::FineTuning::FineTuningJob::Error | nil,
+        error: OpenAI::FineTuning::FineTuningJob::Error | nil,
         fine_tuned_model: String | nil,
         finished_at: Integer | nil,
-        hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters,
+        hyperparameters: OpenAI::FineTuning::FineTuningJob::Hyperparameters,
         model: String,
         object: Symbol,
         organization_id: String,
-        result_files: ^(OpenAI::ArrayOf[String]),
+        result_files: ^(OpenAI::Internal::Type::ArrayOf[String]),
         seed: Integer,
-        status: OpenAI::Models::FineTuning::FineTuningJob::Status,
+        status: OpenAI::FineTuning::FineTuningJob::Status,
         trained_tokens: Integer | nil,
         training_file: String,
         validation_file: String | nil,
         estimated_finish: Integer | nil,
-        integrations: ^(OpenAI::ArrayOf[OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject]) | nil,
-        metadata: ^(OpenAI::HashOf[String]) | nil,
-        method_: OpenAI::Models::FineTuning::FineTuningJob::Method | nil
+        integrations: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::FineTuning::FineTuningJobWandbIntegrationObject]) | nil,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        method_: OpenAI::FineTuning::FineTuningJob::Method | nil
       }
     end
   end
@@ -39,30 +39,30 @@ def test_retrieve
     response = @openai.fine_tuning.jobs.retrieve("ft-AF1WoRqd3aJAHsqc9NY7iL8F")

     assert_pattern do
-      response => OpenAI::Models::FineTuning::FineTuningJob
+      response => OpenAI::FineTuning::FineTuningJob
     end

     assert_pattern do
       response => {
         id: String,
         created_at: Integer,
-        error: OpenAI::Models::FineTuning::FineTuningJob::Error | nil,
+        error: OpenAI::FineTuning::FineTuningJob::Error | nil,
         fine_tuned_model: String | nil,
         finished_at: Integer | nil,
-        hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters,
+        hyperparameters: OpenAI::FineTuning::FineTuningJob::Hyperparameters,
         model: String,
         object: Symbol,
         organization_id: String,
-        result_files: ^(OpenAI::ArrayOf[String]),
+        result_files: ^(OpenAI::Internal::Type::ArrayOf[String]),
         seed: Integer,
-        status: OpenAI::Models::FineTuning::FineTuningJob::Status,
+        status: OpenAI::FineTuning::FineTuningJob::Status,
         trained_tokens: Integer | nil,
         training_file: String,
         validation_file: String | nil,
         estimated_finish: Integer | nil,
-        integrations: ^(OpenAI::ArrayOf[OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject]) | nil,
-        metadata: ^(OpenAI::HashOf[String]) | nil,
-        method_: OpenAI::Models::FineTuning::FineTuningJob::Method | nil
+        integrations: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::FineTuning::FineTuningJobWandbIntegrationObject]) | nil,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        method_: OpenAI::FineTuning::FineTuningJob::Method | nil
       }
     end
   end
@@ -71,35 +71,37 @@ def test_list
     response = @openai.fine_tuning.jobs.list

     assert_pattern do
-      response => OpenAI::CursorPage
+      response => OpenAI::Internal::CursorPage
     end

     row = response.to_enum.first
+    return if row.nil?
+
     assert_pattern do
-      row => OpenAI::Models::FineTuning::FineTuningJob
+      row => OpenAI::FineTuning::FineTuningJob
     end

     assert_pattern do
       row => {
         id: String,
         created_at: Integer,
-        error: OpenAI::Models::FineTuning::FineTuningJob::Error | nil,
+        error: OpenAI::FineTuning::FineTuningJob::Error | nil,
         fine_tuned_model: String | nil,
         finished_at: Integer | nil,
-        hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters,
+        hyperparameters: OpenAI::FineTuning::FineTuningJob::Hyperparameters,
         model: String,
         object: Symbol,
         organization_id: String,
-        result_files: ^(OpenAI::ArrayOf[String]),
+        result_files: ^(OpenAI::Internal::Type::ArrayOf[String]),
         seed: Integer,
-        status: OpenAI::Models::FineTuning::FineTuningJob::Status,
+        status: OpenAI::FineTuning::FineTuningJob::Status,
         trained_tokens: Integer | nil,
         training_file: String,
         validation_file: String | nil,
         estimated_finish: Integer | nil,
-        integrations: ^(OpenAI::ArrayOf[OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject]) | nil,
-        metadata: ^(OpenAI::HashOf[String]) | nil,
-        method_: OpenAI::Models::FineTuning::FineTuningJob::Method | nil
+        integrations: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::FineTuning::FineTuningJobWandbIntegrationObject]) | nil,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        method_: OpenAI::FineTuning::FineTuningJob::Method | nil
       }
     end
   end
@@ -108,30 +110,30 @@ def test_cancel
     response = @openai.fine_tuning.jobs.cancel("ft-AF1WoRqd3aJAHsqc9NY7iL8F")

     assert_pattern do
-      response => OpenAI::Models::FineTuning::FineTuningJob
+      response => OpenAI::FineTuning::FineTuningJob
     end

     assert_pattern do
       response => {
         id: String,
         created_at: Integer,
-        error: OpenAI::Models::FineTuning::FineTuningJob::Error | nil,
+        error: OpenAI::FineTuning::FineTuningJob::Error | nil,
         fine_tuned_model: String | nil,
         finished_at: Integer | nil,
-        hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters,
+        hyperparameters: OpenAI::FineTuning::FineTuningJob::Hyperparameters,
         model: String,
         object: Symbol,
         organization_id: String,
-        result_files: ^(OpenAI::ArrayOf[String]),
+        result_files: ^(OpenAI::Internal::Type::ArrayOf[String]),
         seed: Integer,
-        status: OpenAI::Models::FineTuning::FineTuningJob::Status,
+        status: OpenAI::FineTuning::FineTuningJob::Status,
         trained_tokens: Integer | nil,
         training_file: String,
         validation_file: String | nil,
         estimated_finish: Integer | nil,
-        integrations: ^(OpenAI::ArrayOf[OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject]) | nil,
-        metadata: ^(OpenAI::HashOf[String]) | nil,
-        method_: OpenAI::Models::FineTuning::FineTuningJob::Method | nil
+        integrations: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::FineTuning::FineTuningJobWandbIntegrationObject]) | nil,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        method_: OpenAI::FineTuning::FineTuningJob::Method | nil
       }
     end
   end
@@ -140,23 +142,89 @@ def test_list_events
     response = @openai.fine_tuning.jobs.list_events("ft-AF1WoRqd3aJAHsqc9NY7iL8F")

     assert_pattern do
-      response => OpenAI::CursorPage
+      response => OpenAI::Internal::CursorPage
     end

     row = response.to_enum.first
+    return if row.nil?
+
     assert_pattern do
-      row => OpenAI::Models::FineTuning::FineTuningJobEvent
+      row => OpenAI::FineTuning::FineTuningJobEvent
     end

     assert_pattern do
       row => {
         id: String,
         created_at: Integer,
-        level: OpenAI::Models::FineTuning::FineTuningJobEvent::Level,
+        level: OpenAI::FineTuning::FineTuningJobEvent::Level,
         message: String,
         object: Symbol,
-        data: OpenAI::Unknown | nil,
-        type: OpenAI::Models::FineTuning::FineTuningJobEvent::Type | nil
+        data: OpenAI::Internal::Type::Unknown | nil,
+        type: OpenAI::FineTuning::FineTuningJobEvent::Type | nil
       }
     end
   end
+
+  def test_pause
+    response = @openai.fine_tuning.jobs.pause("ft-AF1WoRqd3aJAHsqc9NY7iL8F")
+
+    assert_pattern do
+      response => OpenAI::FineTuning::FineTuningJob
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Integer,
+        error: OpenAI::FineTuning::FineTuningJob::Error | nil,
+        fine_tuned_model: String | nil,
+        finished_at: Integer | nil,
+        hyperparameters: OpenAI::FineTuning::FineTuningJob::Hyperparameters,
+        model: String,
+        object: Symbol,
+        organization_id: String,
+        result_files: ^(OpenAI::Internal::Type::ArrayOf[String]),
+        seed: Integer,
+        status: OpenAI::FineTuning::FineTuningJob::Status,
+        trained_tokens: Integer | nil,
+        training_file: String,
+        validation_file: String | nil,
+        estimated_finish: Integer | nil,
+        integrations: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::FineTuning::FineTuningJobWandbIntegrationObject]) | nil,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        method_: OpenAI::FineTuning::FineTuningJob::Method | nil
+      }
+    end
+  end
+
+  def test_resume
+    response = @openai.fine_tuning.jobs.resume("ft-AF1WoRqd3aJAHsqc9NY7iL8F")
+
+    assert_pattern do
+      response => OpenAI::FineTuning::FineTuningJob
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Integer,
+        error: OpenAI::FineTuning::FineTuningJob::Error | nil,
+        fine_tuned_model: String | nil,
+        finished_at: Integer | nil,
+        hyperparameters: OpenAI::FineTuning::FineTuningJob::Hyperparameters,
+        model: String,
+        object: Symbol,
+        organization_id: String,
+        result_files: ^(OpenAI::Internal::Type::ArrayOf[String]),
+        seed: Integer,
+        status: OpenAI::FineTuning::FineTuningJob::Status,
+        trained_tokens: Integer | nil,
+        training_file: String,
+        validation_file: String | nil,
+        estimated_finish: Integer | nil,
+        integrations: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::FineTuning::FineTuningJobWandbIntegrationObject]) | nil,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        method_: OpenAI::FineTuning::FineTuningJob::Method | nil
+      }
+    end
+  end
 end
diff --git a/test/openai/resources/fine_tuning/methods_test.rb b/test/openai/resources/fine_tuning/methods_test.rb
new file mode 100644
index 00000000..69a6ca5a
--- /dev/null
+++ b/test/openai/resources/fine_tuning/methods_test.rb
@@ -0,0 +1,6 @@
+# frozen_string_literal: true
+
+require_relative "../../test_helper"
+
+class OpenAI::Test::Resources::FineTuning::MethodsTest < OpenAI::Test::ResourceTest
+end
diff --git a/test/openai/resources/graders/grader_models_test.rb b/test/openai/resources/graders/grader_models_test.rb
new file mode 100644
index 00000000..6a2f0b0a
--- /dev/null
+++ b/test/openai/resources/graders/grader_models_test.rb
@@ -0,0 +1,6 @@
+# frozen_string_literal: true
+
+require_relative "../../test_helper" + +class OpenAI::Test::Resources::Graders::GraderModelsTest < OpenAI::Test::ResourceTest +end diff --git a/test/openai/resources/graders_test.rb b/test/openai/resources/graders_test.rb new file mode 100644 index 00000000..64e6bf4f --- /dev/null +++ b/test/openai/resources/graders_test.rb @@ -0,0 +1,6 @@ +# frozen_string_literal: true + +require_relative "../test_helper" + +class OpenAI::Test::Resources::GradersTest < OpenAI::Test::ResourceTest +end diff --git a/test/openai/resources/images_test.rb b/test/openai/resources/images_test.rb index 82078e4d..0b61a9a0 100644 --- a/test/openai/resources/images_test.rb +++ b/test/openai/resources/images_test.rb @@ -4,34 +4,42 @@ class OpenAI::Test::Resources::ImagesTest < OpenAI::Test::ResourceTest def test_create_variation_required_params - response = @openai.images.create_variation(image: StringIO.new("some file contents")) + response = @openai.images.create_variation(image: Pathname(__FILE__)) assert_pattern do - response => OpenAI::Models::ImagesResponse + response => OpenAI::ImagesResponse end assert_pattern do response => { created: Integer, - data: ^(OpenAI::ArrayOf[OpenAI::Models::Image]) + background: OpenAI::ImagesResponse::Background | nil, + data: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Image]) | nil, + output_format: OpenAI::ImagesResponse::OutputFormat | nil, + quality: OpenAI::ImagesResponse::Quality | nil, + size: OpenAI::ImagesResponse::Size | nil, + usage: OpenAI::ImagesResponse::Usage | nil } end end def test_edit_required_params - response = @openai.images.edit( - image: StringIO.new("some file contents"), - prompt: "A cute baby sea otter wearing a beret" - ) + response = + @openai.images.edit(image: Pathname(__FILE__), prompt: "A cute baby sea otter wearing a beret") assert_pattern do - response => OpenAI::Models::ImagesResponse + response => OpenAI::ImagesResponse end assert_pattern do response => { created: Integer, - data: ^(OpenAI::ArrayOf[OpenAI::Models::Image]) + background: OpenAI::ImagesResponse::Background | nil, + data: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Image]) | nil, + output_format: OpenAI::ImagesResponse::OutputFormat | nil, + quality: OpenAI::ImagesResponse::Quality | nil, + size: OpenAI::ImagesResponse::Size | nil, + usage: OpenAI::ImagesResponse::Usage | nil } end end @@ -40,13 +48,18 @@ def test_generate_required_params response = @openai.images.generate(prompt: "A cute baby sea otter") assert_pattern do - response => OpenAI::Models::ImagesResponse + response => OpenAI::ImagesResponse end assert_pattern do response => { created: Integer, - data: ^(OpenAI::ArrayOf[OpenAI::Models::Image]) + background: OpenAI::ImagesResponse::Background | nil, + data: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Image]) | nil, + output_format: OpenAI::ImagesResponse::OutputFormat | nil, + quality: OpenAI::ImagesResponse::Quality | nil, + size: OpenAI::ImagesResponse::Size | nil, + usage: OpenAI::ImagesResponse::Usage | nil } end end diff --git a/test/openai/resources/models_test.rb b/test/openai/resources/models_test.rb index d91063a2..1a9ae880 100644 --- a/test/openai/resources/models_test.rb +++ b/test/openai/resources/models_test.rb @@ -7,7 +7,7 @@ def test_retrieve response = @openai.models.retrieve("gpt-4o-mini") assert_pattern do - response => OpenAI::Models::Model + response => OpenAI::Model end assert_pattern do @@ -24,12 +24,14 @@ def test_list response = @openai.models.list assert_pattern do - response => OpenAI::Page + response => OpenAI::Internal::Page end row = 
response.to_enum.first + return if row.nil? + assert_pattern do - row => OpenAI::Models::Model + row => OpenAI::Model end assert_pattern do @@ -46,13 +48,13 @@ def test_delete response = @openai.models.delete("ft:gpt-4o-mini:acemeco:suffix:abc123") assert_pattern do - response => OpenAI::Models::ModelDeleted + response => OpenAI::ModelDeleted end assert_pattern do response => { id: String, - deleted: OpenAI::BooleanModel, + deleted: OpenAI::Internal::Type::Boolean, object: String } end diff --git a/test/openai/resources/moderations_test.rb b/test/openai/resources/moderations_test.rb index cbd3c69f..8e1ce626 100644 --- a/test/openai/resources/moderations_test.rb +++ b/test/openai/resources/moderations_test.rb @@ -14,7 +14,7 @@ def test_create_required_params response => { id: String, model: String, - results: ^(OpenAI::ArrayOf[OpenAI::Models::Moderation]) + results: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Moderation]) } end end diff --git a/test/openai/resources/responses/input_items_test.rb b/test/openai/resources/responses/input_items_test.rb index c00355a7..dc76d567 100644 --- a/test/openai/resources/responses/input_items_test.rb +++ b/test/openai/resources/responses/input_items_test.rb @@ -7,24 +7,34 @@ def test_list response = @openai.responses.input_items.list("response_id") assert_pattern do - response => OpenAI::CursorPage + response => OpenAI::Internal::CursorPage end row = response.to_enum.first + return if row.nil? + assert_pattern do - row => OpenAI::Models::Responses::ResponseItemList::Data + row => OpenAI::Responses::ResponseItem end assert_pattern do case row - in OpenAI::Models::Responses::ResponseItemList::Data::Message - in OpenAI::Models::Responses::ResponseOutputMessage - in OpenAI::Models::Responses::ResponseFileSearchToolCall - in OpenAI::Models::Responses::ResponseComputerToolCall - in OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput - in OpenAI::Models::Responses::ResponseFunctionWebSearch - in OpenAI::Models::Responses::ResponseFunctionToolCall - in OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput + in OpenAI::Responses::ResponseInputMessageItem + in OpenAI::Responses::ResponseOutputMessage + in OpenAI::Responses::ResponseFileSearchToolCall + in OpenAI::Responses::ResponseComputerToolCall + in OpenAI::Responses::ResponseComputerToolCallOutputItem + in OpenAI::Responses::ResponseFunctionWebSearch + in OpenAI::Responses::ResponseFunctionToolCallItem + in OpenAI::Responses::ResponseFunctionToolCallOutputItem + in OpenAI::Responses::ResponseItem::ImageGenerationCall + in OpenAI::Responses::ResponseCodeInterpreterToolCall + in OpenAI::Responses::ResponseItem::LocalShellCall + in OpenAI::Responses::ResponseItem::LocalShellCallOutput + in OpenAI::Responses::ResponseItem::McpListTools + in OpenAI::Responses::ResponseItem::McpApprovalRequest + in OpenAI::Responses::ResponseItem::McpApprovalResponse + in OpenAI::Responses::ResponseItem::McpCall end end @@ -33,55 +43,103 @@ def test_list in { type: :message, id: String, - content: ^(OpenAI::ArrayOf[union: OpenAI::Models::Responses::ResponseInputContent]), - role: OpenAI::Models::Responses::ResponseItemList::Data::Message::Role, - status: OpenAI::Models::Responses::ResponseItemList::Data::Message::Status | nil + content: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent]), + role: OpenAI::Responses::ResponseInputMessageItem::Role, + status: OpenAI::Responses::ResponseInputMessageItem::Status | nil } in { type: :message, id: String, - content: 
^(OpenAI::ArrayOf[union: OpenAI::Models::Responses::ResponseOutputMessage::Content]), + content: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputMessage::Content]), role: Symbol, - status: OpenAI::Models::Responses::ResponseOutputMessage::Status + status: OpenAI::Responses::ResponseOutputMessage::Status } in { type: :file_search_call, id: String, - queries: ^(OpenAI::ArrayOf[String]), - status: OpenAI::Models::Responses::ResponseFileSearchToolCall::Status, - results: ^(OpenAI::ArrayOf[OpenAI::Models::Responses::ResponseFileSearchToolCall::Result]) | nil + queries: ^(OpenAI::Internal::Type::ArrayOf[String]), + status: OpenAI::Responses::ResponseFileSearchToolCall::Status, + results: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseFileSearchToolCall::Result]) | nil } in { type: :computer_call, id: String, - action: OpenAI::Models::Responses::ResponseComputerToolCall::Action, + action: OpenAI::Responses::ResponseComputerToolCall::Action, call_id: String, - pending_safety_checks: ^(OpenAI::ArrayOf[OpenAI::Models::Responses::ResponseComputerToolCall::PendingSafetyCheck]), - status: OpenAI::Models::Responses::ResponseComputerToolCall::Status + pending_safety_checks: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck]), + status: OpenAI::Responses::ResponseComputerToolCall::Status } in { type: :computer_call_output, id: String, call_id: String, - output: OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Output, - acknowledged_safety_checks: ^(OpenAI::ArrayOf[OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::AcknowledgedSafetyCheck]) | nil, - status: OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Status | nil + output: OpenAI::Responses::ResponseComputerToolCallOutputScreenshot, + acknowledged_safety_checks: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck]) | nil, + status: OpenAI::Responses::ResponseComputerToolCallOutputItem::Status | nil } - in {type: :web_search_call, id: String, status: OpenAI::Models::Responses::ResponseFunctionWebSearch::Status} in { - type: :function_call, + type: :web_search_call, id: String, - arguments: String, - call_id: String, - name: String, - status: OpenAI::Models::Responses::ResponseFunctionToolCall::Status | nil + action: OpenAI::Responses::ResponseFunctionWebSearch::Action, + status: OpenAI::Responses::ResponseFunctionWebSearch::Status } in { type: :function_call_output, id: String, call_id: String, output: String, - status: OpenAI::Models::Responses::ResponseItemList::Data::FunctionCallOutput::Status | nil + status: OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status | nil + } + in { + type: :image_generation_call, + id: String, + result: String | nil, + status: OpenAI::Responses::ResponseItem::ImageGenerationCall::Status + } + in { + type: :code_interpreter_call, + id: String, + code: String | nil, + container_id: String, + outputs: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseCodeInterpreterToolCall::Output]) | nil, + status: OpenAI::Responses::ResponseCodeInterpreterToolCall::Status + } + in { + type: :local_shell_call, + id: String, + action: OpenAI::Responses::ResponseItem::LocalShellCall::Action, + call_id: String, + status: OpenAI::Responses::ResponseItem::LocalShellCall::Status + } + in { + type: :local_shell_call_output, + id: String, + output: String, + status: 
OpenAI::Responses::ResponseItem::LocalShellCallOutput::Status | nil + } + in { + type: :mcp_list_tools, + id: String, + server_label: String, + tools: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseItem::McpListTools::Tool]), + error: String | nil + } + in {type: :mcp_approval_request, id: String, arguments: String, name: String, server_label: String} + in { + type: :mcp_approval_response, + id: String, + approval_request_id: String, + approve: OpenAI::Internal::Type::Boolean, + reason: String | nil + } + in { + type: :mcp_call, + id: String, + arguments: String, + name: String, + server_label: String, + error: String | nil, + output: String | nil } end end diff --git a/test/openai/resources/responses_test.rb b/test/openai/resources/responses_test.rb index bd1bcaf3..d8e57585 100644 --- a/test/openai/resources/responses_test.rb +++ b/test/openai/resources/responses_test.rb @@ -3,36 +3,44 @@ require_relative "../test_helper" class OpenAI::Test::Resources::ResponsesTest < OpenAI::Test::ResourceTest - def test_create_required_params - response = @openai.responses.create(input: "string", model: :"gpt-4o", stream: true) + def test_create + response = @openai.responses.create assert_pattern do - response => OpenAI::Models::Responses::Response + response => OpenAI::Responses::Response end assert_pattern do response => { id: String, created_at: Float, - error: OpenAI::Models::Responses::ResponseError | nil, - incomplete_details: OpenAI::Models::Responses::Response::IncompleteDetails | nil, - instructions: String | nil, - metadata: ^(OpenAI::HashOf[String]) | nil, - model: OpenAI::Models::Responses::Response::Model, + error: OpenAI::Responses::ResponseError | nil, + incomplete_details: OpenAI::Responses::Response::IncompleteDetails | nil, + instructions: OpenAI::Responses::Response::Instructions | nil, + metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil, + model: OpenAI::ResponsesModel, object: Symbol, - output: ^(OpenAI::ArrayOf[union: OpenAI::Models::Responses::ResponseOutputItem]), - parallel_tool_calls: OpenAI::BooleanModel, + output: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputItem]), + parallel_tool_calls: OpenAI::Internal::Type::Boolean, temperature: Float | nil, - tool_choice: OpenAI::Models::Responses::Response::ToolChoice, - tools: ^(OpenAI::ArrayOf[union: OpenAI::Models::Responses::Tool]), + tool_choice: OpenAI::Responses::Response::ToolChoice, + tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool]), top_p: Float | nil, + background: OpenAI::Internal::Type::Boolean | nil, + conversation: OpenAI::Responses::Response::Conversation | nil, max_output_tokens: Integer | nil, + max_tool_calls: Integer | nil, previous_response_id: String | nil, - reasoning: OpenAI::Models::Reasoning | nil, - status: OpenAI::Models::Responses::ResponseStatus | nil, - text: OpenAI::Models::Responses::ResponseTextConfig | nil, - truncation: OpenAI::Models::Responses::Response::Truncation | nil, - usage: OpenAI::Models::Responses::ResponseUsage | nil, + prompt: OpenAI::Responses::ResponsePrompt | nil, + prompt_cache_key: String | nil, + reasoning: OpenAI::Reasoning | nil, + safety_identifier: String | nil, + service_tier: OpenAI::Responses::Response::ServiceTier | nil, + status: OpenAI::Responses::ResponseStatus | nil, + text: OpenAI::Responses::ResponseTextConfig | nil, + top_logprobs: Integer | nil, + truncation: OpenAI::Responses::Response::Truncation | nil, + usage: OpenAI::Responses::ResponseUsage | nil, user: String | nil } end @@ -42,32 
diff --git a/test/openai/resources/responses_test.rb b/test/openai/resources/responses_test.rb
index bd1bcaf3..d8e57585 100644
--- a/test/openai/resources/responses_test.rb
+++ b/test/openai/resources/responses_test.rb
@@ -3,36 +3,44 @@ require_relative "../test_helper"
 
 class OpenAI::Test::Resources::ResponsesTest < OpenAI::Test::ResourceTest
-  def test_create_required_params
-    response = @openai.responses.create(input: "string", model: :"gpt-4o", stream: true)
+  def test_create
+    response = @openai.responses.create
 
     assert_pattern do
-      response => OpenAI::Models::Responses::Response
+      response => OpenAI::Responses::Response
     end
 
     assert_pattern do
       response => {
         id: String,
         created_at: Float,
-        error: OpenAI::Models::Responses::ResponseError | nil,
-        incomplete_details: OpenAI::Models::Responses::Response::IncompleteDetails | nil,
-        instructions: String | nil,
-        metadata: ^(OpenAI::HashOf[String]) | nil,
-        model: OpenAI::Models::Responses::Response::Model,
+        error: OpenAI::Responses::ResponseError | nil,
+        incomplete_details: OpenAI::Responses::Response::IncompleteDetails | nil,
+        instructions: OpenAI::Responses::Response::Instructions | nil,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        model: OpenAI::ResponsesModel,
         object: Symbol,
-        output: ^(OpenAI::ArrayOf[union: OpenAI::Models::Responses::ResponseOutputItem]),
-        parallel_tool_calls: OpenAI::BooleanModel,
+        output: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputItem]),
+        parallel_tool_calls: OpenAI::Internal::Type::Boolean,
         temperature: Float | nil,
-        tool_choice: OpenAI::Models::Responses::Response::ToolChoice,
-        tools: ^(OpenAI::ArrayOf[union: OpenAI::Models::Responses::Tool]),
+        tool_choice: OpenAI::Responses::Response::ToolChoice,
+        tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool]),
         top_p: Float | nil,
+        background: OpenAI::Internal::Type::Boolean | nil,
+        conversation: OpenAI::Responses::Response::Conversation | nil,
         max_output_tokens: Integer | nil,
+        max_tool_calls: Integer | nil,
         previous_response_id: String | nil,
-        reasoning: OpenAI::Models::Reasoning | nil,
-        status: OpenAI::Models::Responses::ResponseStatus | nil,
-        text: OpenAI::Models::Responses::ResponseTextConfig | nil,
-        truncation: OpenAI::Models::Responses::Response::Truncation | nil,
-        usage: OpenAI::Models::Responses::ResponseUsage | nil,
+        prompt: OpenAI::Responses::ResponsePrompt | nil,
+        prompt_cache_key: String | nil,
+        reasoning: OpenAI::Reasoning | nil,
+        safety_identifier: String | nil,
+        service_tier: OpenAI::Responses::Response::ServiceTier | nil,
+        status: OpenAI::Responses::ResponseStatus | nil,
+        text: OpenAI::Responses::ResponseTextConfig | nil,
+        top_logprobs: Integer | nil,
+        truncation: OpenAI::Responses::Response::Truncation | nil,
+        usage: OpenAI::Responses::ResponseUsage | nil,
         user: String | nil
       }
     end
@@ -42,32 +50,40 @@ def test_retrieve
     response = @openai.responses.retrieve("resp_677efb5139a88190b512bc3fef8e535d")
 
     assert_pattern do
-      response => OpenAI::Models::Responses::Response
+      response => OpenAI::Responses::Response
     end
 
     assert_pattern do
       response => {
         id: String,
         created_at: Float,
-        error: OpenAI::Models::Responses::ResponseError | nil,
-        incomplete_details: OpenAI::Models::Responses::Response::IncompleteDetails | nil,
-        instructions: String | nil,
-        metadata: ^(OpenAI::HashOf[String]) | nil,
-        model: OpenAI::Models::Responses::Response::Model,
+        error: OpenAI::Responses::ResponseError | nil,
+        incomplete_details: OpenAI::Responses::Response::IncompleteDetails | nil,
+        instructions: OpenAI::Responses::Response::Instructions | nil,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        model: OpenAI::ResponsesModel,
         object: Symbol,
-        output: ^(OpenAI::ArrayOf[union: OpenAI::Models::Responses::ResponseOutputItem]),
-        parallel_tool_calls: OpenAI::BooleanModel,
+        output: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputItem]),
+        parallel_tool_calls: OpenAI::Internal::Type::Boolean,
         temperature: Float | nil,
-        tool_choice: OpenAI::Models::Responses::Response::ToolChoice,
-        tools: ^(OpenAI::ArrayOf[union: OpenAI::Models::Responses::Tool]),
+        tool_choice: OpenAI::Responses::Response::ToolChoice,
+        tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool]),
         top_p: Float | nil,
+        background: OpenAI::Internal::Type::Boolean | nil,
+        conversation: OpenAI::Responses::Response::Conversation | nil,
         max_output_tokens: Integer | nil,
+        max_tool_calls: Integer | nil,
         previous_response_id: String | nil,
-        reasoning: OpenAI::Models::Reasoning | nil,
-        status: OpenAI::Models::Responses::ResponseStatus | nil,
-        text: OpenAI::Models::Responses::ResponseTextConfig | nil,
-        truncation: OpenAI::Models::Responses::Response::Truncation | nil,
-        usage: OpenAI::Models::Responses::ResponseUsage | nil,
+        prompt: OpenAI::Responses::ResponsePrompt | nil,
+        prompt_cache_key: String | nil,
+        reasoning: OpenAI::Reasoning | nil,
+        safety_identifier: String | nil,
+        service_tier: OpenAI::Responses::Response::ServiceTier | nil,
+        status: OpenAI::Responses::ResponseStatus | nil,
+        text: OpenAI::Responses::ResponseTextConfig | nil,
+        top_logprobs: Integer | nil,
+        truncation: OpenAI::Responses::Response::Truncation | nil,
+        usage: OpenAI::Responses::ResponseUsage | nil,
         user: String | nil
       }
     end
@@ -80,4 +96,47 @@ def test_delete
       response => nil
     end
   end
+
+  def test_cancel
+    response = @openai.responses.cancel("resp_677efb5139a88190b512bc3fef8e535d")
+
+    assert_pattern do
+      response => OpenAI::Responses::Response
+    end
+
+    assert_pattern do
+      response => {
+        id: String,
+        created_at: Float,
+        error: OpenAI::Responses::ResponseError | nil,
+        incomplete_details: OpenAI::Responses::Response::IncompleteDetails | nil,
+        instructions: OpenAI::Responses::Response::Instructions | nil,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
+        model: OpenAI::ResponsesModel,
+        object: Symbol,
+        output: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputItem]),
+        parallel_tool_calls: OpenAI::Internal::Type::Boolean,
+        temperature: Float | nil,
+        tool_choice: OpenAI::Responses::Response::ToolChoice,
+        tools: ^(OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool]),
+        top_p: Float | nil,
+        background: OpenAI::Internal::Type::Boolean | nil,
+        conversation: OpenAI::Responses::Response::Conversation | nil,
+        max_output_tokens: Integer | nil,
+        max_tool_calls: Integer | nil,
+        previous_response_id: String | nil,
+        prompt: OpenAI::Responses::ResponsePrompt | nil,
+        prompt_cache_key: String | nil,
+        reasoning: OpenAI::Reasoning | nil,
+        safety_identifier: String | nil,
+        service_tier: OpenAI::Responses::Response::ServiceTier | nil,
+        status: OpenAI::Responses::ResponseStatus | nil,
+        text: OpenAI::Responses::ResponseTextConfig | nil,
+        top_logprobs: Integer | nil,
+        truncation: OpenAI::Responses::Response::Truncation | nil,
+        usage: OpenAI::Responses::ResponseUsage | nil,
+        user: String | nil
+      }
+    end
+  end
 end
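Outside the harness, the calls these tests now exercise look like the following; a hedged sketch assuming the prism mock server the test helper points at (`http://localhost:4010`) is running:

require "openai"

# Mirrors OpenAI::Test::SingletonClient's construction; the base URL and
# key are the test defaults, not production values.
client = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key")

# test_create no longer passes input/model/stream: against the mock every
# field is optional, and the result is an OpenAI::Responses::Response.
response = client.responses.create
puts response.id

# The new test_cancel exercises the cancel endpoint with a fixed ID.
canceled = client.responses.cancel("resp_677efb5139a88190b512bc3fef8e535d")
puts canceled.status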
diff --git a/test/openai/resources/uploads/parts_test.rb b/test/openai/resources/uploads/parts_test.rb
index f5c9e510..92fbc11f 100644
--- a/test/openai/resources/uploads/parts_test.rb
+++ b/test/openai/resources/uploads/parts_test.rb
@@ -4,10 +4,10 @@ class OpenAI::Test::Resources::Uploads::PartsTest < OpenAI::Test::ResourceTest
 
   def test_create_required_params
-    response = @openai.uploads.parts.create("upload_abc123", data: StringIO.new("some file contents"))
+    response = @openai.uploads.parts.create("upload_abc123", data: Pathname(__FILE__))
 
     assert_pattern do
-      response => OpenAI::Models::Uploads::UploadPart
+      response => OpenAI::Uploads::UploadPart
     end
 
     assert_pattern do
diff --git a/test/openai/resources/uploads_test.rb b/test/openai/resources/uploads_test.rb
index 88af217d..38a3a1e4 100644
--- a/test/openai/resources/uploads_test.rb
+++ b/test/openai/resources/uploads_test.rb
@@ -4,15 +4,11 @@ class OpenAI::Test::Resources::UploadsTest < OpenAI::Test::ResourceTest
 
   def test_create_required_params
-    response = @openai.uploads.create(
-      bytes: 0,
-      filename: "filename",
-      mime_type: "mime_type",
-      purpose: :assistants
-    )
+    response =
+      @openai.uploads.create(bytes: 0, filename: "filename", mime_type: "mime_type", purpose: :assistants)
 
     assert_pattern do
-      response => OpenAI::Models::Upload
+      response => OpenAI::Upload
     end
 
     assert_pattern do
@@ -24,8 +20,8 @@ def test_create_required_params
         filename: String,
         object: Symbol,
         purpose: String,
-        status: OpenAI::Models::Upload::Status,
-        file: OpenAI::Models::FileObject | nil
+        status: OpenAI::Upload::Status,
+        file: OpenAI::FileObject | nil
       }
     end
   end
@@ -34,7 +30,7 @@ def test_cancel
     response = @openai.uploads.cancel("upload_abc123")
 
     assert_pattern do
-      response => OpenAI::Models::Upload
+      response => OpenAI::Upload
     end
 
     assert_pattern do
@@ -46,8 +42,8 @@ def test_cancel
         filename: String,
         object: Symbol,
         purpose: String,
-        status: OpenAI::Models::Upload::Status,
-        file: OpenAI::Models::FileObject | nil
+        status: OpenAI::Upload::Status,
+        file: OpenAI::FileObject | nil
      }
     end
   end
@@ -56,7 +52,7 @@ def test_complete_required_params
     response = @openai.uploads.complete("upload_abc123", part_ids: ["string"])
 
     assert_pattern do
-      response => OpenAI::Models::Upload
+      response => OpenAI::Upload
     end
 
     assert_pattern do
@@ -68,8 +64,8 @@ def test_complete_required_params
         filename: String,
         object: Symbol,
         purpose: String,
-        status: OpenAI::Models::Upload::Status,
-        file: OpenAI::Models::FileObject | nil
+        status: OpenAI::Upload::Status,
+        file: OpenAI::FileObject | nil
       }
     end
   end
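Taken together, the three upload tests trace the multipart upload flow end to end. A hedged sketch that stitches them into one sequence (IDs, sizes, and the part payload are illustrative; the calls are exactly the ones the tests make):

require "openai"
require "pathname"

client = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key")

# 1. Reserve an upload slot; bytes/filename/mime_type/purpose are required.
upload = client.uploads.create(bytes: 0, filename: "filename", mime_type: "mime_type", purpose: :assistants)

# 2. Add data in parts; a Pathname works as the payload, which is presumably
#    why the test switched from StringIO.new(...) to Pathname(__FILE__).
part = client.uploads.parts.create(upload.id, data: Pathname(__FILE__))

# 3. Stitch the parts together; the result carries an Upload::Status and,
#    once processed, a FileObject.
done = client.uploads.complete(upload.id, part_ids: [part.id])
puts done.status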
diff --git a/test/openai/resources/vector_stores/file_batches_test.rb b/test/openai/resources/vector_stores/file_batches_test.rb
index d9d66e4d..ccaeb85e 100644
--- a/test/openai/resources/vector_stores/file_batches_test.rb
+++ b/test/openai/resources/vector_stores/file_batches_test.rb
@@ -7,16 +7,16 @@ def test_create_required_params
     response = @openai.vector_stores.file_batches.create("vs_abc123", file_ids: ["string"])
 
     assert_pattern do
-      response => OpenAI::Models::VectorStores::VectorStoreFileBatch
+      response => OpenAI::VectorStores::VectorStoreFileBatch
     end
 
     assert_pattern do
       response => {
         id: String,
         created_at: Integer,
-        file_counts: OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts,
+        file_counts: OpenAI::VectorStores::VectorStoreFileBatch::FileCounts,
         object: Symbol,
-        status: OpenAI::Models::VectorStores::VectorStoreFileBatch::Status,
+        status: OpenAI::VectorStores::VectorStoreFileBatch::Status,
         vector_store_id: String
       }
     end
@@ -26,16 +26,16 @@ def test_retrieve_required_params
     response = @openai.vector_stores.file_batches.retrieve("vsfb_abc123", vector_store_id: "vs_abc123")
 
     assert_pattern do
-      response => OpenAI::Models::VectorStores::VectorStoreFileBatch
+      response => OpenAI::VectorStores::VectorStoreFileBatch
     end
 
     assert_pattern do
       response => {
         id: String,
         created_at: Integer,
-        file_counts: OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts,
+        file_counts: OpenAI::VectorStores::VectorStoreFileBatch::FileCounts,
         object: Symbol,
-        status: OpenAI::Models::VectorStores::VectorStoreFileBatch::Status,
+        status: OpenAI::VectorStores::VectorStoreFileBatch::Status,
         vector_store_id: String
       }
     end
@@ -45,16 +45,16 @@ def test_cancel_required_params
     response = @openai.vector_stores.file_batches.cancel("batch_id", vector_store_id: "vector_store_id")
 
     assert_pattern do
-      response => OpenAI::Models::VectorStores::VectorStoreFileBatch
+      response => OpenAI::VectorStores::VectorStoreFileBatch
     end
 
     assert_pattern do
       response => {
         id: String,
         created_at: Integer,
-        file_counts: OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts,
+        file_counts: OpenAI::VectorStores::VectorStoreFileBatch::FileCounts,
         object: Symbol,
-        status: OpenAI::Models::VectorStores::VectorStoreFileBatch::Status,
+        status: OpenAI::VectorStores::VectorStoreFileBatch::Status,
         vector_store_id: String
       }
     end
@@ -64,25 +64,27 @@ def test_list_files_required_params
     response = @openai.vector_stores.file_batches.list_files("batch_id", vector_store_id: "vector_store_id")
 
     assert_pattern do
-      response => OpenAI::CursorPage
+      response => OpenAI::Internal::CursorPage
     end
 
     row = response.to_enum.first
+    return if row.nil?
+
     assert_pattern do
-      row => OpenAI::Models::VectorStores::VectorStoreFile
+      row => OpenAI::VectorStores::VectorStoreFile
     end
 
     assert_pattern do
       row => {
         id: String,
         created_at: Integer,
-        last_error: OpenAI::Models::VectorStores::VectorStoreFile::LastError | nil,
+        last_error: OpenAI::VectorStores::VectorStoreFile::LastError | nil,
         object: Symbol,
-        status: OpenAI::Models::VectorStores::VectorStoreFile::Status,
+        status: OpenAI::VectorStores::VectorStoreFile::Status,
         usage_bytes: Integer,
         vector_store_id: String,
-        attributes: ^(OpenAI::HashOf[union: OpenAI::Models::VectorStores::VectorStoreFile::Attribute]) | nil,
-        chunking_strategy: OpenAI::Models::FileChunkingStrategy | nil
+        attributes: ^(OpenAI::Internal::Type::HashOf[union: OpenAI::VectorStores::VectorStoreFile::Attribute]) | nil,
+        chunking_strategy: OpenAI::FileChunkingStrategy | nil
       }
     end
   end
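The newly added `return if row.nil?` guard is worth calling out: `to_enum.first` on an empty page returns `nil` rather than raising, so the row assertions would otherwise fail confusingly against an empty fixture. A hedged sketch of the same defensive read:

require "openai"

client = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key")

page = client.vector_stores.file_batches.list_files("batch_id", vector_store_id: "vector_store_id")

# An OpenAI::Internal::CursorPage enumerates rows; guard against an empty
# first page before touching any row fields.
row = page.to_enum.first
if row.nil?
  puts "no files in this batch yet"
else
  puts "#{row.id}: #{row.status}"
end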
diff --git a/test/openai/resources/vector_stores/files_test.rb b/test/openai/resources/vector_stores/files_test.rb
index a8c36003..e322a6ba 100644
--- a/test/openai/resources/vector_stores/files_test.rb
+++ b/test/openai/resources/vector_stores/files_test.rb
@@ -7,20 +7,20 @@ def test_create_required_params
     response = @openai.vector_stores.files.create("vs_abc123", file_id: "file_id")
 
     assert_pattern do
-      response => OpenAI::Models::VectorStores::VectorStoreFile
+      response => OpenAI::VectorStores::VectorStoreFile
     end
 
     assert_pattern do
       response => {
         id: String,
         created_at: Integer,
-        last_error: OpenAI::Models::VectorStores::VectorStoreFile::LastError | nil,
+        last_error: OpenAI::VectorStores::VectorStoreFile::LastError | nil,
         object: Symbol,
-        status: OpenAI::Models::VectorStores::VectorStoreFile::Status,
+        status: OpenAI::VectorStores::VectorStoreFile::Status,
         usage_bytes: Integer,
         vector_store_id: String,
-        attributes: ^(OpenAI::HashOf[union: OpenAI::Models::VectorStores::VectorStoreFile::Attribute]) | nil,
-        chunking_strategy: OpenAI::Models::FileChunkingStrategy | nil
+        attributes: ^(OpenAI::Internal::Type::HashOf[union: OpenAI::VectorStores::VectorStoreFile::Attribute]) | nil,
+        chunking_strategy: OpenAI::FileChunkingStrategy | nil
       }
     end
   end
@@ -29,46 +29,47 @@ def test_retrieve_required_params
     response = @openai.vector_stores.files.retrieve("file-abc123", vector_store_id: "vs_abc123")
 
     assert_pattern do
-      response => OpenAI::Models::VectorStores::VectorStoreFile
+      response => OpenAI::VectorStores::VectorStoreFile
     end
 
     assert_pattern do
       response => {
         id: String,
         created_at: Integer,
-        last_error: OpenAI::Models::VectorStores::VectorStoreFile::LastError | nil,
+        last_error: OpenAI::VectorStores::VectorStoreFile::LastError | nil,
         object: Symbol,
-        status: OpenAI::Models::VectorStores::VectorStoreFile::Status,
+        status: OpenAI::VectorStores::VectorStoreFile::Status,
         usage_bytes: Integer,
         vector_store_id: String,
-        attributes: ^(OpenAI::HashOf[union: OpenAI::Models::VectorStores::VectorStoreFile::Attribute]) | nil,
-        chunking_strategy: OpenAI::Models::FileChunkingStrategy | nil
+        attributes: ^(OpenAI::Internal::Type::HashOf[union: OpenAI::VectorStores::VectorStoreFile::Attribute]) | nil,
+        chunking_strategy: OpenAI::FileChunkingStrategy | nil
       }
     end
   end
 
   def test_update_required_params
-    response = @openai.vector_stores.files.update(
-      "file-abc123",
-      vector_store_id: "vs_abc123",
-      attributes: {foo: "string"}
-    )
+    response =
+      @openai.vector_stores.files.update(
+        "file-abc123",
+        vector_store_id: "vs_abc123",
+        attributes: {foo: "string"}
+      )
 
     assert_pattern do
-      response => OpenAI::Models::VectorStores::VectorStoreFile
+      response => OpenAI::VectorStores::VectorStoreFile
     end
 
     assert_pattern do
       response => {
         id: String,
         created_at: Integer,
-        last_error: OpenAI::Models::VectorStores::VectorStoreFile::LastError | nil,
+        last_error: OpenAI::VectorStores::VectorStoreFile::LastError | nil,
         object: Symbol,
-        status: OpenAI::Models::VectorStores::VectorStoreFile::Status,
+        status: OpenAI::VectorStores::VectorStoreFile::Status,
         usage_bytes: Integer,
         vector_store_id: String,
-        attributes: ^(OpenAI::HashOf[union: OpenAI::Models::VectorStores::VectorStoreFile::Attribute]) | nil,
-        chunking_strategy: OpenAI::Models::FileChunkingStrategy | nil
+        attributes: ^(OpenAI::Internal::Type::HashOf[union: OpenAI::VectorStores::VectorStoreFile::Attribute]) | nil,
+        chunking_strategy: OpenAI::FileChunkingStrategy | nil
       }
     end
   end
@@ -77,25 +78,27 @@ def test_list
     response = @openai.vector_stores.files.list("vector_store_id")
 
     assert_pattern do
-      response => OpenAI::CursorPage
+      response => OpenAI::Internal::CursorPage
     end
 
     row = response.to_enum.first
+    return if row.nil?
+
     assert_pattern do
-      row => OpenAI::Models::VectorStores::VectorStoreFile
+      row => OpenAI::VectorStores::VectorStoreFile
     end
 
     assert_pattern do
       row => {
         id: String,
         created_at: Integer,
-        last_error: OpenAI::Models::VectorStores::VectorStoreFile::LastError | nil,
+        last_error: OpenAI::VectorStores::VectorStoreFile::LastError | nil,
         object: Symbol,
-        status: OpenAI::Models::VectorStores::VectorStoreFile::Status,
+        status: OpenAI::VectorStores::VectorStoreFile::Status,
         usage_bytes: Integer,
         vector_store_id: String,
-        attributes: ^(OpenAI::HashOf[union: OpenAI::Models::VectorStores::VectorStoreFile::Attribute]) | nil,
-        chunking_strategy: OpenAI::Models::FileChunkingStrategy | nil
+        attributes: ^(OpenAI::Internal::Type::HashOf[union: OpenAI::VectorStores::VectorStoreFile::Attribute]) | nil,
+        chunking_strategy: OpenAI::FileChunkingStrategy | nil
       }
     end
   end
@@ -104,13 +107,13 @@ def test_delete_required_params
     response = @openai.vector_stores.files.delete("file_id", vector_store_id: "vector_store_id")
 
     assert_pattern do
-      response => OpenAI::Models::VectorStores::VectorStoreFileDeleted
+      response => OpenAI::VectorStores::VectorStoreFileDeleted
     end
 
     assert_pattern do
       response => {
         id: String,
-        deleted: OpenAI::BooleanModel,
+        deleted: OpenAI::Internal::Type::Boolean,
         object: Symbol
       }
     end
@@ -120,10 +123,12 @@ def test_content_required_params
     response = @openai.vector_stores.files.content("file-abc123", vector_store_id: "vs_abc123")
 
     assert_pattern do
-      response => OpenAI::Page
+      response => OpenAI::Internal::Page
     end
 
     row = response.to_enum.first
+    return if row.nil?
+
     assert_pattern do
       row => OpenAI::Models::VectorStores::FileContentResponse
     end
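The file tests above walk the whole lifecycle of a file inside a vector store. Stitched together as a hedged sketch (IDs are the fixtures' illustrative values, and the chained `file.id` reads are assumptions about the returned models):

require "openai"

client = OpenAI::Client.new(base_url: "http://localhost:4010", api_key: "My API Key")

# Attach an existing file to a vector store, tag it, read it back, remove it.
file = client.vector_stores.files.create("vs_abc123", file_id: "file_id")
client.vector_stores.files.update(file.id, vector_store_id: "vs_abc123", attributes: {foo: "string"})

client.vector_stores.files.content(file.id, vector_store_id: "vs_abc123").to_enum.each do |chunk|
  puts chunk.inspect # rows are OpenAI::Models::VectorStores::FileContentResponse
end

deleted = client.vector_stores.files.delete(file.id, vector_store_id: "vs_abc123")
puts deleted.deleted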
diff --git a/test/openai/resources/vector_stores_test.rb b/test/openai/resources/vector_stores_test.rb
index 2296e034..bbce9895 100644
--- a/test/openai/resources/vector_stores_test.rb
+++ b/test/openai/resources/vector_stores_test.rb
@@ -7,21 +7,21 @@ def test_create
     response = @openai.vector_stores.create
 
     assert_pattern do
-      response => OpenAI::Models::VectorStore
+      response => OpenAI::VectorStore
     end
 
     assert_pattern do
       response => {
         id: String,
         created_at: Integer,
-        file_counts: OpenAI::Models::VectorStore::FileCounts,
+        file_counts: OpenAI::VectorStore::FileCounts,
         last_active_at: Integer | nil,
-        metadata: ^(OpenAI::HashOf[String]) | nil,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
         name: String,
         object: Symbol,
-        status: OpenAI::Models::VectorStore::Status,
+        status: OpenAI::VectorStore::Status,
         usage_bytes: Integer,
-        expires_after: OpenAI::Models::VectorStore::ExpiresAfter | nil,
+        expires_after: OpenAI::VectorStore::ExpiresAfter | nil,
         expires_at: Integer | nil
       }
     end
@@ -31,21 +31,21 @@ def test_retrieve
     response = @openai.vector_stores.retrieve("vector_store_id")
 
     assert_pattern do
-      response => OpenAI::Models::VectorStore
+      response => OpenAI::VectorStore
     end
 
     assert_pattern do
       response => {
         id: String,
         created_at: Integer,
-        file_counts: OpenAI::Models::VectorStore::FileCounts,
+        file_counts: OpenAI::VectorStore::FileCounts,
         last_active_at: Integer | nil,
-        metadata: ^(OpenAI::HashOf[String]) | nil,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
         name: String,
         object: Symbol,
-        status: OpenAI::Models::VectorStore::Status,
+        status: OpenAI::VectorStore::Status,
         usage_bytes: Integer,
-        expires_after: OpenAI::Models::VectorStore::ExpiresAfter | nil,
+        expires_after: OpenAI::VectorStore::ExpiresAfter | nil,
         expires_at: Integer | nil
       }
     end
@@ -55,21 +55,21 @@ def test_update
     response = @openai.vector_stores.update("vector_store_id")
 
     assert_pattern do
-      response => OpenAI::Models::VectorStore
+      response => OpenAI::VectorStore
     end
 
     assert_pattern do
       response => {
         id: String,
         created_at: Integer,
-        file_counts: OpenAI::Models::VectorStore::FileCounts,
+        file_counts: OpenAI::VectorStore::FileCounts,
         last_active_at: Integer | nil,
-        metadata: ^(OpenAI::HashOf[String]) | nil,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
         name: String,
         object: Symbol,
-        status: OpenAI::Models::VectorStore::Status,
+        status: OpenAI::VectorStore::Status,
         usage_bytes: Integer,
-        expires_after: OpenAI::Models::VectorStore::ExpiresAfter | nil,
+        expires_after: OpenAI::VectorStore::ExpiresAfter | nil,
         expires_at: Integer | nil
       }
     end
@@ -79,26 +79,28 @@ def test_list
     response = @openai.vector_stores.list
 
     assert_pattern do
-      response => OpenAI::CursorPage
+      response => OpenAI::Internal::CursorPage
     end
 
     row = response.to_enum.first
+    return if row.nil?
+
     assert_pattern do
-      row => OpenAI::Models::VectorStore
+      row => OpenAI::VectorStore
     end
 
     assert_pattern do
       row => {
         id: String,
         created_at: Integer,
-        file_counts: OpenAI::Models::VectorStore::FileCounts,
+        file_counts: OpenAI::VectorStore::FileCounts,
         last_active_at: Integer | nil,
-        metadata: ^(OpenAI::HashOf[String]) | nil,
+        metadata: ^(OpenAI::Internal::Type::HashOf[String]) | nil,
         name: String,
         object: Symbol,
-        status: OpenAI::Models::VectorStore::Status,
+        status: OpenAI::VectorStore::Status,
         usage_bytes: Integer,
-        expires_after: OpenAI::Models::VectorStore::ExpiresAfter | nil,
+        expires_after: OpenAI::VectorStore::ExpiresAfter | nil,
         expires_at: Integer | nil
       }
     end
@@ -108,13 +110,13 @@ def test_delete
     response = @openai.vector_stores.delete("vector_store_id")
 
     assert_pattern do
-      response => OpenAI::Models::VectorStoreDeleted
+      response => OpenAI::VectorStoreDeleted
     end
 
     assert_pattern do
       response => {
         id: String,
-        deleted: OpenAI::BooleanModel,
+        deleted: OpenAI::Internal::Type::Boolean,
         object: Symbol
       }
     end
@@ -124,18 +126,20 @@ def test_search_required_params
     response = @openai.vector_stores.search("vs_abc123", query: "string")
 
     assert_pattern do
-      response => OpenAI::Page
+      response => OpenAI::Internal::Page
    end
 
     row = response.to_enum.first
+    return if row.nil?
+
     assert_pattern do
       row => OpenAI::Models::VectorStoreSearchResponse
     end
 
     assert_pattern do
       row => {
-        attributes: ^(OpenAI::HashOf[union: OpenAI::Models::VectorStoreSearchResponse::Attribute]) | nil,
-        content: ^(OpenAI::ArrayOf[OpenAI::Models::VectorStoreSearchResponse::Content]),
+        attributes: ^(OpenAI::Internal::Type::HashOf[union: OpenAI::Models::VectorStoreSearchResponse::Attribute]) | nil,
+        content: ^(OpenAI::Internal::Type::ArrayOf[OpenAI::Models::VectorStoreSearchResponse::Content]),
         file_id: String,
         filename: String,
         score: Float
diff --git a/test/openai/resources/webhooks_test.rb b/test/openai/resources/webhooks_test.rb
new file mode 100644
index 00000000..514b3d31
--- /dev/null
+++ b/test/openai/resources/webhooks_test.rb
@@ -0,0 +1,6 @@
+# frozen_string_literal: true
+
+require_relative "../test_helper"
+
+class OpenAI::Test::Resources::WebhooksTest < OpenAI::Test::ResourceTest
+end
diff --git a/test/openai/test_helper.rb b/test/openai/test_helper.rb
index d0a5de2b..0878fc76 100644
--- a/test/openai/test_helper.rb
+++ b/test/openai/test_helper.rb
@@ -15,6 +15,7 @@ require "minitest/hooks/test"
 require "minitest/proveit"
 require "minitest/rg"
+require "webmock"
 
 require_relative "../../lib/openai"
 require_relative "resource_namespaces"
@@ -44,11 +45,19 @@ def self.now = Thread.current.thread_variable_get(:time_now) || _now
 
 class OpenAI::Test::SingletonClient < OpenAI::Client
   include Singleton
 
+  TEST_API_BASE_URL = ENV.fetch("TEST_API_BASE_URL", "http://localhost:4010")
+
   def initialize
-    super(base_url: ENV.fetch("TEST_API_BASE_URL", "http://localhost:4010"), api_key: "My API Key")
+    super(base_url: OpenAI::Test::SingletonClient::TEST_API_BASE_URL, api_key: "My API Key")
   end
 end
 
+module Minitest::Serial
+  def test_order = :random
+
+  def run_one_method(...) = Minitest::Runnable.run_one_method(...)
+end
+
 class Minitest::Test
   include Minitest::Hooks
 
@@ -72,3 +81,7 @@ def around_all = async? ? Sync { super } : super
 
   def around = async? ? Async { super }.wait : super
 end
+
+module WebMock
+  AssertionFailure.error_class = Minitest::Assertion
+end
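The helper now pulls in WebMock and, at the bottom, reassigns its failure class so stubbing assertions raise `Minitest::Assertion` and report as ordinary test failures rather than errors. A hedged sketch of how a test can lean on this (the stubbed route is illustrative, not one of the suite's fixtures):

require "webmock"
require "minitest/autorun"
require "net/http"

WebMock.enable!

class ExampleStubTest < Minitest::Test
  include WebMock::API

  def test_stubbed_route
    stub_request(:get, "http://localhost:4010/example")
      .to_return(status: 200, body: "{}", headers: {"content-type" => "application/json"})

    response = Net::HTTP.get_response(URI("http://localhost:4010/example"))
    assert_equal("200", response.code)

    # Because the helper sets WebMock::AssertionFailure.error_class to
    # Minitest::Assertion, a failed assert_requested surfaces as a normal
    # test failure instead of an unhandled error.
    assert_requested(:get, "http://localhost:4010/example")
  end
end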
"REQUEST_METHOD" => "POST", - "CONTENT_TYPE" => @ctype, - "CONTENT_LENGTH" => stdinput.string.length - } - end - end - - def test_file_encode - headers = {"content-type" => "multipart/form-data"} - cases = { - StringIO.new("abc") => "abc" - } - cases.each do |body, val| - encoded = OpenAI::Util.encode_content(headers, body) - cgi = FakeCGI.new(*encoded) - assert_pattern do - cgi[""] => ^val - end - end - end - - def test_hash_encode - headers = {"content-type" => "multipart/form-data"} - cases = { - {a: 2, b: 3} => {"a" => "2", "b" => "3"}, - {a: 2, b: nil} => {"a" => "2", "b" => "null"}, - {a: 2, b: [1, 2, 3]} => {"a" => "2", "b" => "1"}, - {file: StringIO.new("a")} => {"file" => "a"} - } - cases.each do |body, testcase| - encoded = OpenAI::Util.encode_content(headers, body) - cgi = FakeCGI.new(*encoded) - testcase.each do |key, val| - assert_equal(val, cgi[key]) - end - end - end -end - -class OpenAI::Test::UtilFusedEnumTest < Minitest::Test - def test_closing - arr = [1, 2, 3] - once = 0 - fused = OpenAI::Util.fused_enum(arr.to_enum) do - once = once.succ - end - - enumerated_1 = fused.to_a - assert_equal(arr, enumerated_1) - assert_equal(1, once) - - enumerated_2 = fused.to_a - assert_equal([], enumerated_2) - assert_equal(1, once) - end - - def test_rewind_chain - once = 0 - fused = OpenAI::Util.fused_enum([1, 2, 3].to_enum) do - once = once.succ - end - .lazy - .map(&:succ) - .filter(&:odd?) - first = fused.next - - assert_equal(3, first) - assert_equal(0, once) - assert_raises(StopIteration) { fused.rewind.next } - assert_equal(1, once) - end - - def test_external_iteration - it = [1, 2, 3].to_enum - first = it.next - fused = OpenAI::Util.fused_enum(it, external: true) - - assert_equal(1, first) - assert_equal([2, 3], fused.to_a) - end - - def test_close_fused - once = 0 - fused = OpenAI::Util.fused_enum([1, 2, 3].to_enum) do - once = once.succ - end - - OpenAI::Util.close_fused!(fused) - - assert_equal(1, once) - assert_equal([], fused.to_a) - assert_equal(1, once) - end - - def test_closed_fused_extern_iteration - taken = 0 - enum = [1, 2, 3].to_enum.lazy.map do - taken = taken.succ - _1 - end - fused = OpenAI::Util.fused_enum(enum) - first = fused.next - - assert_equal(1, first) - OpenAI::Util.close_fused!(fused) - assert_equal(1, taken) - end - - def test_closed_fused_taken_count - taken = 0 - enum = [1, 2, 3].to_enum.lazy.map do - taken = taken.succ - _1 - end - .map(&:succ) - .filter(&:odd?) - fused = OpenAI::Util.fused_enum(enum) - - assert_equal(0, taken) - OpenAI::Util.close_fused!(fused) - assert_equal(0, taken) - end - - def test_closed_fused_extern_iter_taken_count - taken = 0 - enum = [1, 2, 3].to_enum.lazy.map do - taken = taken.succ - _1 - end - .map(&:succ) - .filter(&:itself) - first = enum.next - assert_equal(2, first) - assert_equal(1, taken) - - fused = OpenAI::Util.fused_enum(enum) - OpenAI::Util.close_fused!(fused) - assert_equal(1, taken) - end - - def test_close_fused_sse_chain - taken = 0 - enum = [1, 2, 3].to_enum.lazy.map do - taken = taken.succ - _1 - end - .map(&:succ) - .filter(&:odd?) 
- .map(&:to_s) - - fused_1 = OpenAI::Util.fused_enum(enum) - fused_2 = OpenAI::Util.decode_lines(fused_1) - fused_3 = OpenAI::Util.decode_sse(fused_2) - - assert_equal(0, taken) - OpenAI::Util.close_fused!(fused_3) - assert_equal(0, taken) - end -end - -class OpenAI::Test::UtilSseTest < Minitest::Test - def test_decode_lines - cases = { - %w[] => %w[], - %W[\n\n] => %W[\n \n], - %W[\n \n] => %W[\n \n], - %w[a] => %w[a], - %W[a\nb] => %W[a\n b], - %W[a\nb\n] => %W[a\n b\n], - %W[\na b\n] => %W[\n ab\n], - %W[\na b\n\n] => %W[\n ab\n \n], - %W[\na b] => %W[\n ab], - %W[\u1F62E\u200D\u1F4A8] => %W[\u1F62E\u200D\u1F4A8], - %W[\u1F62E \u200D \u1F4A8] => %W[\u1F62E\u200D\u1F4A8] - } - eols = %W[\n \r \r\n] - cases.each do |enum, expected| - eols.each do |eol| - lines = OpenAI::Util.decode_lines(enum.map { _1.gsub("\n", eol) }) - assert_equal(expected.map { _1.gsub("\n", eol) }, lines.to_a, "eol=#{JSON.generate(eol)}") - end - end - end - - def test_mixed_decode_lines - cases = { - %w[] => %w[], - %W[\r\r] => %W[\r \r], - %W[\r \r] => %W[\r \r], - %W[\r\r\r] => %W[\r \r \r], - %W[\r\r \r] => %W[\r \r \r], - %W[\r \n] => %W[\r\n], - %W[\r\r\n] => %W[\r \r\n], - %W[\n\r] => %W[\n \r] - } - cases.each do |enum, expected| - lines = OpenAI::Util.decode_lines(enum) - assert_equal(expected, lines.to_a) - end - end - - def test_decode_sse - cases = { - "empty input" => { - [] => [] - }, - "single data event" => { - [ - "data: hello world\n", - "\n" - ] => [ - {data: "hello world\n"} - ] - }, - "multiple data lines" => { - [ - "data: line 1\n", - "data: line 2\n", - "\n" - ] => [ - {data: "line 1\nline 2\n"} - ] - }, - "complete event" => { - [ - "id: 123\n", - "event: update\n", - "data: hello world\n", - "retry: 5000\n", - "\n" - ] => [ - { - event: "update", - id: "123", - data: "hello world\n", - retry: 5000 - } - ] - }, - "multiple events" => { - [ - "event: update\n", - "data: first\n", - "\n", - "event: message\n", - "data: second\n", - "\n" - ] => [ - {event: "update", data: "first\n"}, - {event: "message", data: "second\n"} - ] - }, - "comments" => { - [ - ": this is a comment\n", - "data: actual data\n", - "\n" - ] => [ - {data: "actual data\n"} - ] - }, - "invalid retry" => { - [ - "retry: not a number\n", - "data: hello\n", - "\n" - ] => [ - {data: "hello\n"} - ] - }, - "invalid id with null" => { - [ - "id: bad\0id\n", - "data: hello\n", - "\n" - ] => [ - {data: "hello\n"} - ] - }, - "leading space in value" => { - [ - "data: hello world\n", - "data: leading space\n", - "\n" - ] => [ - {data: "hello world\n leading space\n"} - ] - }, - "no final newline" => { - [ - "data: hello\n", - "id: 1" - ] => [ - {data: "hello\n", id: "1"} - ] - }, - "multiple empty lines" => { - [ - "data: first\n", - "\n", - "\n", - "data: second\n", - "\n" - ] => [ - {data: "first\n"}, - {data: "second\n"} - ] - }, - "multibyte unicode" => { - [ - "data: \u1F62E\u200D\u1F4A8\n" - ] => [ - {data: "\u1F62E\u200D\u1F4A8\n"} - ] - } - } - - cases.each do |name, test_cases| - test_cases.each do |input, expected| - actual = OpenAI::Util.decode_sse(input).map(&:compact) - assert_equal(expected, actual, name) - end - end - end -end
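Although the suite is deleted, it remains a useful record of `OpenAI::Util.deep_merge`'s semantics (the helpers presumably live on under an internal namespace, though this diff does not show where). The behavior those tests pinned down, as a reference sketch:

# Quoted from the removed assertions; OpenAI::Util here is the pre-move
# namespace, not new public API.
OpenAI::Util.deep_merge({b: 2, c: 3}, {a: 1, c: 4}) # => {a: 1, b: 2, c: 4} (right side wins)
OpenAI::Util.deep_merge({b: {b2: 1}}, {b: 6})       # => {b: 6} (a scalar replaces a nested map)
OpenAI::Util.deep_merge(
  {a: {b: [1, 2]}},
  {a: {b: [3, 4]}},
  concat: true
) # => {a: {b: [1, 2, 3, 4]}}; with concat: false the right array replaces the left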